| prompt (stringlengths 162 to 4.26M) | response (stringlengths 109 to 5.16M) |
|---|---|
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
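// Hedged usage sketch (illustrative names, not part of the original source): an 8-bit signed
// multiply accumulated into a 32-bit addend and truncated to a 20-bit result, i.e. the
// configuration of the generated MacUnit Verilog shown further below.
class MacUnitExample extends Module {
  val io = IO(new Bundle {
    val a = Input(SInt(8.W))
    val b = Input(SInt(8.W))
    val c = Input(SInt(32.W))
    val d = Output(SInt(20.W))
  })
  // Arithmetic[SInt] is resolved from the implicit SIntArithmetic defined in Arithmetic.scala
  val mac_unit = Module(new MacUnit(SInt(8.W), SInt(32.W), SInt(20.W)))
  mac_unit.io.in_a := io.a
  mac_unit.io.in_b := io.b
  mac_unit.io.in_c := io.c
  io.d := mac_unit.io.out_d // computes in_a * in_b + in_c, truncated to the 20-bit output
}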
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param inputType Data type of the input operands
* @param outputType Data type of the values passed downstream
* @param accType Data type of the accumulator registers
* @param df Which dataflow(s) (output-stationary, weight-stationary, or both) this PE supports
* @param max_simultaneous_matmuls Maximum number of matmuls in flight at once (sizes the in_id/out_id fields)
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit object MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds to nearest, ties to even (the vxrm "rne" mode linked below)
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
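// Worked example (added for illustration): for self = "b1011".U (11) and u = 2.U,
// point_five = self(1) = 1, zeros is true (self & "b1".U =/= 0.U), and ones_digit = self(2) = 0,
// so r = 1 and the result is (11 >> 2) + 1 = 3, i.e. 11/4 = 2.75 rounded to the nearest integer.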
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating-point square root, but we should use an integer square root instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
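// Worked example (added for illustration): with an 8-bit exponent field the bias is 127, so
// u = 2.U gives shift_exp = 125 and shift_fn encodes 2^(125 - 127) = 0.25; the MulRecFN above
// therefore scales self by 2^-u, rounding the product with round_near_even.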
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_110( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [31:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7]
wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54]
wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
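// Hedged usage sketch (illustrative names, not part of the original source): a two-stage
// pipeline of a Bool with reset value false.B; the stages get the suggested names "sync_1"
// (first) and "sync_0" (last).
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("sync"))
}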
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flop chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
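// Hedged usage sketch (illustrative names, not part of the original source): a single-bit,
// three-deep asynchronous-reset synchronizer. With sync = 3 and init = 0 this elaborates
// per-bit chains like the AsyncResetSynchronizerPrimitiveShiftReg_d3_i0 module shown in the
// Verilog further below.
class AsyncResetSynchronizerExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  io.q := AsyncResetSynchronizerShiftReg(io.d, sync = 3, init = 0, name = Some("d_sync"))
}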
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
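// Hedged usage sketch (illustrative names, not part of the original source): a 4-bit value
// captured only when `en` is asserted; with doInit = true the underlying RegEnable also gets
// a reset value of 0. Typically instantiated in the receiving clock domain of a crossing.
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(4.W))
    val en = Input(Bool())
    val q = Output(UInt(4.W))
  })
  io.q := ClockCrossingReg(io.d, io.en, doInit = true, name = Some("cdc_pipe"))
}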
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_462( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:68:19]
wire _sync_2_T = io_d_0; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= _sync_2_T; // @[SynchronizerReg.scala:51:87, :54:22]
end
always @(posedge, posedge) |
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and its
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File ProbePicker.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, IdRange}
/* A ProbePicker is used to unify multiple cache banks into one logical cache */
class ProbePicker(implicit p: Parameters) extends LazyModule
{
val node = TLAdapterNode(
clientFn = { p =>
// The ProbePicker assembles multiple clients based on the assumption that they are contiguous in the clients list.
// This should be true for clusters of xbar :=* BankBinder connections
def combine(next: TLMasterParameters, pair: (TLMasterParameters, Seq[TLMasterParameters])) = {
val (head, output) = pair
if (head.visibility.exists(x => next.visibility.exists(_.overlaps(x)))) {
(next, head +: output) // pair is not banked, push head without merging
} else {
def redact(x: TLMasterParameters) = x.v1copy(sourceId = IdRange(0,1), nodePath = Nil, visibility = Seq(AddressSet(0, ~0)))
require (redact(next) == redact(head), s"${redact(next)} != ${redact(head)}")
val merge = head.v1copy(
sourceId = IdRange(
head.sourceId.start min next.sourceId.start,
head.sourceId.end max next.sourceId.end),
visibility = AddressSet.unify(head.visibility ++ next.visibility))
(merge, output)
}
}
val myNil: Seq[TLMasterParameters] = Nil
val (head, output) = p.clients.init.foldRight((p.clients.last, myNil))(combine)
p.v1copy(clients = head +: output)
},
managerFn = { p => p })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out <> in
// Based on address, adjust source to route to the correct bank
if (edgeIn.client.clients.size != edgeOut.client.clients.size) {
in.b.bits.source := Mux1H(
edgeOut.client.clients.map(_.sourceId contains out.b.bits.source),
edgeOut.client.clients.map { c =>
val banks = edgeIn.client.clients.filter(c.sourceId contains _.sourceId)
if (banks.size == 1) {
out.b.bits.source // allow sharing the value between single-bank cases
} else {
Mux1H(
banks.map(_.visibility.map(_ contains out.b.bits.address).reduce(_ || _)),
banks.map(_.sourceId.start.U))
}
}
)
}
}
}
}
object ProbePicker
{
def apply()(implicit p: Parameters): TLNode = {
val picker = LazyModule(new ProbePicker)
picker.node
}
}
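// Hedged wiring sketch (hypothetical node names, not from the source): the picker sits between
// the banked clients (reached through an xbar :=* BankBinder cluster, per the comment in
// clientFn above) and the outer manager, so that B-channel probes are steered back to the
// correct bank:
//   outer_manager_node :=* ProbePicker() :=* banked_xbar_node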
| module ProbePicker( // @[ProbePicker.scala:42:9]
input clock, // @[ProbePicker.scala:42:9]
input reset, // @[ProbePicker.scala:42:9]
output auto_in_1_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_1_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_1_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_1_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [27:0] auto_in_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_1_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_1_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_1_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_1_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_1_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_1_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_1_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_1_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_in_0_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_0_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_0_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_0_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_0_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_0_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_0_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_0_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_0_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_0_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_0_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_0_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_0_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_0_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_1_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_1_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_1_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_1_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [27:0] auto_out_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_1_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_1_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_1_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_1_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_1_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_out_1_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_out_1_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_1_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_0_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_0_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_0_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_0_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_0_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_0_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_0_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_0_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_0_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_0_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_out_0_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_out_0_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_0_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_0_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
TLMonitor_39 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (auto_out_0_a_ready),
.io_in_a_valid (auto_in_0_a_valid),
.io_in_a_bits_opcode (auto_in_0_a_bits_opcode),
.io_in_a_bits_param (auto_in_0_a_bits_param),
.io_in_a_bits_size (auto_in_0_a_bits_size),
.io_in_a_bits_source (auto_in_0_a_bits_source),
.io_in_a_bits_address (auto_in_0_a_bits_address),
.io_in_a_bits_mask (auto_in_0_a_bits_mask),
.io_in_a_bits_corrupt (auto_in_0_a_bits_corrupt),
.io_in_d_ready (auto_in_0_d_ready),
.io_in_d_valid (auto_out_0_d_valid),
.io_in_d_bits_opcode (auto_out_0_d_bits_opcode),
.io_in_d_bits_size (auto_out_0_d_bits_size),
.io_in_d_bits_source (auto_out_0_d_bits_source),
.io_in_d_bits_denied (auto_out_0_d_bits_denied),
.io_in_d_bits_corrupt (auto_out_0_d_bits_corrupt)
); // @[Nodes.scala:27:25]
TLMonitor_40 monitor_1 ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (auto_out_1_a_ready),
.io_in_a_valid (auto_in_1_a_valid),
.io_in_a_bits_opcode (auto_in_1_a_bits_opcode),
.io_in_a_bits_param (auto_in_1_a_bits_param),
.io_in_a_bits_size (auto_in_1_a_bits_size),
.io_in_a_bits_source (auto_in_1_a_bits_source),
.io_in_a_bits_address (auto_in_1_a_bits_address),
.io_in_a_bits_mask (auto_in_1_a_bits_mask),
.io_in_a_bits_corrupt (auto_in_1_a_bits_corrupt),
.io_in_d_ready (auto_in_1_d_ready),
.io_in_d_valid (auto_out_1_d_valid),
.io_in_d_bits_opcode (auto_out_1_d_bits_opcode),
.io_in_d_bits_param (auto_out_1_d_bits_param),
.io_in_d_bits_size (auto_out_1_d_bits_size),
.io_in_d_bits_source (auto_out_1_d_bits_source),
.io_in_d_bits_sink (auto_out_1_d_bits_sink),
.io_in_d_bits_denied (auto_out_1_d_bits_denied),
.io_in_d_bits_corrupt (auto_out_1_d_bits_corrupt)
); // @[Nodes.scala:27:25]
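  // The two TLMonitor instances above are assertion-only protocol checkers; the
  // functional behaviour of this ProbePicker instance is the pure feed-through
  // below (no B channel is present here, so no Probe source rewriting is needed).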
assign auto_in_1_a_ready = auto_out_1_a_ready; // @[ProbePicker.scala:42:9]
assign auto_in_1_d_valid = auto_out_1_d_valid; // @[ProbePicker.scala:42:9]
assign auto_in_1_d_bits_opcode = auto_out_1_d_bits_opcode; // @[ProbePicker.scala:42:9]
assign auto_in_1_d_bits_param = auto_out_1_d_bits_param; // @[ProbePicker.scala:42:9]
assign auto_in_1_d_bits_size = auto_out_1_d_bits_size; // @[ProbePicker.scala:42:9]
assign auto_in_1_d_bits_source = auto_out_1_d_bits_source; // @[ProbePicker.scala:42:9]
assign auto_in_1_d_bits_sink = auto_out_1_d_bits_sink; // @[ProbePicker.scala:42:9]
assign auto_in_1_d_bits_denied = auto_out_1_d_bits_denied; // @[ProbePicker.scala:42:9]
assign auto_in_1_d_bits_data = auto_out_1_d_bits_data; // @[ProbePicker.scala:42:9]
assign auto_in_1_d_bits_corrupt = auto_out_1_d_bits_corrupt; // @[ProbePicker.scala:42:9]
assign auto_in_0_a_ready = auto_out_0_a_ready; // @[ProbePicker.scala:42:9]
assign auto_in_0_d_valid = auto_out_0_d_valid; // @[ProbePicker.scala:42:9]
assign auto_in_0_d_bits_opcode = auto_out_0_d_bits_opcode; // @[ProbePicker.scala:42:9]
assign auto_in_0_d_bits_size = auto_out_0_d_bits_size; // @[ProbePicker.scala:42:9]
assign auto_in_0_d_bits_source = auto_out_0_d_bits_source; // @[ProbePicker.scala:42:9]
assign auto_in_0_d_bits_denied = auto_out_0_d_bits_denied; // @[ProbePicker.scala:42:9]
assign auto_in_0_d_bits_data = auto_out_0_d_bits_data; // @[ProbePicker.scala:42:9]
assign auto_in_0_d_bits_corrupt = auto_out_0_d_bits_corrupt; // @[ProbePicker.scala:42:9]
assign auto_out_1_a_valid = auto_in_1_a_valid; // @[ProbePicker.scala:42:9]
assign auto_out_1_a_bits_opcode = auto_in_1_a_bits_opcode; // @[ProbePicker.scala:42:9]
assign auto_out_1_a_bits_param = auto_in_1_a_bits_param; // @[ProbePicker.scala:42:9]
assign auto_out_1_a_bits_size = auto_in_1_a_bits_size; // @[ProbePicker.scala:42:9]
assign auto_out_1_a_bits_source = auto_in_1_a_bits_source; // @[ProbePicker.scala:42:9]
assign auto_out_1_a_bits_address = auto_in_1_a_bits_address; // @[ProbePicker.scala:42:9]
assign auto_out_1_a_bits_mask = auto_in_1_a_bits_mask; // @[ProbePicker.scala:42:9]
assign auto_out_1_a_bits_data = auto_in_1_a_bits_data; // @[ProbePicker.scala:42:9]
assign auto_out_1_a_bits_corrupt = auto_in_1_a_bits_corrupt; // @[ProbePicker.scala:42:9]
assign auto_out_1_d_ready = auto_in_1_d_ready; // @[ProbePicker.scala:42:9]
assign auto_out_0_a_valid = auto_in_0_a_valid; // @[ProbePicker.scala:42:9]
assign auto_out_0_a_bits_opcode = auto_in_0_a_bits_opcode; // @[ProbePicker.scala:42:9]
assign auto_out_0_a_bits_param = auto_in_0_a_bits_param; // @[ProbePicker.scala:42:9]
assign auto_out_0_a_bits_size = auto_in_0_a_bits_size; // @[ProbePicker.scala:42:9]
assign auto_out_0_a_bits_source = auto_in_0_a_bits_source; // @[ProbePicker.scala:42:9]
assign auto_out_0_a_bits_address = auto_in_0_a_bits_address; // @[ProbePicker.scala:42:9]
assign auto_out_0_a_bits_mask = auto_in_0_a_bits_mask; // @[ProbePicker.scala:42:9]
assign auto_out_0_a_bits_data = auto_in_0_a_bits_data; // @[ProbePicker.scala:42:9]
assign auto_out_0_a_bits_corrupt = auto_in_0_a_bits_corrupt; // @[ProbePicker.scala:42:9]
assign auto_out_0_d_ready = auto_in_0_d_ready; // @[ProbePicker.scala:42:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
// If no queuing, have to add an additional slot since head == tail implies empty
// TODO this should be fixed, should use all slots available
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
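    // Worked example (hypothetical sizes): with output queues, two traversable VCs of
    // bufferSize 2 and 3 and an untraversable VC between them (contributing 0) give
    // per-VC sizes Seq(2, 0, 3), hence delims = Seq(0, 2, 2, 5), starts = Seq(0, 0, 2),
    // ends = Seq(2, 0, 5) and fullSize = 5: VC0 owns slots [0, 2) of the shared memory
    // and VC2 owns slots [2, 5).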
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
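    // Bypass path: when the target VC's region of the shared memory is empty and its
    // 1-entry output queue can accept a flit, the incoming flit skips the memory and
    // is enqueued directly (only when output queues are in use).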
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
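  // Per-VC packet lifecycle as used below: g_i (idle) -> g_r (awaiting route
  // computation), or directly to g_v when the packet egresses at this node ->
  // g_v (awaiting VC allocation) -> g_a (active, competing for switch allocation)
  // -> back to g_i once the tail flit is granted. g_c is defined by the Enum but is
  // never assigned in this file.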
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
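  // Rotating-priority pick: the request vector appears twice in the PriorityEncoderOH
  // input; the low copy keeps only VCs strictly above the last grant (vals & ~mask),
  // so those win first, while the unmasked high copy provides the wrap-around when no
  // such VC is requesting. OR-ing the two halves of the one-hot result folds it back
  // down to nVirtualChannels bits.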
  // Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
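  // Credit handling: every switch-allocation grant returns one credit for the granted
  // input VC (credit_return), and additionally marks that VC free (vc_free) when the
  // departing flit is a tail, i.e. the packet has fully left the buffer.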
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
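  // filterVCSel statically ties to false any output-VC select bit that the routing
  // relation can never legally choose from this input VC, so the corresponding state
  // bits and muxing can be optimized away.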
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module InputUnit_62( // @[InputUnit.scala:158:7]
input clock, // @[InputUnit.scala:158:7]
input reset, // @[InputUnit.scala:158:7]
output [2:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14]
output [2:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
output [4:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
output [1:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [4:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14]
output [1:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_4, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_5, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_6, // @[InputUnit.scala:170:14]
input io_router_resp_vc_sel_1_7, // @[InputUnit.scala:170:14]
input io_vcalloc_req_ready, // @[InputUnit.scala:170:14]
output io_vcalloc_req_valid, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_4, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_5, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_6, // @[InputUnit.scala:170:14]
output io_vcalloc_req_bits_vc_sel_1_7, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_4, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_5, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_6, // @[InputUnit.scala:170:14]
input io_vcalloc_resp_vc_sel_1_7, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_3, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_4, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_5, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_6, // @[InputUnit.scala:170:14]
input io_out_credit_available_3_7, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_0, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_3, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_4, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_5, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_6, // @[InputUnit.scala:170:14]
input io_out_credit_available_2_7, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_3, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_4, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_5, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_6, // @[InputUnit.scala:170:14]
input io_out_credit_available_1_7, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_1, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_2, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_3, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_4, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_5, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_6, // @[InputUnit.scala:170:14]
input io_out_credit_available_0_7, // @[InputUnit.scala:170:14]
input io_salloc_req_0_ready, // @[InputUnit.scala:170:14]
output io_salloc_req_0_valid, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_4, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_5, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_6, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_3_7, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_4, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_5, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_6, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_2_7, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_0, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_4, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_5, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_6, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_1_7, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_1, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_3, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_4, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_5, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_6, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_vc_sel_0_7, // @[InputUnit.scala:170:14]
output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14]
output io_out_0_valid, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14]
output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14]
output [72:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14]
output [4:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14]
output [4:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14]
output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14]
output [2:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14]
output [2:0] io_debug_va_stall, // @[InputUnit.scala:170:14]
output [2:0] io_debug_sa_stall, // @[InputUnit.scala:170:14]
input io_in_flit_0_valid, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14]
input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14]
input [72:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14]
input [4:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14]
input [4:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14]
input [2:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14]
output [7:0] io_in_credit_return, // @[InputUnit.scala:170:14]
output [7:0] io_in_vc_free // @[InputUnit.scala:170:14]
);
wire vcalloc_vals_7; // @[InputUnit.scala:266:32]
wire vcalloc_vals_6; // @[InputUnit.scala:266:32]
wire vcalloc_vals_5; // @[InputUnit.scala:266:32]
wire vcalloc_vals_4; // @[InputUnit.scala:266:32]
wire _salloc_arb_io_in_4_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_5_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_6_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_in_7_ready; // @[InputUnit.scala:296:26]
wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26]
wire [7:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26]
wire _route_arbiter_io_in_4_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_5_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_6_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_in_7_ready; // @[InputUnit.scala:187:29]
wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29]
wire [2:0] _route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29]
wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_3_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_3_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_4_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_4_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_5_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_5_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_6_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_6_bits_payload; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_valid; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_bits_head; // @[InputUnit.scala:181:28]
wire _input_buffer_io_deq_7_bits_tail; // @[InputUnit.scala:181:28]
wire [72:0] _input_buffer_io_deq_7_bits_payload; // @[InputUnit.scala:181:28]
reg [2:0] states_4_g; // @[InputUnit.scala:192:19]
reg states_4_vc_sel_1_4; // @[InputUnit.scala:192:19]
reg [2:0] states_4_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_4_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_4_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_4_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_4_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_5_g; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_1_4; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_1_5; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_1_6; // @[InputUnit.scala:192:19]
reg states_5_vc_sel_1_7; // @[InputUnit.scala:192:19]
reg [2:0] states_5_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_5_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_5_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_5_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_5_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_6_g; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_1_4; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_1_5; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_1_6; // @[InputUnit.scala:192:19]
reg states_6_vc_sel_1_7; // @[InputUnit.scala:192:19]
reg [2:0] states_6_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_6_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_6_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_6_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_6_flow_egress_node_id; // @[InputUnit.scala:192:19]
reg [2:0] states_7_g; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_1_4; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_1_5; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_1_6; // @[InputUnit.scala:192:19]
reg states_7_vc_sel_1_7; // @[InputUnit.scala:192:19]
reg [2:0] states_7_flow_vnet_id; // @[InputUnit.scala:192:19]
reg [4:0] states_7_flow_ingress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_7_flow_ingress_node_id; // @[InputUnit.scala:192:19]
reg [4:0] states_7_flow_egress_node; // @[InputUnit.scala:192:19]
reg [1:0] states_7_flow_egress_node_id; // @[InputUnit.scala:192:19]
wire _GEN = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30]
wire route_arbiter_io_in_4_valid = states_4_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_5_valid = states_5_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_6_valid = states_6_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
wire route_arbiter_io_in_7_valid = states_7_g == 3'h1; // @[InputUnit.scala:192:19, :229:22]
reg [7:0] mask; // @[InputUnit.scala:250:21]
wire [7:0] _vcalloc_filter_T_3 = {vcalloc_vals_7, vcalloc_vals_6, vcalloc_vals_5, vcalloc_vals_4, 4'h0} & ~mask; // @[InputUnit.scala:250:21, :253:{80,87,89}, :266:32]
wire [15:0] vcalloc_filter = _vcalloc_filter_T_3[0] ? 16'h1 : _vcalloc_filter_T_3[1] ? 16'h2 : _vcalloc_filter_T_3[2] ? 16'h4 : _vcalloc_filter_T_3[3] ? 16'h8 : _vcalloc_filter_T_3[4] ? 16'h10 : _vcalloc_filter_T_3[5] ? 16'h20 : _vcalloc_filter_T_3[6] ? 16'h40 : _vcalloc_filter_T_3[7] ? 16'h80 : vcalloc_vals_4 ? 16'h1000 : vcalloc_vals_5 ? 16'h2000 : vcalloc_vals_6 ? 16'h4000 : {vcalloc_vals_7, 15'h0}; // @[OneHot.scala:85:71]
wire [7:0] vcalloc_sel = vcalloc_filter[7:0] | vcalloc_filter[15:8]; // @[Mux.scala:50:70]
wire io_vcalloc_req_valid_0 = vcalloc_vals_4 | vcalloc_vals_5 | vcalloc_vals_6 | vcalloc_vals_7; // @[package.scala:81:59]
assign vcalloc_vals_4 = states_4_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_5 = states_5_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_6 = states_6_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
assign vcalloc_vals_7 = states_7_g == 3'h2; // @[InputUnit.scala:192:19, :266:32]
wire _GEN_0 = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35]
wire _GEN_1 = _GEN_0 & vcalloc_sel[4]; // @[Mux.scala:32:36]
wire _GEN_2 = _GEN_0 & vcalloc_sel[5]; // @[Mux.scala:32:36]
wire _GEN_3 = _GEN_0 & vcalloc_sel[6]; // @[Mux.scala:32:36]
wire _GEN_4 = _GEN_0 & vcalloc_sel[7]; // @[Mux.scala:32:36] |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
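// Typical usage sketch (hypothetical node names): a buffer is spliced into a
// diplomatic connection, e.g. `consumer.node := TLBuffer() := producer.node`, or
// several back-to-back stages via `TLBuffer.chainNode(2, Some("mem"))`; the
// BufferParams arguments (e.g. BufferParams.default) set the queueing behaviour of
// each TileLink channel independently.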
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
    * its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
 * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]];
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
 * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesIn`, if false it corresponds to
 * `danglesOut`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
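// Illustrative note (not from the original source): for a single connection a := b,
// b's outward side produces roughly
//   Dangle(source = HalfEdge(b.serial, 0), sink = HalfEdge(a.serial, 0), flipped = false, ...)
// while a's inward side carries the same pair of HalfEdges with flipped = true, which is
// how [[LazyModuleImp.instantiate]] pairs the two ends back up.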
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
 * @tparam DI
 * Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
 * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
 * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
 * parameters.
 * @tparam UI
 * Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 * the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
 * @tparam EI
 * Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
 * specified for a sink according to protocol.
 * @tparam BI
 * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 * It should extend [[chisel3.Data]], which represents the real hardware.
 * @tparam DO
 * Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
 * @tparam UO
 * Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
 * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
 * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
 * @tparam EO
 * Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
 * specified for a source according to protocol.
 * @tparam BO
 * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 * interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 * - line `─`: the source is processed by a function and the result is passed on to others
 * - Arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
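  // Illustrative sketch (not part of the upstream code): a hypothetical one-to-one
  // adapter-style node could resolve a star on one side by mirroring the known count
  // on the opposite side, e.g.:
  //
  //   protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) = {
  //     require(iStar <= 1 && oStar <= 1, "this sketch allows at most one star binding per side")
  //     val iResolved = if (iStar == 1) oKnown else 0 // N :*= foo absorbs one input per known output
  //     val oResolved = if (oStar == 1) iKnown else 0 // foo :=* N emits one output per known input
  //     (iResolved, oResolved)
  //   }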
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
   * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
  /** Ensure that the same node is not visited twice in resolving `:*=`, etc. operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
   * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
   * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
   * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolve star depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
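  // Worked example (illustrative, not from the original source): with outward bindings
  // [BIND_ONCE, BIND_STAR, BIND_ONCE] and resolveStar yielding oStar = 2, the per-binding
  // edge counts are [1, 2, 1], oSum = [0, 1, 3, 4], and oPortMapping becomes
  // [(0,1), (1,3), (3,4)], i.e. the star binding owns output edge indices 1 and 2.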
  /** Sequence of outward ports, before any [[EphemeralNode]] forwarding is resolved.
   *
   * This should be called after all star bindings are resolved.
   *
   * Each element is: `j` Port index of this binding in the [[InwardNode.iPortMapping]] of the node on the other side
   * of the binding. `n` Instance of the inward node. `p` View of [[Parameters]] where this connection was made. `s`
   * Source info where this connection was made in the source code.
   */
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward ports, before any [[EphemeralNode]] forwarding is resolved.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
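  // Illustrative note (not from the original source): when an [[EphemeralNode]] sits between
  // two real nodes, its iForward/oForward entries are non-None, so oTrace/iTrace hop over it
  // and the resulting oPorts/iPorts refer directly to the real node on the far side.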
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
   * If you need access to the edges of a foreign Node, use this method (`in`/`out` create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
   * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
   * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
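  // Illustrative note (not from the original source): bind records the binding on both
  // endpoints with mirrored star/query kinds. For example, x :*= y pushes BIND_STAR onto
  // x's inward bindings and BIND_QUERY onto y's outward bindings, so y can later query how
  // many edges that star resolved to via x.iStar.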
  /** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
module TLBuffer_a32d64s6k3z3c( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_b_valid, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_in_b_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_in_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_e_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_b_bits_param, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_out_b_bits_address, // @[LazyModuleImp.scala:107:25]
input auto_out_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_e_bits_sink // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [5:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [31:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_in_b_ready_0 = auto_in_b_ready; // @[Buffer.scala:40:9]
wire auto_in_c_valid_0 = auto_in_c_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_c_bits_opcode_0 = auto_in_c_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_in_c_bits_param_0 = auto_in_c_bits_param; // @[Buffer.scala:40:9]
wire [2:0] auto_in_c_bits_size_0 = auto_in_c_bits_size; // @[Buffer.scala:40:9]
wire [5:0] auto_in_c_bits_source_0 = auto_in_c_bits_source; // @[Buffer.scala:40:9]
wire [31:0] auto_in_c_bits_address_0 = auto_in_c_bits_address; // @[Buffer.scala:40:9]
wire [63:0] auto_in_c_bits_data_0 = auto_in_c_bits_data; // @[Buffer.scala:40:9]
wire auto_in_c_bits_corrupt_0 = auto_in_c_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[Buffer.scala:40:9]
wire auto_in_e_valid_0 = auto_in_e_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_e_bits_sink_0 = auto_in_e_bits_sink; // @[Buffer.scala:40:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[Buffer.scala:40:9]
wire auto_out_b_valid_0 = auto_out_b_valid; // @[Buffer.scala:40:9]
wire [1:0] auto_out_b_bits_param_0 = auto_out_b_bits_param; // @[Buffer.scala:40:9]
wire [31:0] auto_out_b_bits_address_0 = auto_out_b_bits_address; // @[Buffer.scala:40:9]
wire auto_out_c_ready_0 = auto_out_c_ready; // @[Buffer.scala:40:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [5:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[Buffer.scala:40:9]
wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[Buffer.scala:40:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_in_e_ready = 1'h1; // @[Nodes.scala:27:25]
wire auto_out_e_ready = 1'h1; // @[Nodes.scala:27:25]
wire nodeIn_e_ready = 1'h1; // @[Nodes.scala:27:25]
wire nodeOut_e_ready = 1'h1; // @[Nodes.scala:27:25]
wire auto_in_b_bits_corrupt = 1'h0; // @[Nodes.scala:27:25]
wire auto_out_b_bits_corrupt = 1'h0; // @[Nodes.scala:27:25]
wire nodeIn_b_bits_corrupt = 1'h0; // @[Nodes.scala:27:25]
wire nodeOut_b_bits_corrupt = 1'h0; // @[Nodes.scala:27:25]
wire [63:0] auto_in_b_bits_data = 64'h0; // @[Nodes.scala:27:25]
wire [63:0] auto_out_b_bits_data = 64'h0; // @[Nodes.scala:27:25]
wire [63:0] nodeIn_b_bits_data = 64'h0; // @[Nodes.scala:27:25]
wire [63:0] nodeOut_b_bits_data = 64'h0; // @[Nodes.scala:27:25]
wire [7:0] auto_in_b_bits_mask = 8'hFF; // @[Nodes.scala:27:25]
wire [7:0] auto_out_b_bits_mask = 8'hFF; // @[Nodes.scala:27:25]
wire [7:0] nodeIn_b_bits_mask = 8'hFF; // @[Nodes.scala:27:25]
wire [7:0] nodeOut_b_bits_mask = 8'hFF; // @[Nodes.scala:27:25]
wire [5:0] auto_in_b_bits_source = 6'h21; // @[Nodes.scala:27:25]
wire [5:0] auto_out_b_bits_source = 6'h21; // @[Nodes.scala:27:25]
wire [5:0] nodeIn_b_bits_source = 6'h21; // @[Nodes.scala:27:25]
wire [5:0] nodeOut_b_bits_source = 6'h21; // @[Nodes.scala:27:25]
wire [2:0] auto_in_b_bits_opcode = 3'h6; // @[Nodes.scala:27:25]
wire [2:0] auto_in_b_bits_size = 3'h6; // @[Nodes.scala:27:25]
wire [2:0] auto_out_b_bits_opcode = 3'h6; // @[Nodes.scala:27:25]
wire [2:0] auto_out_b_bits_size = 3'h6; // @[Nodes.scala:27:25]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_b_bits_opcode = 3'h6; // @[Nodes.scala:27:25]
wire [2:0] nodeIn_b_bits_size = 3'h6; // @[Nodes.scala:27:25]
wire [2:0] nodeOut_b_bits_opcode = 3'h6; // @[Nodes.scala:27:25]
wire [2:0] nodeOut_b_bits_size = 3'h6; // @[Nodes.scala:27:25]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Buffer.scala:40:9]
wire [5:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire nodeIn_b_ready = auto_in_b_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_b_valid; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_b_bits_param; // @[MixedNode.scala:551:17]
wire [31:0] nodeIn_b_bits_address; // @[MixedNode.scala:551:17]
wire nodeIn_c_ready; // @[MixedNode.scala:551:17]
wire nodeIn_c_valid = auto_in_c_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_c_bits_opcode = auto_in_c_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_c_bits_param = auto_in_c_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_c_bits_size = auto_in_c_bits_size_0; // @[Buffer.scala:40:9]
wire [5:0] nodeIn_c_bits_source = auto_in_c_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] nodeIn_c_bits_address = auto_in_c_bits_address_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_c_bits_data = auto_in_c_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_c_bits_corrupt = auto_in_c_bits_corrupt_0; // @[Buffer.scala:40:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [5:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeIn_e_valid = auto_in_e_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_e_bits_sink = auto_in_e_bits_sink_0; // @[Buffer.scala:40:9]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [5:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_b_ready; // @[MixedNode.scala:542:17]
wire nodeOut_b_valid = auto_out_b_valid_0; // @[Buffer.scala:40:9]
wire [1:0] nodeOut_b_bits_param = auto_out_b_bits_param_0; // @[Buffer.scala:40:9]
wire [31:0] nodeOut_b_bits_address = auto_out_b_bits_address_0; // @[Buffer.scala:40:9]
wire nodeOut_c_ready = auto_out_c_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_c_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_c_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_c_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_c_bits_size; // @[MixedNode.scala:542:17]
wire [5:0] nodeOut_c_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_c_bits_address; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_c_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_c_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[Buffer.scala:40:9]
wire [5:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire nodeOut_e_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_e_bits_sink; // @[MixedNode.scala:542:17]
wire auto_in_a_ready_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_b_bits_param_0; // @[Buffer.scala:40:9]
wire [31:0] auto_in_b_bits_address_0; // @[Buffer.scala:40:9]
wire auto_in_b_valid_0; // @[Buffer.scala:40:9]
wire auto_in_c_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
wire [5:0] auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
wire [5:0] auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_a_valid_0; // @[Buffer.scala:40:9]
wire auto_out_b_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_c_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_c_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_c_bits_size_0; // @[Buffer.scala:40:9]
wire [5:0] auto_out_c_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] auto_out_c_bits_address_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_c_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_c_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_c_valid_0; // @[Buffer.scala:40:9]
wire auto_out_d_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_e_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_out_e_valid_0; // @[Buffer.scala:40:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Buffer.scala:40:9]
assign nodeOut_b_ready = nodeIn_b_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_in_b_valid_0 = nodeIn_b_valid; // @[Buffer.scala:40:9]
assign auto_in_b_bits_param_0 = nodeIn_b_bits_param; // @[Buffer.scala:40:9]
assign auto_in_b_bits_address_0 = nodeIn_b_bits_address; // @[Buffer.scala:40:9]
assign auto_in_c_ready_0 = nodeIn_c_ready; // @[Buffer.scala:40:9]
assign nodeOut_c_valid = nodeIn_c_valid; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_c_bits_opcode = nodeIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_c_bits_param = nodeIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_c_bits_size = nodeIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_c_bits_source = nodeIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_c_bits_address = nodeIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_c_bits_data = nodeIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_c_bits_corrupt = nodeIn_c_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
assign nodeOut_e_valid = nodeIn_e_valid; // @[MixedNode.scala:542:17, :551:17]
assign nodeOut_e_bits_sink = nodeIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_b_ready_0 = nodeOut_b_ready; // @[Buffer.scala:40:9]
assign nodeIn_b_valid = nodeOut_b_valid; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_b_bits_param = nodeOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_b_bits_address = nodeOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign nodeIn_c_ready = nodeOut_c_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_out_c_valid_0 = nodeOut_c_valid; // @[Buffer.scala:40:9]
assign auto_out_c_bits_opcode_0 = nodeOut_c_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_c_bits_param_0 = nodeOut_c_bits_param; // @[Buffer.scala:40:9]
assign auto_out_c_bits_size_0 = nodeOut_c_bits_size; // @[Buffer.scala:40:9]
assign auto_out_c_bits_source_0 = nodeOut_c_bits_source; // @[Buffer.scala:40:9]
assign auto_out_c_bits_address_0 = nodeOut_c_bits_address; // @[Buffer.scala:40:9]
assign auto_out_c_bits_data_0 = nodeOut_c_bits_data; // @[Buffer.scala:40:9]
assign auto_out_c_bits_corrupt_0 = nodeOut_c_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[Buffer.scala:40:9]
assign auto_out_e_valid_0 = nodeOut_e_valid; // @[Buffer.scala:40:9]
assign auto_out_e_bits_sink_0 = nodeOut_e_bits_sink; // @[Buffer.scala:40:9]
TLMonitor_35 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_b_ready (nodeIn_b_ready), // @[MixedNode.scala:551:17]
.io_in_b_valid (nodeIn_b_valid), // @[MixedNode.scala:551:17]
.io_in_b_bits_param (nodeIn_b_bits_param), // @[MixedNode.scala:551:17]
.io_in_b_bits_address (nodeIn_b_bits_address), // @[MixedNode.scala:551:17]
.io_in_c_ready (nodeIn_c_ready), // @[MixedNode.scala:551:17]
.io_in_c_valid (nodeIn_c_valid), // @[MixedNode.scala:551:17]
.io_in_c_bits_opcode (nodeIn_c_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_c_bits_param (nodeIn_c_bits_param), // @[MixedNode.scala:551:17]
.io_in_c_bits_size (nodeIn_c_bits_size), // @[MixedNode.scala:551:17]
.io_in_c_bits_source (nodeIn_c_bits_source), // @[MixedNode.scala:551:17]
.io_in_c_bits_address (nodeIn_c_bits_address), // @[MixedNode.scala:551:17]
.io_in_c_bits_data (nodeIn_c_bits_data), // @[MixedNode.scala:551:17]
.io_in_c_bits_corrupt (nodeIn_c_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (nodeIn_d_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_e_valid (nodeIn_e_valid), // @[MixedNode.scala:551:17]
.io_in_e_bits_sink (nodeIn_e_bits_sink) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Queue1_TLBundleA_a32d64s6k3z3c nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_a_ready),
.io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_enq_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_deq_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17]
.io_deq_valid (nodeOut_a_valid),
.io_deq_bits_opcode (nodeOut_a_bits_opcode),
.io_deq_bits_param (nodeOut_a_bits_param),
.io_deq_bits_size (nodeOut_a_bits_size),
.io_deq_bits_source (nodeOut_a_bits_source),
.io_deq_bits_address (nodeOut_a_bits_address),
.io_deq_bits_mask (nodeOut_a_bits_mask),
.io_deq_bits_data (nodeOut_a_bits_data),
.io_deq_bits_corrupt (nodeOut_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue1_TLBundleD_a32d64s6k3z3c nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeOut_d_ready),
.io_enq_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17]
.io_enq_bits_param (nodeOut_d_bits_param), // @[MixedNode.scala:542:17]
.io_enq_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17]
.io_enq_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17]
.io_enq_bits_sink (nodeOut_d_bits_sink), // @[MixedNode.scala:542:17]
.io_enq_bits_denied (nodeOut_d_bits_denied), // @[MixedNode.scala:542:17]
.io_enq_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17]
.io_enq_bits_corrupt (nodeOut_d_bits_corrupt), // @[MixedNode.scala:542:17]
.io_deq_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeIn_d_valid),
.io_deq_bits_opcode (nodeIn_d_bits_opcode),
.io_deq_bits_param (nodeIn_d_bits_param),
.io_deq_bits_size (nodeIn_d_bits_size),
.io_deq_bits_source (nodeIn_d_bits_source),
.io_deq_bits_sink (nodeIn_d_bits_sink),
.io_deq_bits_denied (nodeIn_d_bits_denied),
.io_deq_bits_data (nodeIn_d_bits_data),
.io_deq_bits_corrupt (nodeIn_d_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = auto_in_a_ready_0; // @[Buffer.scala:40:9]
assign auto_in_b_valid = auto_in_b_valid_0; // @[Buffer.scala:40:9]
assign auto_in_b_bits_param = auto_in_b_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_b_bits_address = auto_in_b_bits_address_0; // @[Buffer.scala:40:9]
assign auto_in_c_ready = auto_in_c_ready_0; // @[Buffer.scala:40:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_b_ready = auto_out_b_ready_0; // @[Buffer.scala:40:9]
assign auto_out_c_valid = auto_out_c_valid_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_opcode = auto_out_c_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_param = auto_out_c_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_size = auto_out_c_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_source = auto_out_c_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_address = auto_out_c_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_data = auto_out_c_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_c_bits_corrupt = auto_out_c_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[Buffer.scala:40:9]
assign auto_out_e_valid = auto_out_e_valid_0; // @[Buffer.scala:40:9]
assign auto_out_e_bits_sink = auto_out_e_bits_sink_0; // @[Buffer.scala:40:9]
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
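// Usage sketch (illustrative, not part of the original file): delay a Bool by two cycles
// with a reset value of false, naming the stages "sync_0"/"sync_1":
//
//   val q = ShiftRegInit(d, n = 2, init = false.B, name = Some("sync"))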
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
 * but only used for timing applications.
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
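// Usage sketch (illustrative, not part of the original file): build a 3-deep, asynchronously
// reset pipeline for an 8-bit UInt with reset value 0:
//
//   val out = AsyncResetShiftReg(in, depth = 3, init = 0, name = Some("pipe"))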
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
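// Usage sketch (illustrative, not from the original file): bringing an asynchronous
// external flag into the local clock domain through a 3-deep synchronizer. The module
// and signal names here are assumptions made only for this example.
class AsyncInputSyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0, name = Some("in_sync"))
}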
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
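// Usage sketch (illustrative only): capturing a multi-bit bus with ClockCrossingReg,
// gated by an enable that is assumed to already be synchronized to this clock domain.
// The names `bus_in`/`bus_en`/`bus_out` and the 8-bit width are example assumptions.
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val bus_in  = Input(UInt(8.W))
    val bus_en  = Input(Bool())
    val bus_out = Output(UInt(8.W))
  })
  io.bus_out := ClockCrossingReg(io.bus_in, en = io.bus_en, doInit = false, name = Some("bus_cdc"))
}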
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_143( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_247 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Fragmenter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, BufferParams, IdRange, TransferSizes}
import freechips.rocketchip.util.{Repeater, OH1ToUInt, UIntToOH1}
import scala.math.min
import freechips.rocketchip.util.DataToAugmentedData
object EarlyAck {
sealed trait T
case object AllPuts extends T
case object PutFulls extends T
case object None extends T
}
// minSize: minimum size of transfers supported by all outward managers
// maxSize: maximum size of transfers supported after the Fragmenter is applied
// alwaysMin: fragment all requests down to minSize (else fragment to maximum supported by manager)
// earlyAck: should a multibeat Put be acknowledged on the first beat or on the last beat
// holdFirstDeny: allow the Fragmenter to unsafely combine multibeat Gets by taking the first denied for the whole burst
// nameSuffix: appends a suffix to the module name
// Fragmenter modifies: PutFull, PutPartial, LogicalData, Get, Hint
// Fragmenter passes: ArithmeticData (truncated to minSize if alwaysMin)
// Fragmenter cannot modify acquire (could livelock); thus it is unsafe to put caches on both sides
class TLFragmenter(val minSize: Int, val maxSize: Int, val alwaysMin: Boolean = false, val earlyAck: EarlyAck.T = EarlyAck.None, val holdFirstDeny: Boolean = false, val nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
require(isPow2 (maxSize), s"TLFragmenter expects pow2(maxSize), but got $maxSize")
require(isPow2 (minSize), s"TLFragmenter expects pow2(minSize), but got $minSize")
require(minSize <= maxSize, s"TLFragmenter expects min <= max, but got $minSize > $maxSize")
val fragmentBits = log2Ceil(maxSize / minSize)
val fullBits = if (earlyAck == EarlyAck.PutFulls) 1 else 0
val toggleBits = 1
val addedBits = fragmentBits + toggleBits + fullBits
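// Worked example (illustrative): for minSize = 8 and maxSize = 64, fragmentBits =
// log2Ceil(64/8) = 3, so with the toggle bit addedBits = 4 (or 5 when earlyAck ==
// EarlyAck.PutFulls adds the "full" bit). Each original source id is therefore
// expanded into 2^addedBits fragment source ids on the A channel below.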
def expandTransfer(x: TransferSizes, op: String) = if (!x) x else {
// validate that we can apply the fragmenter correctly
require (x.max >= minSize, s"TLFragmenter (with parent $parent) max transfer size $op(${x.max}) must be >= min transfer size (${minSize})")
TransferSizes(x.min, maxSize)
}
private def noChangeRequired = minSize == maxSize
private def shrinkTransfer(x: TransferSizes) =
if (!alwaysMin) x
else if (x.min <= minSize) TransferSizes(x.min, min(minSize, x.max))
else TransferSizes.none
private def mapManager(m: TLSlaveParameters) = m.v1copy(
supportsArithmetic = shrinkTransfer(m.supportsArithmetic),
supportsLogical = shrinkTransfer(m.supportsLogical),
supportsGet = expandTransfer(m.supportsGet, "Get"),
supportsPutFull = expandTransfer(m.supportsPutFull, "PutFull"),
supportsPutPartial = expandTransfer(m.supportsPutPartial, "PutPartial"),
supportsHint = expandTransfer(m.supportsHint, "Hint"))
val node = new TLAdapterNode(
// We require that all the responses are mutually FIFO
// Thus we need to compact all of the masters into one big master
clientFn = { c => (if (noChangeRequired) c else c.v2copy(
masters = Seq(TLMasterParameters.v2(
name = "TLFragmenter",
sourceId = IdRange(0, if (minSize == maxSize) c.endSourceId else (c.endSourceId << addedBits)),
requestFifo = true,
emits = TLMasterToSlaveTransferSizes(
acquireT = shrinkTransfer(c.masters.map(_.emits.acquireT) .reduce(_ mincover _)),
acquireB = shrinkTransfer(c.masters.map(_.emits.acquireB) .reduce(_ mincover _)),
arithmetic = shrinkTransfer(c.masters.map(_.emits.arithmetic).reduce(_ mincover _)),
logical = shrinkTransfer(c.masters.map(_.emits.logical) .reduce(_ mincover _)),
get = shrinkTransfer(c.masters.map(_.emits.get) .reduce(_ mincover _)),
putFull = shrinkTransfer(c.masters.map(_.emits.putFull) .reduce(_ mincover _)),
putPartial = shrinkTransfer(c.masters.map(_.emits.putPartial).reduce(_ mincover _)),
hint = shrinkTransfer(c.masters.map(_.emits.hint) .reduce(_ mincover _))
)
))
))},
managerFn = { m => if (noChangeRequired) m else m.v2copy(slaves = m.slaves.map(mapManager)) }
) {
override def circuitIdentity = noChangeRequired
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = (Seq("TLFragmenter") ++ nameSuffix).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
if (noChangeRequired) {
out <> in
} else {
// All managers must share a common FIFO domain (responses might end up interleaved)
val manager = edgeOut.manager
val managers = manager.managers
val beatBytes = manager.beatBytes
val fifoId = managers(0).fifoId
require (fifoId.isDefined && managers.map(_.fifoId == fifoId).reduce(_ && _))
require (!manager.anySupportAcquireB || !edgeOut.client.anySupportProbe,
s"TLFragmenter (with parent $parent) can't fragment a caching client's requests into a cacheable region")
require (minSize >= beatBytes, s"TLFragmenter (with parent $parent) can't support fragmenting ($minSize) to sub-beat ($beatBytes) accesses")
// We can't support devices which are cached on both sides of us
require (!edgeOut.manager.anySupportAcquireB || !edgeIn.client.anySupportProbe)
// We can't support denied because we reassemble fragments
require (!edgeOut.manager.mayDenyGet || holdFirstDeny, s"TLFragmenter (with parent $parent) can't support denials without holdFirstDeny=true")
require (!edgeOut.manager.mayDenyPut || earlyAck == EarlyAck.None)
/* The Fragmenter is a bit tricky, because there are 5 sizes in play:
* max size -- the maximum transfer size possible
* orig size -- the original pre-fragmenter size
* frag size -- the modified post-fragmenter size
* min size -- the threshold below which frag=orig
* beat size -- the amount transferred on any given beat
*
* The relationships are as follows:
* max >= orig >= frag
* max > min >= beat
* It IS possible that orig <= min (then frag=orig; ie: no fragmentation)
*
* The fragment# (sent via TL.source) is measured in multiples of min size.
* Meanwhile, to track the progress, counters measure in multiples of beat size.
*
* Here is an example of a bus with max=256, min=8, beat=4 and a device supporting 16.
*
* in.A out.A (frag#) out.D (frag#) in.D gen# ack#
* get64 get16 6 ackD16 6 ackD64 12 15
* ackD16 6 ackD64 14
* ackD16 6 ackD64 13
* ackD16 6 ackD64 12
* get16 4 ackD16 4 ackD64 8 11
* ackD16 4 ackD64 10
* ackD16 4 ackD64 9
* ackD16 4 ackD64 8
* get16 2 ackD16 2 ackD64 4 7
* ackD16 2 ackD64 6
* ackD16 2 ackD64 5
* ackD16 2 ackD64 4
* get16 0 ackD16 0 ackD64 0 3
* ackD16 0 ackD64 2
* ackD16 0 ackD64 1
* ackD16 0 ackD64 0
*
* get8 get8 0 ackD8 0 ackD8 0 1
* ackD8 0 ackD8 0
*
* get4 get4 0 ackD4 0 ackD4 0 0
* get1 get1 0 ackD1 0 ackD1 0 0
*
* put64 put16 6 15
* put64 put16 6 14
* put64 put16 6 13
* put64 put16 6 ack16 6 12 12
* put64 put16 4 11
* put64 put16 4 10
* put64 put16 4 9
* put64 put16 4 ack16 4 8 8
* put64 put16 2 7
* put64 put16 2 6
* put64 put16 2 5
* put64 put16 2 ack16 2 4 4
* put64 put16 0 3
* put64 put16 0 2
* put64 put16 0 1
* put64 put16 0 ack16 0 ack64 0 0
*
* put8 put8 0 1
* put8 put8 0 ack8 0 ack8 0 0
*
* put4 put4 0 ack4 0 ack4 0 0
* put1 put1 0 ack1 0 ack1 0 0
*/
val counterBits = log2Up(maxSize/beatBytes)
val maxDownSize = if (alwaysMin) minSize else min(manager.maxTransfer, maxSize)
// Consider the following waveform for two 4-beat bursts:
// ---A----A------------
// -------D-----DDD-DDDD
// Under TL rules, the second A can use the same source as the first A,
// because the source is released for reuse on the first response beat.
//
// However, if we fragment the requests, it looks like this:
// ---3210-3210---------
// -------3-----210-3210
// ... now we've broken the rules because 210 are twice inflight.
//
// This phenomenon means we can have essentially 2*maxSize/minSize-1
// fragmented transactions in flight per original transaction source.
//
// To keep the source unique, we encode the beat counter in the low
// bits of the source. To solve the overlap, we use a toggle bit.
// Whatever toggle bit the D is reassembling, A will use the opposite.
// First, handle the return path
val acknum = RegInit(0.U(counterBits.W))
val dOrig = Reg(UInt())
val dToggle = RegInit(false.B)
val dFragnum = out.d.bits.source(fragmentBits-1, 0)
val dFirst = acknum === 0.U
val dLast = dFragnum === 0.U // only for AccessAck (!Data)
val dsizeOH = UIntToOH (out.d.bits.size, log2Ceil(maxDownSize)+1)
val dsizeOH1 = UIntToOH1(out.d.bits.size, log2Up(maxDownSize))
val dHasData = edgeOut.hasData(out.d.bits)
// calculate new acknum
val acknum_fragment = dFragnum << log2Ceil(minSize/beatBytes)
val acknum_size = dsizeOH1 >> log2Ceil(beatBytes)
assert (!out.d.valid || (acknum_fragment & acknum_size) === 0.U)
val dFirst_acknum = acknum_fragment | Mux(dHasData, acknum_size, 0.U)
val ack_decrement = Mux(dHasData, 1.U, dsizeOH >> log2Ceil(beatBytes))
// calculate the original size
val dFirst_size = OH1ToUInt((dFragnum << log2Ceil(minSize)) | dsizeOH1)
when (out.d.fire) {
acknum := Mux(dFirst, dFirst_acknum, acknum - ack_decrement)
when (dFirst) {
dOrig := dFirst_size
dToggle := out.d.bits.source(fragmentBits)
}
}
// Swallow up non-data ack fragments
val doEarlyAck = earlyAck match {
case EarlyAck.AllPuts => true.B
case EarlyAck.PutFulls => out.d.bits.source(fragmentBits+1)
case EarlyAck.None => false.B
}
val drop = !dHasData && !Mux(doEarlyAck, dFirst, dLast)
out.d.ready := in.d.ready || drop
in.d.valid := out.d.valid && !drop
in.d.bits := out.d.bits // pass most stuff unchanged
in.d.bits.source := out.d.bits.source >> addedBits
in.d.bits.size := Mux(dFirst, dFirst_size, dOrig)
if (edgeOut.manager.mayDenyPut) {
val r_denied = Reg(Bool())
val d_denied = (!dFirst && r_denied) || out.d.bits.denied
when (out.d.fire) { r_denied := d_denied }
in.d.bits.denied := d_denied
}
if (edgeOut.manager.mayDenyGet) {
// Take denied only from the first beat and hold that value
val d_denied = out.d.bits.denied holdUnless dFirst
when (dHasData) {
in.d.bits.denied := d_denied
in.d.bits.corrupt := d_denied || out.d.bits.corrupt
}
}
// What maximum transfer sizes do downstream devices support?
val maxArithmetics = managers.map(_.supportsArithmetic.max)
val maxLogicals = managers.map(_.supportsLogical.max)
val maxGets = managers.map(_.supportsGet.max)
val maxPutFulls = managers.map(_.supportsPutFull.max)
val maxPutPartials = managers.map(_.supportsPutPartial.max)
val maxHints = managers.map(m => if (m.supportsHint) maxDownSize else 0)
// We assume that the request is valid => size 0 is impossible
val lgMinSize = log2Ceil(minSize).U
val maxLgArithmetics = maxArithmetics.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgLogicals = maxLogicals .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgGets = maxGets .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutFulls = maxPutFulls .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutPartials = maxPutPartials.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgHints = maxHints .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
// Make the request repeatable
val repeater = Module(new Repeater(in.a.bits))
repeater.io.enq <> in.a
val in_a = repeater.io.deq
// If this is in front of a single manager, these become constants
val find = manager.findFast(edgeIn.address(in_a.bits))
val maxLgArithmetic = Mux1H(find, maxLgArithmetics)
val maxLgLogical = Mux1H(find, maxLgLogicals)
val maxLgGet = Mux1H(find, maxLgGets)
val maxLgPutFull = Mux1H(find, maxLgPutFulls)
val maxLgPutPartial = Mux1H(find, maxLgPutPartials)
val maxLgHint = Mux1H(find, maxLgHints)
val limit = if (alwaysMin) lgMinSize else
MuxLookup(in_a.bits.opcode, lgMinSize)(Array(
TLMessages.PutFullData -> maxLgPutFull,
TLMessages.PutPartialData -> maxLgPutPartial,
TLMessages.ArithmeticData -> maxLgArithmetic,
TLMessages.LogicalData -> maxLgLogical,
TLMessages.Get -> maxLgGet,
TLMessages.Hint -> maxLgHint))
val aOrig = in_a.bits.size
val aFrag = Mux(aOrig > limit, limit, aOrig)
val aOrigOH1 = UIntToOH1(aOrig, log2Ceil(maxSize))
val aFragOH1 = UIntToOH1(aFrag, log2Up(maxDownSize))
val aHasData = edgeIn.hasData(in_a.bits)
val aMask = Mux(aHasData, 0.U, aFragOH1)
val gennum = RegInit(0.U(counterBits.W))
val aFirst = gennum === 0.U
val old_gennum1 = Mux(aFirst, aOrigOH1 >> log2Ceil(beatBytes), gennum - 1.U)
val new_gennum = ~(~old_gennum1 | (aMask >> log2Ceil(beatBytes))) // ~(~x|y) is width safe
val aFragnum = ~(~(old_gennum1 >> log2Ceil(minSize/beatBytes)) | (aFragOH1 >> log2Ceil(minSize)))
val aLast = aFragnum === 0.U
val aToggle = !Mux(aFirst, dToggle, RegEnable(dToggle, aFirst))
val aFull = if (earlyAck == EarlyAck.PutFulls) Some(in_a.bits.opcode === TLMessages.PutFullData) else None
when (out.a.fire) { gennum := new_gennum }
repeater.io.repeat := !aHasData && aFragnum =/= 0.U
out.a <> in_a
out.a.bits.address := in_a.bits.address | ~(old_gennum1 << log2Ceil(beatBytes) | ~aOrigOH1 | aFragOH1 | (minSize-1).U)
out.a.bits.source := Cat(Seq(in_a.bits.source) ++ aFull ++ Seq(aToggle.asUInt, aFragnum))
out.a.bits.size := aFrag
// Optimize away some of the Repeater's registers
assert (!repeater.io.full || !aHasData)
out.a.bits.data := in.a.bits.data
val fullMask = ((BigInt(1) << beatBytes) - 1).U
assert (!repeater.io.full || in_a.bits.mask === fullMask)
out.a.bits.mask := Mux(repeater.io.full, fullMask, in.a.bits.mask)
out.a.bits.user.waiveAll :<= in.a.bits.user.subset(_.isData)
// Tie off unused channels
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLFragmenter
{
def apply(minSize: Int, maxSize: Int, alwaysMin: Boolean = false, earlyAck: EarlyAck.T = EarlyAck.None, holdFirstDeny: Boolean = false, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
if (minSize <= maxSize) {
val fragmenter = LazyModule(new TLFragmenter(minSize, maxSize, alwaysMin, earlyAck, holdFirstDeny, nameSuffix))
fragmenter.node
} else { TLEphemeralNode()(ValName("no_fragmenter")) }
}
def apply(wrapper: TLBusWrapper, nameSuffix: Option[String])(implicit p: Parameters): TLNode = apply(wrapper.beatBytes, wrapper.blockBytes, nameSuffix = nameSuffix)
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper, None)
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMFragmenter(ramBeatBytes: Int, maxSize: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Fragmenter"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff), beatBytes = ramBeatBytes))
(ram.node
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLDelayer(0.1)
:= TLFragmenter(ramBeatBytes, maxSize, earlyAck = EarlyAck.AllPuts)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLFragmenter(ramBeatBytes, maxSize/2)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMFragmenterTest(ramBeatBytes: Int, maxSize: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMFragmenter(ramBeatBytes,maxSize,txns)).module)
io.finished := dut.io.finished
dut.io.start := io.start
}
| module TLFragmenter( // @[Fragmenter.scala:92:9]
input clock, // @[Fragmenter.scala:92:9]
input reset // @[Fragmenter.scala:92:9]
);
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
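// Usage sketch (not part of the original file): delaying a valid flag by two cycles
// through a named, resettable register chain. The wrapper class and signal names are
// illustrative assumptions only.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val valid_in  = Input(Bool())
    val valid_out = Output(Bool())
  })
  io.valid_out := ShiftRegInit(io.valid_in, n = 2, init = false.B, name = Some("valid_pipe"))
}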
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
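// Worked example (illustrative): the binary-to-Gray mapping g = b ^ (b >> 1) changes
// exactly one bit per increment, e.g. binary 3 (011) -> Gray 010 and binary 4 (100) ->
// Gray 110. This single-bit-per-step property is what makes the read/write indices
// below safe to synchronize bit-by-bit across the clock crossing.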
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write such an assertion, because a dequeue can occur on the
// receiving side, reset is then allowed to happen, and the write side cannot
// know that the dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
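// Usage sketch (illustrative, not from the original file): moving 8-bit payloads
// between two externally supplied clock/reset pairs. Everything except AsyncQueue and
// AsyncQueueParams (the wrapper class, port names, width, depth, Bool reset type) is
// an assumption made only for this example.
class AsyncQueueExample extends Module {
  val io = IO(new Bundle {
    val enq_clock = Input(Clock())
    val enq_reset = Input(Bool())
    val deq_clock = Input(Clock())
    val deq_reset = Input(Bool())
    val enq = Flipped(Decoupled(UInt(8.W)))
    val deq = Decoupled(UInt(8.W))
  })
  val crossing = Module(new AsyncQueue(UInt(8.W), AsyncQueueParams(depth = 4, sync = 3)))
  crossing.io.enq_clock := io.enq_clock
  crossing.io.enq_reset := io.enq_reset
  crossing.io.deq_clock := io.deq_clock
  crossing.io.deq_reset := io.deq_reset
  crossing.io.enq <> io.enq
  io.deq <> crossing.io.deq
}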
| module AsyncValidSync( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
AsyncResetSynchronizerShiftReg_w1_d3_i0_1 io_out_source_valid_0 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in),
.io_q (io_out)
); // @[ShiftReg.scala:45:23]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File cpath.scala:
//**************************************************************************
// RISCV Processor 5-Stage Control Path
//--------------------------------------------------------------------------
//
// Christopher Celio
// 2012 Jan 20
//
// Supports both a fully-bypassed datapath (with stalls for load-use), and a
// fully interlocked (no bypass) datapath that stalls for all hazards.
package sodor.stage5
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.{CSR, Causes}
import sodor.stage5.Constants._
import sodor.common._
import sodor.common.Instructions._
class CtlToDatIo extends Bundle()
{
val dec_stall = Output(Bool()) // stall IF/DEC stages (due to hazards)
val full_stall = Output(Bool()) // stall entire pipeline (due to D$ misses)
val exe_pc_sel = Output(UInt(2.W))
val br_type = Output(UInt(4.W))
val if_kill = Output(Bool())
val dec_kill = Output(Bool())
val op1_sel = Output(UInt(2.W))
val op2_sel = Output(UInt(3.W))
val alu_fun = Output(UInt(4.W))
val wb_sel = Output(UInt(2.W))
val rf_wen = Output(Bool())
val mem_val = Output(Bool())
val mem_fcn = Output(UInt(2.W))
val mem_typ = Output(UInt(3.W))
val csr_cmd = Output(UInt(CSR.SZ.W))
val fencei = Output(Bool()) // pipeline is executing a fencei
val pipeline_kill = Output(Bool()) // an exception occurred (detected in mem stage).
// Kill the entire pipeline, disregarding stalls,
// and kill the if, dec, and exe stages.
val mem_exception = Output(Bool()) // tell the CSR that the core detected an exception
val mem_exception_cause = Output(UInt(32.W))
}
class CpathIo(implicit val conf: SodorCoreParams) extends Bundle()
{
val dcpath = Flipped(new DebugCPath())
val imem = new MemPortIo(conf.xprlen)
val dmem = new MemPortIo(conf.xprlen)
val dat = Flipped(new DatToCtlIo())
val ctl = new CtlToDatIo()
}
class CtlPath(implicit val conf: SodorCoreParams) extends Module
{
val io = IO(new CpathIo())
io := DontCare
val csignals =
ListLookup(io.dat.dec_inst,
List(N, BR_N , OP1_X , OP2_X , OEN_0, OEN_0, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.N, N),
Array( /* val | BR | op1 | op2 | R1 | R2 | ALU | wb | rf | mem | mem | mask | csr | fence.i */
/* inst | type | sel | sel | oen | oen | fcn | sel | wen | en | wr | type | cmd | */
LW -> List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_ADD , WB_MEM, REN_1, MEN_1, M_XRD, MT_W, CSR.N, N),
LB -> List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_ADD , WB_MEM, REN_1, MEN_1, M_XRD, MT_B, CSR.N, N),
LBU -> List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_ADD , WB_MEM, REN_1, MEN_1, M_XRD, MT_BU,CSR.N, N),
LH -> List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_ADD , WB_MEM, REN_1, MEN_1, M_XRD, MT_H, CSR.N, N),
LHU -> List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_ADD , WB_MEM, REN_1, MEN_1, M_XRD, MT_HU,CSR.N, N),
SW -> List(Y, BR_N , OP1_RS1, OP2_STYPE , OEN_1, OEN_1, ALU_ADD , WB_X , REN_0, MEN_1, M_XWR, MT_W, CSR.N, N),
SB -> List(Y, BR_N , OP1_RS1, OP2_STYPE , OEN_1, OEN_1, ALU_ADD , WB_X , REN_0, MEN_1, M_XWR, MT_B, CSR.N, N),
SH -> List(Y, BR_N , OP1_RS1, OP2_STYPE , OEN_1, OEN_1, ALU_ADD , WB_X , REN_0, MEN_1, M_XWR, MT_H, CSR.N, N),
AUIPC -> List(Y, BR_N , OP1_PC , OP2_UTYPE , OEN_0, OEN_0, ALU_ADD ,WB_ALU,REN_1, MEN_0, M_X , MT_X, CSR.N, N),
LUI -> List(Y, BR_N , OP1_X , OP2_UTYPE , OEN_0, OEN_0, ALU_COPY_2,WB_ALU,REN_1, MEN_0, M_X , MT_X, CSR.N, N),
ADDI -> List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_ADD , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
ANDI -> List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_AND , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
ORI -> List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_OR , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
XORI -> List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_XOR , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
SLTI -> List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_SLT , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
SLTIU -> List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_SLTU, WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
SLLI_RV32->List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_SLL , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
SRAI_RV32->List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_SRA , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
SRLI_RV32->List(Y, BR_N , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_SRL , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
SLL -> List(Y, BR_N , OP1_RS1, OP2_RS2 , OEN_1, OEN_1, ALU_SLL , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
ADD -> List(Y, BR_N , OP1_RS1, OP2_RS2 , OEN_1, OEN_1, ALU_ADD , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
SUB -> List(Y, BR_N , OP1_RS1, OP2_RS2 , OEN_1, OEN_1, ALU_SUB , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
SLT -> List(Y, BR_N , OP1_RS1, OP2_RS2 , OEN_1, OEN_1, ALU_SLT , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
SLTU -> List(Y, BR_N , OP1_RS1, OP2_RS2 , OEN_1, OEN_1, ALU_SLTU, WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
AND -> List(Y, BR_N , OP1_RS1, OP2_RS2 , OEN_1, OEN_1, ALU_AND , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
OR -> List(Y, BR_N , OP1_RS1, OP2_RS2 , OEN_1, OEN_1, ALU_OR , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
XOR -> List(Y, BR_N , OP1_RS1, OP2_RS2 , OEN_1, OEN_1, ALU_XOR , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
SRA -> List(Y, BR_N , OP1_RS1, OP2_RS2 , OEN_1, OEN_1, ALU_SRA , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
SRL -> List(Y, BR_N , OP1_RS1, OP2_RS2 , OEN_1, OEN_1, ALU_SRL , WB_ALU, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
JAL -> List(Y, BR_J , OP1_RS1, OP2_UJTYPE, OEN_0, OEN_0, ALU_X , WB_PC4, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
JALR -> List(Y, BR_JR , OP1_RS1, OP2_ITYPE , OEN_1, OEN_0, ALU_X , WB_PC4, REN_1, MEN_0, M_X , MT_X, CSR.N, N),
BEQ -> List(Y, BR_EQ , OP1_RS1, OP2_SBTYPE, OEN_1, OEN_1, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.N, N),
BNE -> List(Y, BR_NE , OP1_RS1, OP2_SBTYPE, OEN_1, OEN_1, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.N, N),
BGE -> List(Y, BR_GE , OP1_RS1, OP2_SBTYPE, OEN_1, OEN_1, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.N, N),
BGEU -> List(Y, BR_GEU, OP1_RS1, OP2_SBTYPE, OEN_1, OEN_1, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.N, N),
BLT -> List(Y, BR_LT , OP1_RS1, OP2_SBTYPE, OEN_1, OEN_1, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.N, N),
BLTU -> List(Y, BR_LTU, OP1_RS1, OP2_SBTYPE, OEN_1, OEN_1, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.N, N),
CSRRWI -> List(Y, BR_N , OP1_IMZ, OP2_X , OEN_1, OEN_1, ALU_COPY_1,WB_CSR,REN_1, MEN_0, M_X , MT_X, CSR.W, N),
CSRRSI -> List(Y, BR_N , OP1_IMZ, OP2_X , OEN_1, OEN_1, ALU_COPY_1,WB_CSR,REN_1, MEN_0, M_X , MT_X, CSR.S, N),
CSRRW -> List(Y, BR_N , OP1_RS1, OP2_X , OEN_1, OEN_1, ALU_COPY_1,WB_CSR,REN_1, MEN_0, M_X , MT_X, CSR.W, N),
CSRRS -> List(Y, BR_N , OP1_RS1, OP2_X , OEN_1, OEN_1, ALU_COPY_1,WB_CSR,REN_1, MEN_0, M_X , MT_X, CSR.S, N),
CSRRC -> List(Y, BR_N , OP1_RS1, OP2_X , OEN_1, OEN_1, ALU_COPY_1,WB_CSR,REN_1, MEN_0, M_X , MT_X, CSR.C, N),
CSRRCI -> List(Y, BR_N , OP1_IMZ, OP2_X , OEN_1, OEN_1, ALU_COPY_1,WB_CSR,REN_1, MEN_0, M_X , MT_X, CSR.C, N),
ECALL -> List(Y, BR_N , OP1_X , OP2_X , OEN_0, OEN_0, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.I, N),
MRET -> List(Y, BR_N , OP1_X , OP2_X , OEN_0, OEN_0, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.I, N),
DRET -> List(Y, BR_N , OP1_X , OP2_X , OEN_0, OEN_0, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.I, N),
EBREAK -> List(Y, BR_N , OP1_X , OP2_X , OEN_0, OEN_0, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.I, N),
WFI -> List(Y, BR_N , OP1_X , OP2_X , OEN_0, OEN_0, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.N, N), // implemented as a NOP
FENCE_I-> List(Y, BR_N , OP1_X , OP2_X , OEN_0, OEN_0, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.N, Y),
// kill the pipeline and refetch instructions, since the pipeline will be holding stale instructions.
FENCE -> List(Y, BR_N , OP1_X , OP2_X , OEN_0, OEN_0, ALU_X , WB_X , REN_0, MEN_0, M_X , MT_X, CSR.N, N)
// we are already sequentially consistent, so no need to honor the fence instruction
))
// Put these control signals in variables
val (cs_val_inst: Bool) :: cs_br_type :: cs_op1_sel :: cs_op2_sel :: (cs_rs1_oen: Bool) :: (cs_rs2_oen: Bool) :: cs0 = csignals
val cs_alu_fun :: cs_wb_sel :: (cs_rf_wen: Bool) :: (cs_mem_en: Bool) :: cs_mem_fcn :: cs_msk_sel :: cs_csr_cmd :: (cs_fencei: Bool) :: Nil = cs0
// Branch Logic
val ctrl_exe_pc_sel = Mux(io.ctl.pipeline_kill , PC_EXC,
Mux(io.dat.exe_br_type === BR_N , PC_4,
Mux(io.dat.exe_br_type === BR_NE , Mux(!io.dat.exe_br_eq, PC_BRJMP, PC_4),
Mux(io.dat.exe_br_type === BR_EQ , Mux( io.dat.exe_br_eq, PC_BRJMP, PC_4),
Mux(io.dat.exe_br_type === BR_GE , Mux(!io.dat.exe_br_lt, PC_BRJMP, PC_4),
Mux(io.dat.exe_br_type === BR_GEU, Mux(!io.dat.exe_br_ltu, PC_BRJMP, PC_4),
Mux(io.dat.exe_br_type === BR_LT , Mux( io.dat.exe_br_lt, PC_BRJMP, PC_4),
Mux(io.dat.exe_br_type === BR_LTU, Mux( io.dat.exe_br_ltu, PC_BRJMP, PC_4),
Mux(io.dat.exe_br_type === BR_J , PC_BRJMP,
Mux(io.dat.exe_br_type === BR_JR , PC_JALR,
PC_4
))))))))))
val ifkill = (ctrl_exe_pc_sel =/= PC_4) || cs_fencei || RegNext(cs_fencei)
val deckill = (ctrl_exe_pc_sel =/= PC_4)
// Exception Handling ---------------------
io.ctl.pipeline_kill := (io.dat.csr_eret || io.ctl.mem_exception || io.dat.csr_interrupt)
val dec_illegal = (!cs_val_inst && io.dat.dec_valid)
// Stall Signal Logic --------------------
val stall = Wire(Bool())
val dec_rs1_addr = io.dat.dec_inst(19, 15)
val dec_rs2_addr = io.dat.dec_inst(24, 20)
val dec_wbaddr = io.dat.dec_inst(11, 7)
val dec_rs1_oen = Mux(deckill, false.B, cs_rs1_oen)
val dec_rs2_oen = Mux(deckill, false.B, cs_rs2_oen)
val exe_reg_wbaddr = Reg(UInt())
val mem_reg_wbaddr = Reg(UInt())
val wb_reg_wbaddr = Reg(UInt())
val exe_reg_ctrl_rf_wen = RegInit(false.B)
val mem_reg_ctrl_rf_wen = RegInit(false.B)
val wb_reg_ctrl_rf_wen = RegInit(false.B)
val exe_reg_illegal = RegInit(false.B)
val exe_reg_is_csr = RegInit(false.B)
// TODO rename stall==hazard_stall full_stall == cmiss_stall
val full_stall = Wire(Bool())
when (!stall && !full_stall)
{
when (deckill)
{
exe_reg_wbaddr := 0.U
exe_reg_ctrl_rf_wen := false.B
exe_reg_is_csr := false.B
exe_reg_illegal := false.B
}
.otherwise
{
exe_reg_wbaddr := dec_wbaddr
exe_reg_ctrl_rf_wen := cs_rf_wen
exe_reg_is_csr := cs_csr_cmd =/= CSR.N && cs_csr_cmd =/= CSR.I
exe_reg_illegal := dec_illegal
}
}
.elsewhen (stall && !full_stall)
{
// kill exe stage
exe_reg_wbaddr := 0.U
exe_reg_ctrl_rf_wen := false.B
exe_reg_is_csr := false.B
exe_reg_illegal := false.B
}
when (!full_stall) {
mem_reg_wbaddr := exe_reg_wbaddr
wb_reg_wbaddr := mem_reg_wbaddr
mem_reg_ctrl_rf_wen := exe_reg_ctrl_rf_wen
wb_reg_ctrl_rf_wen := mem_reg_ctrl_rf_wen
}
val exe_inst_is_load = RegInit(false.B)
when (!full_stall)
{
exe_inst_is_load := cs_mem_en && (cs_mem_fcn === M_XRD)
}
// Clear instruction exception (from the "instruction" following xret) when returning from trap
when (io.dat.csr_eret)
{
exe_reg_illegal := false.B
}
// Stall signal stalls instruction fetch & decode stages,
// inserts NOP into execute stage, and drains execute, memory, and writeback stages
// stalls on I$ misses and on hazards
if (USE_FULL_BYPASSING)
{
// stall for load-use hazard
stall := ((exe_inst_is_load) && (exe_reg_wbaddr === dec_rs1_addr) && (exe_reg_wbaddr =/= 0.U) && dec_rs1_oen) ||
((exe_inst_is_load) && (exe_reg_wbaddr === dec_rs2_addr) && (exe_reg_wbaddr =/= 0.U) && dec_rs2_oen) ||
(exe_reg_is_csr)
}
else
{
// stall for all hazards
stall := ((exe_reg_wbaddr === dec_rs1_addr) && (dec_rs1_addr =/= 0.U) && exe_reg_ctrl_rf_wen && dec_rs1_oen) ||
((mem_reg_wbaddr === dec_rs1_addr) && (dec_rs1_addr =/= 0.U) && mem_reg_ctrl_rf_wen && dec_rs1_oen) ||
((wb_reg_wbaddr === dec_rs1_addr) && (dec_rs1_addr =/= 0.U) && wb_reg_ctrl_rf_wen && dec_rs1_oen) ||
((exe_reg_wbaddr === dec_rs2_addr) && (dec_rs2_addr =/= 0.U) && exe_reg_ctrl_rf_wen && dec_rs2_oen) ||
((mem_reg_wbaddr === dec_rs2_addr) && (dec_rs2_addr =/= 0.U) && mem_reg_ctrl_rf_wen && dec_rs2_oen) ||
((wb_reg_wbaddr === dec_rs2_addr) && (dec_rs2_addr =/= 0.U) && wb_reg_ctrl_rf_wen && dec_rs2_oen) ||
((exe_inst_is_load) && (exe_reg_wbaddr === dec_rs1_addr) && (exe_reg_wbaddr =/= 0.U) && dec_rs1_oen) ||
((exe_inst_is_load) && (exe_reg_wbaddr === dec_rs2_addr) && (exe_reg_wbaddr =/= 0.U) && dec_rs2_oen) ||
((exe_reg_is_csr))
}
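// Worked example (illustrative): with USE_FULL_BYPASSING, "lw x5, 0(x1); add x6, x5, x2"
// stalls the add in decode for one cycle, because a load's result cannot be bypassed
// until it returns from memory; all other RAW hazards are covered by the bypass network.
// Without bypassing, the same add waits in decode until the load has written back x5.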
// stall full pipeline on D$ miss
val dmem_val = io.dat.mem_ctrl_dmem_val
full_stall := !((dmem_val && io.dmem.resp.valid) || !dmem_val)
io.ctl.dec_stall := stall // stall if, dec stage (pipeline hazard)
io.ctl.full_stall := full_stall // stall entire pipeline (cache miss)
io.ctl.exe_pc_sel := ctrl_exe_pc_sel
io.ctl.br_type := cs_br_type
io.ctl.if_kill := ifkill
io.ctl.dec_kill := deckill
io.ctl.op1_sel := cs_op1_sel
io.ctl.op2_sel := cs_op2_sel
io.ctl.alu_fun := cs_alu_fun
io.ctl.wb_sel := cs_wb_sel
io.ctl.rf_wen := cs_rf_wen
// we need to stall IF while fencei goes through DEC and EXE, as there may
// be a store in MEM that we must wait to complete.
io.ctl.fencei := cs_fencei || RegNext(cs_fencei)
// Exception priority matters!
io.ctl.mem_exception := RegNext((exe_reg_illegal || io.dat.exe_inst_misaligned) && !io.dat.csr_eret) || io.dat.mem_data_misaligned
io.ctl.mem_exception_cause := Mux(RegNext(exe_reg_illegal), Causes.illegal_instruction.U,
Mux(RegNext(io.dat.exe_inst_misaligned), Causes.misaligned_fetch.U,
Mux(io.dat.mem_store, Causes.misaligned_store.U,
Causes.misaligned_load.U
)))
// convert CSR instructions with raddr1 == 0 to read-only CSR commands
val rs1_addr = io.dat.dec_inst(RS1_MSB, RS1_LSB)
val csr_ren = (cs_csr_cmd === CSR.S || cs_csr_cmd === CSR.C) && rs1_addr === 0.U
io.ctl.csr_cmd := Mux(csr_ren, CSR.R, cs_csr_cmd)
io.ctl.mem_val := cs_mem_en
io.ctl.mem_fcn := cs_mem_fcn
io.ctl.mem_typ := cs_msk_sel
}
| module CtlPath( // @[cpath.scala:58:7]
input clock, // @[cpath.scala:58:7]
input reset, // @[cpath.scala:58:7]
input io_imem_req_ready, // @[cpath.scala:60:14]
input io_imem_resp_valid, // @[cpath.scala:60:14]
input [31:0] io_imem_resp_bits_data, // @[cpath.scala:60:14]
input io_dmem_req_ready, // @[cpath.scala:60:14]
input io_dmem_resp_valid, // @[cpath.scala:60:14]
input [31:0] io_dmem_resp_bits_data, // @[cpath.scala:60:14]
input [31:0] io_dat_dec_inst, // @[cpath.scala:60:14]
input io_dat_dec_valid, // @[cpath.scala:60:14]
input io_dat_exe_br_eq, // @[cpath.scala:60:14]
input io_dat_exe_br_lt, // @[cpath.scala:60:14]
input io_dat_exe_br_ltu, // @[cpath.scala:60:14]
input [3:0] io_dat_exe_br_type, // @[cpath.scala:60:14]
input io_dat_exe_inst_misaligned, // @[cpath.scala:60:14]
input io_dat_mem_ctrl_dmem_val, // @[cpath.scala:60:14]
input io_dat_mem_data_misaligned, // @[cpath.scala:60:14]
input io_dat_mem_store, // @[cpath.scala:60:14]
input io_dat_csr_eret, // @[cpath.scala:60:14]
input io_dat_csr_interrupt, // @[cpath.scala:60:14]
output io_ctl_dec_stall, // @[cpath.scala:60:14]
output io_ctl_full_stall, // @[cpath.scala:60:14]
output [1:0] io_ctl_exe_pc_sel, // @[cpath.scala:60:14]
output [3:0] io_ctl_br_type, // @[cpath.scala:60:14]
output io_ctl_if_kill, // @[cpath.scala:60:14]
output io_ctl_dec_kill, // @[cpath.scala:60:14]
output [1:0] io_ctl_op1_sel, // @[cpath.scala:60:14]
output [2:0] io_ctl_op2_sel, // @[cpath.scala:60:14]
output [3:0] io_ctl_alu_fun, // @[cpath.scala:60:14]
output [1:0] io_ctl_wb_sel, // @[cpath.scala:60:14]
output io_ctl_rf_wen, // @[cpath.scala:60:14]
output io_ctl_mem_val, // @[cpath.scala:60:14]
output [1:0] io_ctl_mem_fcn, // @[cpath.scala:60:14]
output [2:0] io_ctl_mem_typ, // @[cpath.scala:60:14]
output [2:0] io_ctl_csr_cmd, // @[cpath.scala:60:14]
output io_ctl_fencei, // @[cpath.scala:60:14]
output io_ctl_pipeline_kill, // @[cpath.scala:60:14]
output io_ctl_mem_exception, // @[cpath.scala:60:14]
output [31:0] io_ctl_mem_exception_cause // @[cpath.scala:60:14]
);
wire io_imem_req_ready_0 = io_imem_req_ready; // @[cpath.scala:58:7]
wire io_imem_resp_valid_0 = io_imem_resp_valid; // @[cpath.scala:58:7]
wire [31:0] io_imem_resp_bits_data_0 = io_imem_resp_bits_data; // @[cpath.scala:58:7]
wire io_dmem_req_ready_0 = io_dmem_req_ready; // @[cpath.scala:58:7]
wire io_dmem_resp_valid_0 = io_dmem_resp_valid; // @[cpath.scala:58:7]
wire [31:0] io_dmem_resp_bits_data_0 = io_dmem_resp_bits_data; // @[cpath.scala:58:7]
wire [31:0] io_dat_dec_inst_0 = io_dat_dec_inst; // @[cpath.scala:58:7]
wire io_dat_dec_valid_0 = io_dat_dec_valid; // @[cpath.scala:58:7]
wire io_dat_exe_br_eq_0 = io_dat_exe_br_eq; // @[cpath.scala:58:7]
wire io_dat_exe_br_lt_0 = io_dat_exe_br_lt; // @[cpath.scala:58:7]
wire io_dat_exe_br_ltu_0 = io_dat_exe_br_ltu; // @[cpath.scala:58:7]
wire [3:0] io_dat_exe_br_type_0 = io_dat_exe_br_type; // @[cpath.scala:58:7]
wire io_dat_exe_inst_misaligned_0 = io_dat_exe_inst_misaligned; // @[cpath.scala:58:7]
wire io_dat_mem_ctrl_dmem_val_0 = io_dat_mem_ctrl_dmem_val; // @[cpath.scala:58:7]
wire io_dat_mem_data_misaligned_0 = io_dat_mem_data_misaligned; // @[cpath.scala:58:7]
wire io_dat_mem_store_0 = io_dat_mem_store; // @[cpath.scala:58:7]
wire io_dat_csr_eret_0 = io_dat_csr_eret; // @[cpath.scala:58:7]
wire io_dat_csr_interrupt_0 = io_dat_csr_interrupt; // @[cpath.scala:58:7]
wire [3:0] _csignals_T_149 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_150 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_151 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_152 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_153 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_154 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_155 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_156 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_157 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_158 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_159 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_160 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_161 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_394 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_395 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_396 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_397 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_398 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_399 = 4'h0; // @[Lookup.scala:34:39]
wire [3:0] _csignals_T_400 = 4'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_198 = 2'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_199 = 2'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_200 = 2'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_201 = 2'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_202 = 2'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_203 = 2'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_204 = 2'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_443 = 2'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_444 = 2'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_445 = 2'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_446 = 2'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_447 = 2'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_448 = 2'h0; // @[Lookup.scala:34:39]
wire [1:0] _csignals_T_449 = 2'h0; // @[Lookup.scala:34:39]
wire [2:0] io_imem_req_bits_typ = 3'h0; // @[cpath.scala:58:7]
wire [2:0] io_dmem_req_bits_typ = 3'h0; // @[cpath.scala:58:7]
wire [2:0] _csignals_T_247 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_248 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_249 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_250 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_251 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_252 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_253 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_254 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_255 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_256 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_257 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_258 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_259 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_639 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_640 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_641 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_642 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_643 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_644 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_645 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_646 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_647 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_648 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_649 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_650 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_651 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_652 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_653 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_654 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_655 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_656 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_657 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_658 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_659 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_660 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_661 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_662 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_663 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_664 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_665 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_666 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_667 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_668 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_669 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_670 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_671 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_672 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_673 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_674 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_675 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_676 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_677 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_678 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_679 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_680 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_688 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_689 = 3'h0; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_690 = 3'h0; // @[Lookup.scala:34:39]
wire [31:0] io_imem_req_bits_addr = 32'h0; // @[cpath.scala:58:7]
wire [31:0] io_imem_req_bits_data = 32'h0; // @[cpath.scala:58:7]
wire [31:0] io_dmem_req_bits_addr = 32'h0; // @[cpath.scala:58:7]
wire [31:0] io_dmem_req_bits_data = 32'h0; // @[cpath.scala:58:7]
wire io_dcpath_halt = 1'h0; // @[cpath.scala:58:7]
wire io_imem_req_valid = 1'h0; // @[cpath.scala:58:7]
wire io_imem_req_bits_fcn = 1'h0; // @[cpath.scala:58:7]
wire io_dmem_req_valid = 1'h0; // @[cpath.scala:58:7]
wire io_dmem_req_bits_fcn = 1'h0; // @[cpath.scala:58:7]
wire _csignals_T_296 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_297 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_298 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_299 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_300 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_301 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_302 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_345 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_346 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_347 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_348 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_349 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_350 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_351 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_492 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_493 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_494 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_495 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_496 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_497 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_498 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_541 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_542 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_543 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_544 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_545 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_546 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_547 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_548 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_549 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_550 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_551 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_552 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_553 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_554 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_555 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_556 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_557 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_558 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_559 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_560 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_561 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_562 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_563 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_564 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_565 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_566 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_567 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_568 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_569 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_570 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_571 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_572 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_573 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_574 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_575 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_576 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_577 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_578 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_579 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_580 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_581 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_582 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_590 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_591 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_592 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_593 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_594 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_595 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_596 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_597 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_598 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_599 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_600 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_601 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_602 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_603 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_604 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_605 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_606 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_607 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_608 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_609 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_610 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_611 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_612 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_613 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_614 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_615 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_616 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_617 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_618 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_619 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_620 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_621 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_622 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_623 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_624 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_625 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_626 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_627 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_628 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_629 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_630 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_631 = 1'h0; // @[Lookup.scala:34:39]
wire _csignals_T_737 = 1'h0; // @[Lookup.scala:34:39]
wire [31:0] _csignals_T_86 = io_dat_dec_inst_0; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_88 = io_dat_dec_inst_0; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_90 = io_dat_dec_inst_0; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_92 = io_dat_dec_inst_0; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_94 = io_dat_dec_inst_0; // @[Lookup.scala:31:38]
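  // Declarations of the control-path outputs and of the decoded control-word
  // fields (csignals_*); their drivers appear further below.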
wire stall; // @[cpath.scala:158:22]
wire full_stall; // @[cpath.scala:177:25]
wire [1:0] ctrl_exe_pc_sel; // @[cpath.scala:135:29]
wire [3:0] csignals_1; // @[Lookup.scala:34:39]
wire ifkill; // @[cpath.scala:148:58]
wire deckill; // @[cpath.scala:149:35]
wire [1:0] csignals_2; // @[Lookup.scala:34:39]
wire [2:0] csignals_3; // @[Lookup.scala:34:39]
wire [3:0] csignals_6; // @[Lookup.scala:34:39]
wire [1:0] csignals_7; // @[Lookup.scala:34:39]
wire csignals_8; // @[Lookup.scala:34:39]
wire csignals_9; // @[Lookup.scala:34:39]
wire [2:0] csignals_11; // @[Lookup.scala:34:39]
wire [2:0] _io_ctl_csr_cmd_T; // @[cpath.scala:280:25]
wire _io_ctl_fencei_T; // @[cpath.scala:267:35]
wire _io_ctl_pipeline_kill_T_1; // @[cpath.scala:153:69]
wire _io_ctl_mem_exception_T_3; // @[cpath.scala:270:105]
wire io_ctl_dec_stall_0; // @[cpath.scala:58:7]
wire io_ctl_full_stall_0; // @[cpath.scala:58:7]
wire [1:0] io_ctl_exe_pc_sel_0; // @[cpath.scala:58:7]
wire [3:0] io_ctl_br_type_0; // @[cpath.scala:58:7]
wire io_ctl_if_kill_0; // @[cpath.scala:58:7]
wire io_ctl_dec_kill_0; // @[cpath.scala:58:7]
wire [1:0] io_ctl_op1_sel_0; // @[cpath.scala:58:7]
wire [2:0] io_ctl_op2_sel_0; // @[cpath.scala:58:7]
wire [3:0] io_ctl_alu_fun_0; // @[cpath.scala:58:7]
wire [1:0] io_ctl_wb_sel_0; // @[cpath.scala:58:7]
wire io_ctl_rf_wen_0; // @[cpath.scala:58:7]
wire io_ctl_mem_val_0; // @[cpath.scala:58:7]
wire [1:0] io_ctl_mem_fcn_0; // @[cpath.scala:58:7]
wire [2:0] io_ctl_mem_typ_0; // @[cpath.scala:58:7]
wire [2:0] io_ctl_csr_cmd_0; // @[cpath.scala:58:7]
wire io_ctl_fencei_0; // @[cpath.scala:58:7]
wire io_ctl_pipeline_kill_0; // @[cpath.scala:58:7]
wire io_ctl_mem_exception_0; // @[cpath.scala:58:7]
wire [31:0] io_ctl_mem_exception_cause_0; // @[cpath.scala:58:7]
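  // Instruction decode: the _csignals_T_* wires below implement the
  // priority-encoded decode table emitted by Chisel's Lookup. Each term
  // matches io_dat_dec_inst_0 against one RV32I encoding. The masked value
  // below keeps only the funct3 and opcode fields (mask 32'h707F), which is
  // enough to distinguish loads, stores, OP-IMM, branches, JALR, CSR accesses
  // and FENCE.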
wire [31:0] _GEN = {17'h0, io_dat_dec_inst_0[14:0] & 15'h707F}; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T; // @[Lookup.scala:31:38]
assign _csignals_T = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_2; // @[Lookup.scala:31:38]
assign _csignals_T_2 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_4; // @[Lookup.scala:31:38]
assign _csignals_T_4 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_6; // @[Lookup.scala:31:38]
assign _csignals_T_6 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_8; // @[Lookup.scala:31:38]
assign _csignals_T_8 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_10; // @[Lookup.scala:31:38]
assign _csignals_T_10 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_12; // @[Lookup.scala:31:38]
assign _csignals_T_12 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_14; // @[Lookup.scala:31:38]
assign _csignals_T_14 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_20; // @[Lookup.scala:31:38]
assign _csignals_T_20 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_22; // @[Lookup.scala:31:38]
assign _csignals_T_22 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_24; // @[Lookup.scala:31:38]
assign _csignals_T_24 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_26; // @[Lookup.scala:31:38]
assign _csignals_T_26 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_28; // @[Lookup.scala:31:38]
assign _csignals_T_28 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_30; // @[Lookup.scala:31:38]
assign _csignals_T_30 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_60; // @[Lookup.scala:31:38]
assign _csignals_T_60 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_62; // @[Lookup.scala:31:38]
assign _csignals_T_62 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_64; // @[Lookup.scala:31:38]
assign _csignals_T_64 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_66; // @[Lookup.scala:31:38]
assign _csignals_T_66 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_68; // @[Lookup.scala:31:38]
assign _csignals_T_68 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_70; // @[Lookup.scala:31:38]
assign _csignals_T_70 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_72; // @[Lookup.scala:31:38]
assign _csignals_T_72 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_74; // @[Lookup.scala:31:38]
assign _csignals_T_74 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_76; // @[Lookup.scala:31:38]
assign _csignals_T_76 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_78; // @[Lookup.scala:31:38]
assign _csignals_T_78 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_80; // @[Lookup.scala:31:38]
assign _csignals_T_80 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_82; // @[Lookup.scala:31:38]
assign _csignals_T_82 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_84; // @[Lookup.scala:31:38]
assign _csignals_T_84 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_96; // @[Lookup.scala:31:38]
assign _csignals_T_96 = _GEN; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_98; // @[Lookup.scala:31:38]
assign _csignals_T_98 = _GEN; // @[Lookup.scala:31:38]
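  // Masked-opcode comparisons: 32'h2003/32'h3/32'h4003/32'h1003/32'h5003 are
  // the RV32I load encodings (LW/LB/LBU/LH/LHU); 32'h2023/32'h23/32'h1023 are
  // the store encodings (SW/SB/SH).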
wire _csignals_T_1 = _csignals_T == 32'h2003; // @[Lookup.scala:31:38]
wire _csignals_T_3 = _csignals_T_2 == 32'h3; // @[Lookup.scala:31:38]
wire _csignals_T_5 = _csignals_T_4 == 32'h4003; // @[Lookup.scala:31:38]
wire _csignals_T_7 = _csignals_T_6 == 32'h1003; // @[Lookup.scala:31:38]
wire _csignals_T_9 = _csignals_T_8 == 32'h5003; // @[Lookup.scala:31:38]
wire _csignals_T_11 = _csignals_T_10 == 32'h2023; // @[Lookup.scala:31:38]
wire _csignals_T_13 = _csignals_T_12 == 32'h23; // @[Lookup.scala:31:38]
wire _csignals_T_15 = _csignals_T_14 == 32'h1023; // @[Lookup.scala:31:38]
wire _csignals_T_583 = _csignals_T_15; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_632 = _csignals_T_15; // @[Lookup.scala:31:38, :34:39]
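  // Opcode-only matches (bits [6:0]): 32'h17 = AUIPC, 32'h37 = LUI, 32'h6F = JAL.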
wire [31:0] _GEN_0 = {25'h0, io_dat_dec_inst_0[6:0]}; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_16; // @[Lookup.scala:31:38]
assign _csignals_T_16 = _GEN_0; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_18; // @[Lookup.scala:31:38]
assign _csignals_T_18 = _GEN_0; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_58; // @[Lookup.scala:31:38]
assign _csignals_T_58 = _GEN_0; // @[Lookup.scala:31:38]
wire _csignals_T_17 = _csignals_T_16 == 32'h17; // @[Lookup.scala:31:38]
wire _csignals_T_19 = _csignals_T_18 == 32'h37; // @[Lookup.scala:31:38]
wire _csignals_T_21 = _csignals_T_20 == 32'h13; // @[Lookup.scala:31:38]
wire _csignals_T_23 = _csignals_T_22 == 32'h7013; // @[Lookup.scala:31:38]
wire _csignals_T_25 = _csignals_T_24 == 32'h6013; // @[Lookup.scala:31:38]
wire _csignals_T_27 = _csignals_T_26 == 32'h4013; // @[Lookup.scala:31:38]
wire _csignals_T_29 = _csignals_T_28 == 32'h2013; // @[Lookup.scala:31:38]
wire _csignals_T_31 = _csignals_T_30 == 32'h3013; // @[Lookup.scala:31:38]
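  // Full funct7+funct3+opcode matches (mask 32'hFE00707F): register-register
  // ALU operations and the immediate shifts, e.g. 32'h33 = ADD,
  // 32'h40000033 = SUB, 32'h1013 = SLLI, 32'h40005013 = SRAI.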
wire [31:0] _GEN_1 = io_dat_dec_inst_0 & 32'hFE00707F; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_32; // @[Lookup.scala:31:38]
assign _csignals_T_32 = _GEN_1; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_34; // @[Lookup.scala:31:38]
assign _csignals_T_34 = _GEN_1; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_36; // @[Lookup.scala:31:38]
assign _csignals_T_36 = _GEN_1; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_38; // @[Lookup.scala:31:38]
assign _csignals_T_38 = _GEN_1; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_40; // @[Lookup.scala:31:38]
assign _csignals_T_40 = _GEN_1; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_42; // @[Lookup.scala:31:38]
assign _csignals_T_42 = _GEN_1; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_44; // @[Lookup.scala:31:38]
assign _csignals_T_44 = _GEN_1; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_46; // @[Lookup.scala:31:38]
assign _csignals_T_46 = _GEN_1; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_48; // @[Lookup.scala:31:38]
assign _csignals_T_48 = _GEN_1; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_50; // @[Lookup.scala:31:38]
assign _csignals_T_50 = _GEN_1; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_52; // @[Lookup.scala:31:38]
assign _csignals_T_52 = _GEN_1; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_54; // @[Lookup.scala:31:38]
assign _csignals_T_54 = _GEN_1; // @[Lookup.scala:31:38]
wire [31:0] _csignals_T_56; // @[Lookup.scala:31:38]
assign _csignals_T_56 = _GEN_1; // @[Lookup.scala:31:38]
wire _csignals_T_33 = _csignals_T_32 == 32'h1013; // @[Lookup.scala:31:38]
wire _csignals_T_35 = _csignals_T_34 == 32'h40005013; // @[Lookup.scala:31:38]
wire _csignals_T_37 = _csignals_T_36 == 32'h5013; // @[Lookup.scala:31:38]
wire _csignals_T_39 = _csignals_T_38 == 32'h1033; // @[Lookup.scala:31:38]
wire _csignals_T_41 = _csignals_T_40 == 32'h33; // @[Lookup.scala:31:38]
wire _csignals_T_43 = _csignals_T_42 == 32'h40000033; // @[Lookup.scala:31:38]
wire _csignals_T_45 = _csignals_T_44 == 32'h2033; // @[Lookup.scala:31:38]
wire _csignals_T_47 = _csignals_T_46 == 32'h3033; // @[Lookup.scala:31:38]
wire _csignals_T_49 = _csignals_T_48 == 32'h7033; // @[Lookup.scala:31:38]
wire _csignals_T_51 = _csignals_T_50 == 32'h6033; // @[Lookup.scala:31:38]
wire _csignals_T_53 = _csignals_T_52 == 32'h4033; // @[Lookup.scala:31:38]
wire _csignals_T_55 = _csignals_T_54 == 32'h40005033; // @[Lookup.scala:31:38]
wire _csignals_T_57 = _csignals_T_56 == 32'h5033; // @[Lookup.scala:31:38]
wire _csignals_T_59 = _csignals_T_58 == 32'h6F; // @[Lookup.scala:31:38]
wire _csignals_T_61 = _csignals_T_60 == 32'h67; // @[Lookup.scala:31:38]
wire _csignals_T_63 = _csignals_T_62 == 32'h63; // @[Lookup.scala:31:38]
wire _csignals_T_65 = _csignals_T_64 == 32'h1063; // @[Lookup.scala:31:38]
wire _csignals_T_67 = _csignals_T_66 == 32'h5063; // @[Lookup.scala:31:38]
wire _csignals_T_69 = _csignals_T_68 == 32'h7063; // @[Lookup.scala:31:38]
wire _csignals_T_71 = _csignals_T_70 == 32'h4063; // @[Lookup.scala:31:38]
wire _csignals_T_73 = _csignals_T_72 == 32'h6063; // @[Lookup.scala:31:38]
wire _csignals_T_75 = _csignals_T_74 == 32'h5073; // @[Lookup.scala:31:38]
wire _csignals_T_77 = _csignals_T_76 == 32'h6073; // @[Lookup.scala:31:38]
wire _csignals_T_79 = _csignals_T_78 == 32'h1073; // @[Lookup.scala:31:38]
wire _csignals_T_81 = _csignals_T_80 == 32'h2073; // @[Lookup.scala:31:38]
wire _csignals_T_83 = _csignals_T_82 == 32'h3073; // @[Lookup.scala:31:38]
wire _csignals_T_85 = _csignals_T_84 == 32'h7073; // @[Lookup.scala:31:38]
wire _csignals_T_303 = _csignals_T_85; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_352 = _csignals_T_85; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_499 = _csignals_T_85; // @[Lookup.scala:31:38, :34:39]
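  // Full 32-bit matches for the system instructions: 32'h73 = ECALL,
  // 32'h30200073 = MRET, 32'h7B200073 = DRET, 32'h100073 = EBREAK,
  // 32'h10500073 = WFI; the masked matches 32'h100F and 32'hF below are
  // FENCE.I and FENCE.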
wire _csignals_T_87 = _csignals_T_86 == 32'h73; // @[Lookup.scala:31:38]
wire _csignals_T_89 = _csignals_T_88 == 32'h30200073; // @[Lookup.scala:31:38]
wire _csignals_T_91 = _csignals_T_90 == 32'h7B200073; // @[Lookup.scala:31:38]
wire _csignals_T_93 = _csignals_T_92 == 32'h100073; // @[Lookup.scala:31:38]
wire _csignals_T_95 = _csignals_T_94 == 32'h10500073; // @[Lookup.scala:31:38]
wire _csignals_T_97 = _csignals_T_96 == 32'h100F; // @[Lookup.scala:31:38]
wire _csignals_T_738 = _csignals_T_97; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_99 = _csignals_T_98 == 32'hF; // @[Lookup.scala:31:38]
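  // csignals_0: OR-reduction over every pattern match above, i.e. asserted
  // when the instruction hits any row of the decode table (an
  // "instruction recognised" flag).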
wire _csignals_T_100 = _csignals_T_99; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_101 = _csignals_T_97 | _csignals_T_100; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_102 = _csignals_T_95 | _csignals_T_101; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_103 = _csignals_T_93 | _csignals_T_102; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_104 = _csignals_T_91 | _csignals_T_103; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_105 = _csignals_T_89 | _csignals_T_104; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_106 = _csignals_T_87 | _csignals_T_105; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_107 = _csignals_T_85 | _csignals_T_106; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_108 = _csignals_T_83 | _csignals_T_107; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_109 = _csignals_T_81 | _csignals_T_108; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_110 = _csignals_T_79 | _csignals_T_109; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_111 = _csignals_T_77 | _csignals_T_110; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_112 = _csignals_T_75 | _csignals_T_111; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_113 = _csignals_T_73 | _csignals_T_112; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_114 = _csignals_T_71 | _csignals_T_113; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_115 = _csignals_T_69 | _csignals_T_114; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_116 = _csignals_T_67 | _csignals_T_115; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_117 = _csignals_T_65 | _csignals_T_116; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_118 = _csignals_T_63 | _csignals_T_117; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_119 = _csignals_T_61 | _csignals_T_118; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_120 = _csignals_T_59 | _csignals_T_119; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_121 = _csignals_T_57 | _csignals_T_120; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_122 = _csignals_T_55 | _csignals_T_121; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_123 = _csignals_T_53 | _csignals_T_122; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_124 = _csignals_T_51 | _csignals_T_123; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_125 = _csignals_T_49 | _csignals_T_124; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_126 = _csignals_T_47 | _csignals_T_125; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_127 = _csignals_T_45 | _csignals_T_126; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_128 = _csignals_T_43 | _csignals_T_127; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_129 = _csignals_T_41 | _csignals_T_128; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_130 = _csignals_T_39 | _csignals_T_129; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_131 = _csignals_T_37 | _csignals_T_130; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_132 = _csignals_T_35 | _csignals_T_131; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_133 = _csignals_T_33 | _csignals_T_132; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_134 = _csignals_T_31 | _csignals_T_133; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_135 = _csignals_T_29 | _csignals_T_134; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_136 = _csignals_T_27 | _csignals_T_135; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_137 = _csignals_T_25 | _csignals_T_136; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_138 = _csignals_T_23 | _csignals_T_137; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_139 = _csignals_T_21 | _csignals_T_138; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_140 = _csignals_T_19 | _csignals_T_139; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_141 = _csignals_T_17 | _csignals_T_140; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_142 = _csignals_T_15 | _csignals_T_141; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_143 = _csignals_T_13 | _csignals_T_142; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_144 = _csignals_T_11 | _csignals_T_143; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_145 = _csignals_T_9 | _csignals_T_144; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_146 = _csignals_T_7 | _csignals_T_145; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_147 = _csignals_T_5 | _csignals_T_146; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_148 = _csignals_T_3 | _csignals_T_147; // @[Lookup.scala:31:38, :34:39]
wire csignals_0 = _csignals_T_1 | _csignals_T_148; // @[Lookup.scala:31:38, :34:39]
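  // csignals_1 (io_ctl_br_type): branch/jump type. The chain is built from
  // the last table row inward, so earlier rows (the outermost muxes) take
  // priority; the same structure is repeated for every csignals_* field below.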
wire [3:0] _csignals_T_162 = _csignals_T_73 ? 4'h6 : 4'h0; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_163 = _csignals_T_71 ? 4'h5 : _csignals_T_162; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_164 = _csignals_T_69 ? 4'h4 : _csignals_T_163; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_165 = _csignals_T_67 ? 4'h3 : _csignals_T_164; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_166 = _csignals_T_65 ? 4'h1 : _csignals_T_165; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_167 = _csignals_T_63 ? 4'h2 : _csignals_T_166; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_168 = _csignals_T_61 ? 4'h8 : _csignals_T_167; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_169 = _csignals_T_59 ? 4'h7 : _csignals_T_168; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_170 = _csignals_T_57 ? 4'h0 : _csignals_T_169; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_171 = _csignals_T_55 ? 4'h0 : _csignals_T_170; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_172 = _csignals_T_53 ? 4'h0 : _csignals_T_171; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_173 = _csignals_T_51 ? 4'h0 : _csignals_T_172; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_174 = _csignals_T_49 ? 4'h0 : _csignals_T_173; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_175 = _csignals_T_47 ? 4'h0 : _csignals_T_174; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_176 = _csignals_T_45 ? 4'h0 : _csignals_T_175; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_177 = _csignals_T_43 ? 4'h0 : _csignals_T_176; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_178 = _csignals_T_41 ? 4'h0 : _csignals_T_177; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_179 = _csignals_T_39 ? 4'h0 : _csignals_T_178; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_180 = _csignals_T_37 ? 4'h0 : _csignals_T_179; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_181 = _csignals_T_35 ? 4'h0 : _csignals_T_180; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_182 = _csignals_T_33 ? 4'h0 : _csignals_T_181; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_183 = _csignals_T_31 ? 4'h0 : _csignals_T_182; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_184 = _csignals_T_29 ? 4'h0 : _csignals_T_183; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_185 = _csignals_T_27 ? 4'h0 : _csignals_T_184; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_186 = _csignals_T_25 ? 4'h0 : _csignals_T_185; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_187 = _csignals_T_23 ? 4'h0 : _csignals_T_186; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_188 = _csignals_T_21 ? 4'h0 : _csignals_T_187; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_189 = _csignals_T_19 ? 4'h0 : _csignals_T_188; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_190 = _csignals_T_17 ? 4'h0 : _csignals_T_189; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_191 = _csignals_T_15 ? 4'h0 : _csignals_T_190; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_192 = _csignals_T_13 ? 4'h0 : _csignals_T_191; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_193 = _csignals_T_11 ? 4'h0 : _csignals_T_192; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_194 = _csignals_T_9 ? 4'h0 : _csignals_T_193; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_195 = _csignals_T_7 ? 4'h0 : _csignals_T_194; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_196 = _csignals_T_5 ? 4'h0 : _csignals_T_195; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_197 = _csignals_T_3 ? 4'h0 : _csignals_T_196; // @[Lookup.scala:31:38, :34:39]
assign csignals_1 = _csignals_T_1 ? 4'h0 : _csignals_T_197; // @[Lookup.scala:31:38, :34:39]
assign io_ctl_br_type_0 = csignals_1; // @[Lookup.scala:34:39]
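  // csignals_2 (io_ctl_op1_sel): first ALU operand select; from the rows that
  // set each value this is likely rs1, the PC (AUIPC), or the zero-extended
  // CSR immediate (CSRR*I).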
wire [1:0] _csignals_T_205 = {_csignals_T_85, 1'h0}; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_206 = _csignals_T_83 ? 2'h0 : _csignals_T_205; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_207 = _csignals_T_81 ? 2'h0 : _csignals_T_206; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_208 = _csignals_T_79 ? 2'h0 : _csignals_T_207; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_209 = _csignals_T_77 ? 2'h2 : _csignals_T_208; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_210 = _csignals_T_75 ? 2'h2 : _csignals_T_209; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_211 = _csignals_T_73 ? 2'h0 : _csignals_T_210; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_212 = _csignals_T_71 ? 2'h0 : _csignals_T_211; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_213 = _csignals_T_69 ? 2'h0 : _csignals_T_212; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_214 = _csignals_T_67 ? 2'h0 : _csignals_T_213; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_215 = _csignals_T_65 ? 2'h0 : _csignals_T_214; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_216 = _csignals_T_63 ? 2'h0 : _csignals_T_215; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_217 = _csignals_T_61 ? 2'h0 : _csignals_T_216; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_218 = _csignals_T_59 ? 2'h0 : _csignals_T_217; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_219 = _csignals_T_57 ? 2'h0 : _csignals_T_218; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_220 = _csignals_T_55 ? 2'h0 : _csignals_T_219; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_221 = _csignals_T_53 ? 2'h0 : _csignals_T_220; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_222 = _csignals_T_51 ? 2'h0 : _csignals_T_221; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_223 = _csignals_T_49 ? 2'h0 : _csignals_T_222; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_224 = _csignals_T_47 ? 2'h0 : _csignals_T_223; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_225 = _csignals_T_45 ? 2'h0 : _csignals_T_224; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_226 = _csignals_T_43 ? 2'h0 : _csignals_T_225; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_227 = _csignals_T_41 ? 2'h0 : _csignals_T_226; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_228 = _csignals_T_39 ? 2'h0 : _csignals_T_227; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_229 = _csignals_T_37 ? 2'h0 : _csignals_T_228; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_230 = _csignals_T_35 ? 2'h0 : _csignals_T_229; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_231 = _csignals_T_33 ? 2'h0 : _csignals_T_230; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_232 = _csignals_T_31 ? 2'h0 : _csignals_T_231; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_233 = _csignals_T_29 ? 2'h0 : _csignals_T_232; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_234 = _csignals_T_27 ? 2'h0 : _csignals_T_233; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_235 = _csignals_T_25 ? 2'h0 : _csignals_T_234; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_236 = _csignals_T_23 ? 2'h0 : _csignals_T_235; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_237 = _csignals_T_21 ? 2'h0 : _csignals_T_236; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_238 = _csignals_T_19 ? 2'h0 : _csignals_T_237; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_239 = _csignals_T_17 ? 2'h1 : _csignals_T_238; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_240 = _csignals_T_15 ? 2'h0 : _csignals_T_239; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_241 = _csignals_T_13 ? 2'h0 : _csignals_T_240; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_242 = _csignals_T_11 ? 2'h0 : _csignals_T_241; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_243 = _csignals_T_9 ? 2'h0 : _csignals_T_242; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_244 = _csignals_T_7 ? 2'h0 : _csignals_T_243; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_245 = _csignals_T_5 ? 2'h0 : _csignals_T_244; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_246 = _csignals_T_3 ? 2'h0 : _csignals_T_245; // @[Lookup.scala:31:38, :34:39]
assign csignals_2 = _csignals_T_1 ? 2'h0 : _csignals_T_246; // @[Lookup.scala:31:38, :34:39]
assign io_ctl_op1_sel_0 = csignals_2; // @[Lookup.scala:34:39]
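  // csignals_3 (io_ctl_op2_sel): second ALU operand select (rs2 or one of the
  // immediate formats).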
wire [2:0] _csignals_T_260 = _csignals_T_73 ? 3'h3 : 3'h0; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_261 = _csignals_T_71 ? 3'h3 : _csignals_T_260; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_262 = _csignals_T_69 ? 3'h3 : _csignals_T_261; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_263 = _csignals_T_67 ? 3'h3 : _csignals_T_262; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_264 = _csignals_T_65 ? 3'h3 : _csignals_T_263; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_265 = _csignals_T_63 ? 3'h3 : _csignals_T_264; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_266 = _csignals_T_61 ? 3'h1 : _csignals_T_265; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_267 = _csignals_T_59 ? 3'h5 : _csignals_T_266; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_268 = _csignals_T_57 ? 3'h0 : _csignals_T_267; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_269 = _csignals_T_55 ? 3'h0 : _csignals_T_268; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_270 = _csignals_T_53 ? 3'h0 : _csignals_T_269; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_271 = _csignals_T_51 ? 3'h0 : _csignals_T_270; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_272 = _csignals_T_49 ? 3'h0 : _csignals_T_271; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_273 = _csignals_T_47 ? 3'h0 : _csignals_T_272; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_274 = _csignals_T_45 ? 3'h0 : _csignals_T_273; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_275 = _csignals_T_43 ? 3'h0 : _csignals_T_274; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_276 = _csignals_T_41 ? 3'h0 : _csignals_T_275; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_277 = _csignals_T_39 ? 3'h0 : _csignals_T_276; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_278 = _csignals_T_37 ? 3'h1 : _csignals_T_277; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_279 = _csignals_T_35 ? 3'h1 : _csignals_T_278; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_280 = _csignals_T_33 ? 3'h1 : _csignals_T_279; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_281 = _csignals_T_31 ? 3'h1 : _csignals_T_280; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_282 = _csignals_T_29 ? 3'h1 : _csignals_T_281; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_283 = _csignals_T_27 ? 3'h1 : _csignals_T_282; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_284 = _csignals_T_25 ? 3'h1 : _csignals_T_283; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_285 = _csignals_T_23 ? 3'h1 : _csignals_T_284; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_286 = _csignals_T_21 ? 3'h1 : _csignals_T_285; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_287 = _csignals_T_19 ? 3'h4 : _csignals_T_286; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_288 = _csignals_T_17 ? 3'h4 : _csignals_T_287; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_289 = _csignals_T_15 ? 3'h2 : _csignals_T_288; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_290 = _csignals_T_13 ? 3'h2 : _csignals_T_289; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_291 = _csignals_T_11 ? 3'h2 : _csignals_T_290; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_292 = _csignals_T_9 ? 3'h1 : _csignals_T_291; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_293 = _csignals_T_7 ? 3'h1 : _csignals_T_292; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_294 = _csignals_T_5 ? 3'h1 : _csignals_T_293; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_295 = _csignals_T_3 ? 3'h1 : _csignals_T_294; // @[Lookup.scala:31:38, :34:39]
assign csignals_3 = _csignals_T_1 ? 3'h1 : _csignals_T_295; // @[Lookup.scala:31:38, :34:39]
assign io_ctl_op2_sel_0 = csignals_3; // @[Lookup.scala:34:39]
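  // csignals_4 and csignals_5 are decoded the same way but are not driven onto
  // the io_ctl bundle in this section; given their position in the decode
  // table they are likely the rs1/rs2 operand-enable flags used by the
  // stall/hazard logic.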
wire _csignals_T_304 = _csignals_T_83 | _csignals_T_303; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_305 = _csignals_T_81 | _csignals_T_304; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_306 = _csignals_T_79 | _csignals_T_305; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_307 = _csignals_T_77 | _csignals_T_306; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_308 = _csignals_T_75 | _csignals_T_307; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_309 = _csignals_T_73 | _csignals_T_308; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_310 = _csignals_T_71 | _csignals_T_309; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_311 = _csignals_T_69 | _csignals_T_310; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_312 = _csignals_T_67 | _csignals_T_311; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_313 = _csignals_T_65 | _csignals_T_312; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_314 = _csignals_T_63 | _csignals_T_313; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_315 = _csignals_T_61 | _csignals_T_314; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_316 = ~_csignals_T_59 & _csignals_T_315; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_317 = _csignals_T_57 | _csignals_T_316; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_318 = _csignals_T_55 | _csignals_T_317; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_319 = _csignals_T_53 | _csignals_T_318; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_320 = _csignals_T_51 | _csignals_T_319; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_321 = _csignals_T_49 | _csignals_T_320; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_322 = _csignals_T_47 | _csignals_T_321; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_323 = _csignals_T_45 | _csignals_T_322; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_324 = _csignals_T_43 | _csignals_T_323; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_325 = _csignals_T_41 | _csignals_T_324; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_326 = _csignals_T_39 | _csignals_T_325; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_327 = _csignals_T_37 | _csignals_T_326; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_328 = _csignals_T_35 | _csignals_T_327; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_329 = _csignals_T_33 | _csignals_T_328; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_330 = _csignals_T_31 | _csignals_T_329; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_331 = _csignals_T_29 | _csignals_T_330; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_332 = _csignals_T_27 | _csignals_T_331; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_333 = _csignals_T_25 | _csignals_T_332; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_334 = _csignals_T_23 | _csignals_T_333; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_335 = _csignals_T_21 | _csignals_T_334; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_336 = ~_csignals_T_19 & _csignals_T_335; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_337 = ~_csignals_T_17 & _csignals_T_336; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_338 = _csignals_T_15 | _csignals_T_337; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_339 = _csignals_T_13 | _csignals_T_338; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_340 = _csignals_T_11 | _csignals_T_339; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_341 = _csignals_T_9 | _csignals_T_340; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_342 = _csignals_T_7 | _csignals_T_341; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_343 = _csignals_T_5 | _csignals_T_342; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_344 = _csignals_T_3 | _csignals_T_343; // @[Lookup.scala:31:38, :34:39]
wire csignals_4 = _csignals_T_1 | _csignals_T_344; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_353 = _csignals_T_83 | _csignals_T_352; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_354 = _csignals_T_81 | _csignals_T_353; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_355 = _csignals_T_79 | _csignals_T_354; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_356 = _csignals_T_77 | _csignals_T_355; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_357 = _csignals_T_75 | _csignals_T_356; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_358 = _csignals_T_73 | _csignals_T_357; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_359 = _csignals_T_71 | _csignals_T_358; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_360 = _csignals_T_69 | _csignals_T_359; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_361 = _csignals_T_67 | _csignals_T_360; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_362 = _csignals_T_65 | _csignals_T_361; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_363 = _csignals_T_63 | _csignals_T_362; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_364 = ~_csignals_T_61 & _csignals_T_363; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_365 = ~_csignals_T_59 & _csignals_T_364; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_366 = _csignals_T_57 | _csignals_T_365; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_367 = _csignals_T_55 | _csignals_T_366; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_368 = _csignals_T_53 | _csignals_T_367; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_369 = _csignals_T_51 | _csignals_T_368; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_370 = _csignals_T_49 | _csignals_T_369; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_371 = _csignals_T_47 | _csignals_T_370; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_372 = _csignals_T_45 | _csignals_T_371; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_373 = _csignals_T_43 | _csignals_T_372; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_374 = _csignals_T_41 | _csignals_T_373; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_375 = _csignals_T_39 | _csignals_T_374; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_376 = ~_csignals_T_37 & _csignals_T_375; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_377 = ~_csignals_T_35 & _csignals_T_376; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_378 = ~_csignals_T_33 & _csignals_T_377; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_379 = ~_csignals_T_31 & _csignals_T_378; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_380 = ~_csignals_T_29 & _csignals_T_379; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_381 = ~_csignals_T_27 & _csignals_T_380; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_382 = ~_csignals_T_25 & _csignals_T_381; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_383 = ~_csignals_T_23 & _csignals_T_382; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_384 = ~_csignals_T_21 & _csignals_T_383; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_385 = ~_csignals_T_19 & _csignals_T_384; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_386 = ~_csignals_T_17 & _csignals_T_385; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_387 = _csignals_T_15 | _csignals_T_386; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_388 = _csignals_T_13 | _csignals_T_387; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_389 = _csignals_T_11 | _csignals_T_388; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_390 = ~_csignals_T_9 & _csignals_T_389; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_391 = ~_csignals_T_7 & _csignals_T_390; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_392 = ~_csignals_T_5 & _csignals_T_391; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_393 = ~_csignals_T_3 & _csignals_T_392; // @[Lookup.scala:31:38, :34:39]
wire csignals_5 = ~_csignals_T_1 & _csignals_T_393; // @[Lookup.scala:31:38, :34:39]
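  // csignals_6 (io_ctl_alu_fun): ALU function code for the execute stage.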
wire [3:0] _csignals_T_401 = _csignals_T_85 ? 4'hA : 4'h0; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_402 = _csignals_T_83 ? 4'hA : _csignals_T_401; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_403 = _csignals_T_81 ? 4'hA : _csignals_T_402; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_404 = _csignals_T_79 ? 4'hA : _csignals_T_403; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_405 = _csignals_T_77 ? 4'hA : _csignals_T_404; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_406 = _csignals_T_75 ? 4'hA : _csignals_T_405; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_407 = _csignals_T_73 ? 4'h0 : _csignals_T_406; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_408 = _csignals_T_71 ? 4'h0 : _csignals_T_407; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_409 = _csignals_T_69 ? 4'h0 : _csignals_T_408; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_410 = _csignals_T_67 ? 4'h0 : _csignals_T_409; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_411 = _csignals_T_65 ? 4'h0 : _csignals_T_410; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_412 = _csignals_T_63 ? 4'h0 : _csignals_T_411; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_413 = _csignals_T_61 ? 4'h0 : _csignals_T_412; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_414 = _csignals_T_59 ? 4'h0 : _csignals_T_413; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_415 = _csignals_T_57 ? 4'h3 : _csignals_T_414; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_416 = _csignals_T_55 ? 4'h4 : _csignals_T_415; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_417 = _csignals_T_53 ? 4'h7 : _csignals_T_416; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_418 = _csignals_T_51 ? 4'h6 : _csignals_T_417; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_419 = _csignals_T_49 ? 4'h5 : _csignals_T_418; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_420 = _csignals_T_47 ? 4'h9 : _csignals_T_419; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_421 = _csignals_T_45 ? 4'h8 : _csignals_T_420; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_422 = _csignals_T_43 ? 4'h1 : _csignals_T_421; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_423 = _csignals_T_41 ? 4'h0 : _csignals_T_422; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_424 = _csignals_T_39 ? 4'h2 : _csignals_T_423; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_425 = _csignals_T_37 ? 4'h3 : _csignals_T_424; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_426 = _csignals_T_35 ? 4'h4 : _csignals_T_425; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_427 = _csignals_T_33 ? 4'h2 : _csignals_T_426; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_428 = _csignals_T_31 ? 4'h9 : _csignals_T_427; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_429 = _csignals_T_29 ? 4'h8 : _csignals_T_428; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_430 = _csignals_T_27 ? 4'h7 : _csignals_T_429; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_431 = _csignals_T_25 ? 4'h6 : _csignals_T_430; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_432 = _csignals_T_23 ? 4'h5 : _csignals_T_431; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_433 = _csignals_T_21 ? 4'h0 : _csignals_T_432; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_434 = _csignals_T_19 ? 4'hB : _csignals_T_433; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_435 = _csignals_T_17 ? 4'h0 : _csignals_T_434; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_436 = _csignals_T_15 ? 4'h0 : _csignals_T_435; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_437 = _csignals_T_13 ? 4'h0 : _csignals_T_436; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_438 = _csignals_T_11 ? 4'h0 : _csignals_T_437; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_439 = _csignals_T_9 ? 4'h0 : _csignals_T_438; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_440 = _csignals_T_7 ? 4'h0 : _csignals_T_439; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_441 = _csignals_T_5 ? 4'h0 : _csignals_T_440; // @[Lookup.scala:31:38, :34:39]
wire [3:0] _csignals_T_442 = _csignals_T_3 ? 4'h0 : _csignals_T_441; // @[Lookup.scala:31:38, :34:39]
assign csignals_6 = _csignals_T_1 ? 4'h0 : _csignals_T_442; // @[Lookup.scala:31:38, :34:39]
assign io_ctl_alu_fun_0 = csignals_6; // @[Lookup.scala:34:39]
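  // csignals_7 (io_ctl_wb_sel): writeback source select; the row values are
  // consistent with ALU result, load data, PC+4 (JAL/JALR) and CSR read data.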
wire [1:0] _csignals_T_450 = {2{_csignals_T_85}}; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_451 = _csignals_T_83 ? 2'h3 : _csignals_T_450; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_452 = _csignals_T_81 ? 2'h3 : _csignals_T_451; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_453 = _csignals_T_79 ? 2'h3 : _csignals_T_452; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_454 = _csignals_T_77 ? 2'h3 : _csignals_T_453; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_455 = _csignals_T_75 ? 2'h3 : _csignals_T_454; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_456 = _csignals_T_73 ? 2'h0 : _csignals_T_455; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_457 = _csignals_T_71 ? 2'h0 : _csignals_T_456; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_458 = _csignals_T_69 ? 2'h0 : _csignals_T_457; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_459 = _csignals_T_67 ? 2'h0 : _csignals_T_458; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_460 = _csignals_T_65 ? 2'h0 : _csignals_T_459; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_461 = _csignals_T_63 ? 2'h0 : _csignals_T_460; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_462 = _csignals_T_61 ? 2'h2 : _csignals_T_461; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_463 = _csignals_T_59 ? 2'h2 : _csignals_T_462; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_464 = _csignals_T_57 ? 2'h0 : _csignals_T_463; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_465 = _csignals_T_55 ? 2'h0 : _csignals_T_464; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_466 = _csignals_T_53 ? 2'h0 : _csignals_T_465; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_467 = _csignals_T_51 ? 2'h0 : _csignals_T_466; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_468 = _csignals_T_49 ? 2'h0 : _csignals_T_467; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_469 = _csignals_T_47 ? 2'h0 : _csignals_T_468; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_470 = _csignals_T_45 ? 2'h0 : _csignals_T_469; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_471 = _csignals_T_43 ? 2'h0 : _csignals_T_470; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_472 = _csignals_T_41 ? 2'h0 : _csignals_T_471; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_473 = _csignals_T_39 ? 2'h0 : _csignals_T_472; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_474 = _csignals_T_37 ? 2'h0 : _csignals_T_473; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_475 = _csignals_T_35 ? 2'h0 : _csignals_T_474; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_476 = _csignals_T_33 ? 2'h0 : _csignals_T_475; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_477 = _csignals_T_31 ? 2'h0 : _csignals_T_476; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_478 = _csignals_T_29 ? 2'h0 : _csignals_T_477; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_479 = _csignals_T_27 ? 2'h0 : _csignals_T_478; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_480 = _csignals_T_25 ? 2'h0 : _csignals_T_479; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_481 = _csignals_T_23 ? 2'h0 : _csignals_T_480; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_482 = _csignals_T_21 ? 2'h0 : _csignals_T_481; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_483 = _csignals_T_19 ? 2'h0 : _csignals_T_482; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_484 = _csignals_T_17 ? 2'h0 : _csignals_T_483; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_485 = _csignals_T_15 ? 2'h0 : _csignals_T_484; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_486 = _csignals_T_13 ? 2'h0 : _csignals_T_485; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_487 = _csignals_T_11 ? 2'h0 : _csignals_T_486; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_488 = _csignals_T_9 ? 2'h1 : _csignals_T_487; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_489 = _csignals_T_7 ? 2'h1 : _csignals_T_488; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_490 = _csignals_T_5 ? 2'h1 : _csignals_T_489; // @[Lookup.scala:31:38, :34:39]
wire [1:0] _csignals_T_491 = _csignals_T_3 ? 2'h1 : _csignals_T_490; // @[Lookup.scala:31:38, :34:39]
assign csignals_7 = _csignals_T_1 ? 2'h1 : _csignals_T_491; // @[Lookup.scala:31:38, :34:39]
assign io_ctl_wb_sel_0 = csignals_7; // @[Lookup.scala:34:39]
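  // csignals_8 (io_ctl_rf_wen): register-file write enable.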
wire _csignals_T_500 = _csignals_T_83 | _csignals_T_499; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_501 = _csignals_T_81 | _csignals_T_500; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_502 = _csignals_T_79 | _csignals_T_501; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_503 = _csignals_T_77 | _csignals_T_502; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_504 = _csignals_T_75 | _csignals_T_503; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_505 = ~_csignals_T_73 & _csignals_T_504; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_506 = ~_csignals_T_71 & _csignals_T_505; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_507 = ~_csignals_T_69 & _csignals_T_506; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_508 = ~_csignals_T_67 & _csignals_T_507; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_509 = ~_csignals_T_65 & _csignals_T_508; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_510 = ~_csignals_T_63 & _csignals_T_509; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_511 = _csignals_T_61 | _csignals_T_510; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_512 = _csignals_T_59 | _csignals_T_511; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_513 = _csignals_T_57 | _csignals_T_512; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_514 = _csignals_T_55 | _csignals_T_513; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_515 = _csignals_T_53 | _csignals_T_514; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_516 = _csignals_T_51 | _csignals_T_515; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_517 = _csignals_T_49 | _csignals_T_516; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_518 = _csignals_T_47 | _csignals_T_517; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_519 = _csignals_T_45 | _csignals_T_518; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_520 = _csignals_T_43 | _csignals_T_519; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_521 = _csignals_T_41 | _csignals_T_520; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_522 = _csignals_T_39 | _csignals_T_521; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_523 = _csignals_T_37 | _csignals_T_522; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_524 = _csignals_T_35 | _csignals_T_523; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_525 = _csignals_T_33 | _csignals_T_524; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_526 = _csignals_T_31 | _csignals_T_525; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_527 = _csignals_T_29 | _csignals_T_526; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_528 = _csignals_T_27 | _csignals_T_527; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_529 = _csignals_T_25 | _csignals_T_528; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_530 = _csignals_T_23 | _csignals_T_529; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_531 = _csignals_T_21 | _csignals_T_530; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_532 = _csignals_T_19 | _csignals_T_531; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_533 = _csignals_T_17 | _csignals_T_532; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_534 = ~_csignals_T_15 & _csignals_T_533; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_535 = ~_csignals_T_13 & _csignals_T_534; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_536 = ~_csignals_T_11 & _csignals_T_535; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_537 = _csignals_T_9 | _csignals_T_536; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_538 = _csignals_T_7 | _csignals_T_537; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_539 = _csignals_T_5 | _csignals_T_538; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_540 = _csignals_T_3 | _csignals_T_539; // @[Lookup.scala:31:38, :34:39]
assign csignals_8 = _csignals_T_1 | _csignals_T_540; // @[Lookup.scala:31:38, :34:39]
assign io_ctl_rf_wen_0 = csignals_8; // @[Lookup.scala:34:39]
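  // csignals_9 (io_ctl_mem_val): data-memory request valid, asserted for the
  // load and store rows.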
wire _csignals_T_584 = _csignals_T_13 | _csignals_T_583; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_585 = _csignals_T_11 | _csignals_T_584; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_586 = _csignals_T_9 | _csignals_T_585; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_587 = _csignals_T_7 | _csignals_T_586; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_588 = _csignals_T_5 | _csignals_T_587; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_589 = _csignals_T_3 | _csignals_T_588; // @[Lookup.scala:31:38, :34:39]
assign csignals_9 = _csignals_T_1 | _csignals_T_589; // @[Lookup.scala:31:38, :34:39]
assign io_ctl_mem_val_0 = csignals_9; // @[Lookup.scala:34:39]
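  // csignals_10: set only by the store rows and cleared by the load rows, so
  // this is most likely the memory-write component of io_ctl_mem_fcn.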
wire _csignals_T_633 = _csignals_T_13 | _csignals_T_632; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_634 = _csignals_T_11 | _csignals_T_633; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_635 = ~_csignals_T_9 & _csignals_T_634; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_636 = ~_csignals_T_7 & _csignals_T_635; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_637 = ~_csignals_T_5 & _csignals_T_636; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_638 = ~_csignals_T_3 & _csignals_T_637; // @[Lookup.scala:31:38, :34:39]
wire csignals_10 = ~_csignals_T_1 & _csignals_T_638; // @[Lookup.scala:31:38, :34:39]
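  // csignals_11 (io_ctl_mem_typ): memory access size/sign (byte, halfword,
  // word and their unsigned variants).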
wire [2:0] _csignals_T_681 = {1'h0, _csignals_T_15, 1'h0}; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_682 = _csignals_T_13 ? 3'h1 : _csignals_T_681; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_683 = _csignals_T_11 ? 3'h3 : _csignals_T_682; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_684 = _csignals_T_9 ? 3'h6 : _csignals_T_683; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_685 = _csignals_T_7 ? 3'h2 : _csignals_T_684; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_686 = _csignals_T_5 ? 3'h5 : _csignals_T_685; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_687 = _csignals_T_3 ? 3'h1 : _csignals_T_686; // @[Lookup.scala:31:38, :34:39]
assign csignals_11 = _csignals_T_1 ? 3'h3 : _csignals_T_687; // @[Lookup.scala:31:38, :34:39]
assign io_ctl_mem_typ_0 = csignals_11; // @[Lookup.scala:34:39]
wire [2:0] _csignals_T_691 = {_csignals_T_93, 2'h0}; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_692 = _csignals_T_91 ? 3'h4 : _csignals_T_691; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_693 = _csignals_T_89 ? 3'h4 : _csignals_T_692; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_694 = _csignals_T_87 ? 3'h4 : _csignals_T_693; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_695 = _csignals_T_85 ? 3'h7 : _csignals_T_694; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_696 = _csignals_T_83 ? 3'h7 : _csignals_T_695; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_697 = _csignals_T_81 ? 3'h6 : _csignals_T_696; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_698 = _csignals_T_79 ? 3'h5 : _csignals_T_697; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_699 = _csignals_T_77 ? 3'h6 : _csignals_T_698; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_700 = _csignals_T_75 ? 3'h5 : _csignals_T_699; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_701 = _csignals_T_73 ? 3'h0 : _csignals_T_700; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_702 = _csignals_T_71 ? 3'h0 : _csignals_T_701; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_703 = _csignals_T_69 ? 3'h0 : _csignals_T_702; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_704 = _csignals_T_67 ? 3'h0 : _csignals_T_703; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_705 = _csignals_T_65 ? 3'h0 : _csignals_T_704; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_706 = _csignals_T_63 ? 3'h0 : _csignals_T_705; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_707 = _csignals_T_61 ? 3'h0 : _csignals_T_706; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_708 = _csignals_T_59 ? 3'h0 : _csignals_T_707; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_709 = _csignals_T_57 ? 3'h0 : _csignals_T_708; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_710 = _csignals_T_55 ? 3'h0 : _csignals_T_709; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_711 = _csignals_T_53 ? 3'h0 : _csignals_T_710; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_712 = _csignals_T_51 ? 3'h0 : _csignals_T_711; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_713 = _csignals_T_49 ? 3'h0 : _csignals_T_712; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_714 = _csignals_T_47 ? 3'h0 : _csignals_T_713; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_715 = _csignals_T_45 ? 3'h0 : _csignals_T_714; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_716 = _csignals_T_43 ? 3'h0 : _csignals_T_715; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_717 = _csignals_T_41 ? 3'h0 : _csignals_T_716; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_718 = _csignals_T_39 ? 3'h0 : _csignals_T_717; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_719 = _csignals_T_37 ? 3'h0 : _csignals_T_718; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_720 = _csignals_T_35 ? 3'h0 : _csignals_T_719; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_721 = _csignals_T_33 ? 3'h0 : _csignals_T_720; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_722 = _csignals_T_31 ? 3'h0 : _csignals_T_721; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_723 = _csignals_T_29 ? 3'h0 : _csignals_T_722; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_724 = _csignals_T_27 ? 3'h0 : _csignals_T_723; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_725 = _csignals_T_25 ? 3'h0 : _csignals_T_724; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_726 = _csignals_T_23 ? 3'h0 : _csignals_T_725; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_727 = _csignals_T_21 ? 3'h0 : _csignals_T_726; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_728 = _csignals_T_19 ? 3'h0 : _csignals_T_727; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_729 = _csignals_T_17 ? 3'h0 : _csignals_T_728; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_730 = _csignals_T_15 ? 3'h0 : _csignals_T_729; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_731 = _csignals_T_13 ? 3'h0 : _csignals_T_730; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_732 = _csignals_T_11 ? 3'h0 : _csignals_T_731; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_733 = _csignals_T_9 ? 3'h0 : _csignals_T_732; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_734 = _csignals_T_7 ? 3'h0 : _csignals_T_733; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_735 = _csignals_T_5 ? 3'h0 : _csignals_T_734; // @[Lookup.scala:31:38, :34:39]
wire [2:0] _csignals_T_736 = _csignals_T_3 ? 3'h0 : _csignals_T_735; // @[Lookup.scala:31:38, :34:39]
wire [2:0] csignals_12 = _csignals_T_1 ? 3'h0 : _csignals_T_736; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_739 = ~_csignals_T_95 & _csignals_T_738; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_740 = ~_csignals_T_93 & _csignals_T_739; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_741 = ~_csignals_T_91 & _csignals_T_740; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_742 = ~_csignals_T_89 & _csignals_T_741; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_743 = ~_csignals_T_87 & _csignals_T_742; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_744 = ~_csignals_T_85 & _csignals_T_743; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_745 = ~_csignals_T_83 & _csignals_T_744; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_746 = ~_csignals_T_81 & _csignals_T_745; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_747 = ~_csignals_T_79 & _csignals_T_746; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_748 = ~_csignals_T_77 & _csignals_T_747; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_749 = ~_csignals_T_75 & _csignals_T_748; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_750 = ~_csignals_T_73 & _csignals_T_749; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_751 = ~_csignals_T_71 & _csignals_T_750; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_752 = ~_csignals_T_69 & _csignals_T_751; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_753 = ~_csignals_T_67 & _csignals_T_752; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_754 = ~_csignals_T_65 & _csignals_T_753; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_755 = ~_csignals_T_63 & _csignals_T_754; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_756 = ~_csignals_T_61 & _csignals_T_755; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_757 = ~_csignals_T_59 & _csignals_T_756; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_758 = ~_csignals_T_57 & _csignals_T_757; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_759 = ~_csignals_T_55 & _csignals_T_758; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_760 = ~_csignals_T_53 & _csignals_T_759; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_761 = ~_csignals_T_51 & _csignals_T_760; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_762 = ~_csignals_T_49 & _csignals_T_761; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_763 = ~_csignals_T_47 & _csignals_T_762; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_764 = ~_csignals_T_45 & _csignals_T_763; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_765 = ~_csignals_T_43 & _csignals_T_764; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_766 = ~_csignals_T_41 & _csignals_T_765; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_767 = ~_csignals_T_39 & _csignals_T_766; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_768 = ~_csignals_T_37 & _csignals_T_767; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_769 = ~_csignals_T_35 & _csignals_T_768; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_770 = ~_csignals_T_33 & _csignals_T_769; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_771 = ~_csignals_T_31 & _csignals_T_770; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_772 = ~_csignals_T_29 & _csignals_T_771; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_773 = ~_csignals_T_27 & _csignals_T_772; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_774 = ~_csignals_T_25 & _csignals_T_773; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_775 = ~_csignals_T_23 & _csignals_T_774; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_776 = ~_csignals_T_21 & _csignals_T_775; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_777 = ~_csignals_T_19 & _csignals_T_776; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_778 = ~_csignals_T_17 & _csignals_T_777; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_779 = ~_csignals_T_15 & _csignals_T_778; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_780 = ~_csignals_T_13 & _csignals_T_779; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_781 = ~_csignals_T_11 & _csignals_T_780; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_782 = ~_csignals_T_9 & _csignals_T_781; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_783 = ~_csignals_T_7 & _csignals_T_782; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_784 = ~_csignals_T_5 & _csignals_T_783; // @[Lookup.scala:31:38, :34:39]
wire _csignals_T_785 = ~_csignals_T_3 & _csignals_T_784; // @[Lookup.scala:31:38, :34:39]
wire csignals_13 = ~_csignals_T_1 & _csignals_T_785; // @[Lookup.scala:31:38, :34:39]
wire _ctrl_exe_pc_sel_T = io_dat_exe_br_type_0 == 4'h0; // @[cpath.scala:58:7, :136:49]
wire _ctrl_exe_pc_sel_T_1 = io_dat_exe_br_type_0 == 4'h1; // @[cpath.scala:58:7, :137:49]
wire _ctrl_exe_pc_sel_T_2 = ~io_dat_exe_br_eq_0; // @[cpath.scala:58:7, :137:65]
wire [1:0] _ctrl_exe_pc_sel_T_3 = {1'h0, _ctrl_exe_pc_sel_T_2}; // @[cpath.scala:137:{64,65}]
wire _ctrl_exe_pc_sel_T_4 = io_dat_exe_br_type_0 == 4'h2; // @[cpath.scala:58:7, :138:49]
wire [1:0] _ctrl_exe_pc_sel_T_5 = {1'h0, io_dat_exe_br_eq_0}; // @[cpath.scala:58:7, :138:64]
wire _ctrl_exe_pc_sel_T_6 = io_dat_exe_br_type_0 == 4'h3; // @[cpath.scala:58:7, :139:49]
wire _ctrl_exe_pc_sel_T_7 = ~io_dat_exe_br_lt_0; // @[cpath.scala:58:7, :139:65]
wire [1:0] _ctrl_exe_pc_sel_T_8 = {1'h0, _ctrl_exe_pc_sel_T_7}; // @[cpath.scala:139:{64,65}]
wire _ctrl_exe_pc_sel_T_9 = io_dat_exe_br_type_0 == 4'h4; // @[cpath.scala:58:7, :140:49]
wire _ctrl_exe_pc_sel_T_10 = ~io_dat_exe_br_ltu_0; // @[cpath.scala:58:7, :140:65]
wire [1:0] _ctrl_exe_pc_sel_T_11 = {1'h0, _ctrl_exe_pc_sel_T_10}; // @[cpath.scala:140:{64,65}]
wire _ctrl_exe_pc_sel_T_12 = io_dat_exe_br_type_0 == 4'h5; // @[cpath.scala:58:7, :141:49]
wire [1:0] _ctrl_exe_pc_sel_T_13 = {1'h0, io_dat_exe_br_lt_0}; // @[cpath.scala:58:7, :141:64]
wire _ctrl_exe_pc_sel_T_14 = io_dat_exe_br_type_0 == 4'h6; // @[cpath.scala:58:7, :142:49]
wire [1:0] _ctrl_exe_pc_sel_T_15 = {1'h0, io_dat_exe_br_ltu_0}; // @[cpath.scala:58:7, :142:64]
wire _ctrl_exe_pc_sel_T_16 = io_dat_exe_br_type_0 == 4'h7; // @[cpath.scala:58:7, :143:49]
wire _ctrl_exe_pc_sel_T_17 = io_dat_exe_br_type_0 == 4'h8; // @[cpath.scala:58:7, :144:49]
wire [1:0] _ctrl_exe_pc_sel_T_18 = {_ctrl_exe_pc_sel_T_17, 1'h0}; // @[cpath.scala:144:{29,49}]
wire [1:0] _ctrl_exe_pc_sel_T_19 = _ctrl_exe_pc_sel_T_16 ? 2'h1 : _ctrl_exe_pc_sel_T_18; // @[cpath.scala:143:{29,49}, :144:29]
wire [1:0] _ctrl_exe_pc_sel_T_20 = _ctrl_exe_pc_sel_T_14 ? _ctrl_exe_pc_sel_T_15 : _ctrl_exe_pc_sel_T_19; // @[cpath.scala:142:{29,49,64}, :143:29]
wire [1:0] _ctrl_exe_pc_sel_T_21 = _ctrl_exe_pc_sel_T_12 ? _ctrl_exe_pc_sel_T_13 : _ctrl_exe_pc_sel_T_20; // @[cpath.scala:141:{29,49,64}, :142:29]
wire [1:0] _ctrl_exe_pc_sel_T_22 = _ctrl_exe_pc_sel_T_9 ? _ctrl_exe_pc_sel_T_11 : _ctrl_exe_pc_sel_T_21; // @[cpath.scala:140:{29,49,64}, :141:29]
wire [1:0] _ctrl_exe_pc_sel_T_23 = _ctrl_exe_pc_sel_T_6 ? _ctrl_exe_pc_sel_T_8 : _ctrl_exe_pc_sel_T_22; // @[cpath.scala:139:{29,49,64}, :140:29]
wire [1:0] _ctrl_exe_pc_sel_T_24 = _ctrl_exe_pc_sel_T_4 ? _ctrl_exe_pc_sel_T_5 : _ctrl_exe_pc_sel_T_23; // @[cpath.scala:138:{29,49,64}, :139:29]
wire [1:0] _ctrl_exe_pc_sel_T_25 = _ctrl_exe_pc_sel_T_1 ? _ctrl_exe_pc_sel_T_3 : _ctrl_exe_pc_sel_T_24; // @[cpath.scala:137:{29,49,64}, :138:29]
wire [1:0] _ctrl_exe_pc_sel_T_26 = _ctrl_exe_pc_sel_T ? 2'h0 : _ctrl_exe_pc_sel_T_25; // @[cpath.scala:136:{29,49}, :137:29]
assign ctrl_exe_pc_sel = io_ctl_pipeline_kill_0 ? 2'h3 : _ctrl_exe_pc_sel_T_26; // @[cpath.scala:58:7, :135:29, :136:29]
assign io_ctl_exe_pc_sel_0 = ctrl_exe_pc_sel; // @[cpath.scala:58:7, :135:29]
wire _ifkill_T = |ctrl_exe_pc_sel; // @[cpath.scala:135:29, :148:35]
wire _ifkill_T_1 = _ifkill_T | csignals_13; // @[Lookup.scala:34:39]
reg ifkill_REG; // @[cpath.scala:148:68]
assign ifkill = _ifkill_T_1 | ifkill_REG; // @[cpath.scala:148:{45,58,68}]
assign io_ctl_if_kill_0 = ifkill; // @[cpath.scala:58:7, :148:58]
assign deckill = |ctrl_exe_pc_sel; // @[cpath.scala:135:29, :148:35, :149:35]
assign io_ctl_dec_kill_0 = deckill; // @[cpath.scala:58:7, :149:35]
wire _io_ctl_pipeline_kill_T = io_dat_csr_eret_0 | io_ctl_mem_exception_0; // @[cpath.scala:58:7, :153:45]
assign _io_ctl_pipeline_kill_T_1 = _io_ctl_pipeline_kill_T | io_dat_csr_interrupt_0; // @[cpath.scala:58:7, :153:{45,69}]
assign io_ctl_pipeline_kill_0 = _io_ctl_pipeline_kill_T_1; // @[cpath.scala:58:7, :153:69]
wire _dec_illegal_T = ~csignals_0; // @[Lookup.scala:34:39]
wire dec_illegal = _dec_illegal_T & io_dat_dec_valid_0; // @[cpath.scala:58:7, :155:{23,36}]
wire _stall_T_11; // @[cpath.scala:230:117]
assign io_ctl_dec_stall_0 = stall; // @[cpath.scala:58:7, :158:22]
wire [4:0] dec_rs1_addr = io_dat_dec_inst_0[19:15]; // @[cpath.scala:58:7, :160:38]
wire [4:0] rs1_addr = io_dat_dec_inst_0[19:15]; // @[cpath.scala:58:7, :160:38, :278:34]
wire [4:0] dec_rs2_addr = io_dat_dec_inst_0[24:20]; // @[cpath.scala:58:7, :161:38]
wire [4:0] dec_wbaddr = io_dat_dec_inst_0[11:7]; // @[cpath.scala:58:7, :162:38]
wire dec_rs1_oen = ~deckill & csignals_4; // @[Lookup.scala:34:39]
wire dec_rs2_oen = ~deckill & csignals_5; // @[Lookup.scala:34:39]
reg [4:0] exe_reg_wbaddr; // @[cpath.scala:166:33]
reg [4:0] mem_reg_wbaddr; // @[cpath.scala:167:33]
reg [4:0] wb_reg_wbaddr; // @[cpath.scala:168:33]
reg exe_reg_ctrl_rf_wen; // @[cpath.scala:169:37]
reg mem_reg_ctrl_rf_wen; // @[cpath.scala:170:37]
reg wb_reg_ctrl_rf_wen; // @[cpath.scala:171:37]
reg exe_reg_illegal; // @[cpath.scala:172:37]
reg exe_reg_is_csr; // @[cpath.scala:174:32]
wire _full_stall_T_3; // @[cpath.scala:250:18]
assign io_ctl_full_stall_0 = full_stall; // @[cpath.scala:58:7, :177:25]
wire _exe_reg_is_csr_T = |csignals_12; // @[Lookup.scala:34:39]
wire _exe_reg_is_csr_T_1 = csignals_12 != 3'h4; // @[Lookup.scala:34:39]
wire _exe_reg_is_csr_T_2 = _exe_reg_is_csr_T & _exe_reg_is_csr_T_1; // @[cpath.scala:191:{44,54,68}]
reg exe_inst_is_load; // @[cpath.scala:210:34]
wire _exe_inst_is_load_T = ~csignals_10; // @[Lookup.scala:34:39]
wire _exe_inst_is_load_T_1 = csignals_9 & _exe_inst_is_load_T; // @[Lookup.scala:34:39]
wire _stall_T = exe_reg_wbaddr == dec_rs1_addr; // @[cpath.scala:160:38, :166:33, :229:55]
wire _stall_T_1 = exe_inst_is_load & _stall_T; // @[cpath.scala:210:34, :229:{36,55}]
wire _stall_T_2 = |exe_reg_wbaddr; // @[cpath.scala:166:33, :229:92]
wire _stall_T_3 = _stall_T_1 & _stall_T_2; // @[cpath.scala:229:{36,73,92}]
wire _stall_T_4 = _stall_T_3 & dec_rs1_oen; // @[cpath.scala:163:26, :229:{73,101}]
wire _stall_T_5 = exe_reg_wbaddr == dec_rs2_addr; // @[cpath.scala:161:38, :166:33, :230:55]
wire _stall_T_6 = exe_inst_is_load & _stall_T_5; // @[cpath.scala:210:34, :230:{36,55}]
wire _stall_T_7 = |exe_reg_wbaddr; // @[cpath.scala:166:33, :229:92, :230:92]
wire _stall_T_8 = _stall_T_6 & _stall_T_7; // @[cpath.scala:230:{36,73,92}]
wire _stall_T_9 = _stall_T_8 & dec_rs2_oen; // @[cpath.scala:164:26, :230:{73,101}]
wire _stall_T_10 = _stall_T_4 | _stall_T_9; // @[cpath.scala:229:{101,117}, :230:101]
assign _stall_T_11 = _stall_T_10 | exe_reg_is_csr; // @[cpath.scala:174:32, :229:117, :230:117]
assign stall = _stall_T_11; // @[cpath.scala:158:22, :230:117]
wire _full_stall_T = io_dat_mem_ctrl_dmem_val_0 & io_dmem_resp_valid_0; // @[cpath.scala:58:7, :250:30]
wire _full_stall_T_1 = ~io_dat_mem_ctrl_dmem_val_0; // @[cpath.scala:58:7, :250:56]
wire _full_stall_T_2 = _full_stall_T | _full_stall_T_1; // @[cpath.scala:250:{30,53,56}]
assign _full_stall_T_3 = ~_full_stall_T_2; // @[cpath.scala:250:{18,53}]
assign full_stall = _full_stall_T_3; // @[cpath.scala:177:25, :250:18]
reg io_ctl_fencei_REG; // @[cpath.scala:267:45]
assign _io_ctl_fencei_T = csignals_13 | io_ctl_fencei_REG; // @[Lookup.scala:34:39]
assign io_ctl_fencei_0 = _io_ctl_fencei_T; // @[cpath.scala:58:7, :267:35]
wire _io_ctl_mem_exception_T = exe_reg_illegal | io_dat_exe_inst_misaligned_0; // @[cpath.scala:58:7, :172:37, :270:53]
wire _io_ctl_mem_exception_T_1 = ~io_dat_csr_eret_0; // @[cpath.scala:58:7, :270:87]
wire _io_ctl_mem_exception_T_2 = _io_ctl_mem_exception_T & _io_ctl_mem_exception_T_1; // @[cpath.scala:270:{53,84,87}]
reg io_ctl_mem_exception_REG; // @[cpath.scala:270:35]
assign _io_ctl_mem_exception_T_3 = io_ctl_mem_exception_REG | io_dat_mem_data_misaligned_0; // @[cpath.scala:58:7, :270:{35,105}]
assign io_ctl_mem_exception_0 = _io_ctl_mem_exception_T_3; // @[cpath.scala:58:7, :270:105]
reg io_ctl_mem_exception_cause_REG; // @[cpath.scala:271:45]
reg io_ctl_mem_exception_cause_REG_1; // @[cpath.scala:272:45]
wire [2:0] _io_ctl_mem_exception_cause_T = {1'h1, io_dat_mem_store_0, 1'h0}; // @[cpath.scala:58:7, :273:37]
wire [2:0] _io_ctl_mem_exception_cause_T_1 = io_ctl_mem_exception_cause_REG_1 ? 3'h0 : _io_ctl_mem_exception_cause_T; // @[cpath.scala:272:{37,45}, :273:37]
wire [2:0] _io_ctl_mem_exception_cause_T_2 = io_ctl_mem_exception_cause_REG ? 3'h2 : _io_ctl_mem_exception_cause_T_1; // @[cpath.scala:271:{37,45}, :272:37]
assign io_ctl_mem_exception_cause_0 = {29'h0, _io_ctl_mem_exception_cause_T_2}; // @[cpath.scala:58:7, :271:{31,37}]
wire _csr_ren_T = csignals_12 == 3'h6; // @[Lookup.scala:34:39]
wire _csr_ren_T_1 = &csignals_12; // @[Lookup.scala:34:39]
wire _csr_ren_T_2 = _csr_ren_T | _csr_ren_T_1; // @[cpath.scala:279:{30,40,54}]
wire _csr_ren_T_3 = rs1_addr == 5'h0; // @[cpath.scala:278:34, :279:77]
wire csr_ren = _csr_ren_T_2 & _csr_ren_T_3; // @[cpath.scala:279:{40,65,77}]
assign _io_ctl_csr_cmd_T = csr_ren ? 3'h2 : csignals_12; // @[Lookup.scala:34:39]
assign io_ctl_csr_cmd_0 = _io_ctl_csr_cmd_T; // @[cpath.scala:58:7, :280:25]
assign io_ctl_mem_fcn_0 = {1'h0, csignals_10}; // @[Lookup.scala:34:39]
wire _T_2 = ~stall & ~full_stall; // @[cpath.scala:158:22, :177:25, :178:{10,17,20}]
wire _T_4 = stall & ~full_stall; // @[cpath.scala:158:22, :177:25, :178:20, :195:21]
always @(posedge clock) begin // @[cpath.scala:58:7]
ifkill_REG <= csignals_13; // @[Lookup.scala:34:39]
if (_T_2) // @[cpath.scala:178:17]
exe_reg_wbaddr <= deckill ? 5'h0 : dec_wbaddr; // @[cpath.scala:149:35, :162:38, :166:33, :181:7, :182:30, :189:30]
else if (_T_4) // @[cpath.scala:195:21]
exe_reg_wbaddr <= 5'h0; // @[cpath.scala:166:33]
if (full_stall) begin // @[cpath.scala:177:25]
end
else begin // @[cpath.scala:177:25]
mem_reg_wbaddr <= exe_reg_wbaddr; // @[cpath.scala:166:33, :167:33]
wb_reg_wbaddr <= mem_reg_wbaddr; // @[cpath.scala:167:33, :168:33]
end
io_ctl_fencei_REG <= csignals_13; // @[Lookup.scala:34:39]
io_ctl_mem_exception_REG <= _io_ctl_mem_exception_T_2; // @[cpath.scala:270:{35,84}]
io_ctl_mem_exception_cause_REG <= exe_reg_illegal; // @[cpath.scala:172:37, :271:45]
io_ctl_mem_exception_cause_REG_1 <= io_dat_exe_inst_misaligned_0; // @[cpath.scala:58:7, :272:45]
if (reset) begin // @[cpath.scala:58:7]
exe_reg_ctrl_rf_wen <= 1'h0; // @[cpath.scala:169:37]
mem_reg_ctrl_rf_wen <= 1'h0; // @[cpath.scala:170:37]
wb_reg_ctrl_rf_wen <= 1'h0; // @[cpath.scala:171:37]
exe_reg_illegal <= 1'h0; // @[cpath.scala:172:37]
exe_reg_is_csr <= 1'h0; // @[cpath.scala:174:32]
exe_inst_is_load <= 1'h0; // @[cpath.scala:210:34]
end
else begin // @[cpath.scala:58:7]
exe_reg_ctrl_rf_wen <= _T_2 ? ~deckill & csignals_8 : ~_T_4 & exe_reg_ctrl_rf_wen; // @[Lookup.scala:34:39]
if (full_stall) begin // @[cpath.scala:177:25]
end
else begin // @[cpath.scala:177:25]
mem_reg_ctrl_rf_wen <= exe_reg_ctrl_rf_wen; // @[cpath.scala:169:37, :170:37]
wb_reg_ctrl_rf_wen <= mem_reg_ctrl_rf_wen; // @[cpath.scala:170:37, :171:37]
exe_inst_is_load <= _exe_inst_is_load_T_1; // @[cpath.scala:210:34, :214:37]
end
exe_reg_illegal <= ~io_dat_csr_eret_0 & (_T_2 ? ~deckill & dec_illegal : ~_T_4 & exe_reg_illegal); // @[cpath.scala:58:7, :149:35, :155:36, :163:26, :169:37, :172:37, :178:17, :179:4, :181:7, :185:30, :192:30, :195:21, :196:4, :199:27, :201:27, :219:4, :220:26]
exe_reg_is_csr <= _T_2 ? ~deckill & _exe_reg_is_csr_T_2 : ~_T_4 & exe_reg_is_csr; // @[cpath.scala:149:35, :163:26, :169:37, :174:32, :178:17, :179:4, :181:7, :184:30, :191:{30,54}, :195:21, :196:4, :199:27, :200:27]
end
end // always @(posedge)
assign io_ctl_dec_stall = io_ctl_dec_stall_0; // @[cpath.scala:58:7]
assign io_ctl_full_stall = io_ctl_full_stall_0; // @[cpath.scala:58:7]
assign io_ctl_exe_pc_sel = io_ctl_exe_pc_sel_0; // @[cpath.scala:58:7]
assign io_ctl_br_type = io_ctl_br_type_0; // @[cpath.scala:58:7]
assign io_ctl_if_kill = io_ctl_if_kill_0; // @[cpath.scala:58:7]
assign io_ctl_dec_kill = io_ctl_dec_kill_0; // @[cpath.scala:58:7]
assign io_ctl_op1_sel = io_ctl_op1_sel_0; // @[cpath.scala:58:7]
assign io_ctl_op2_sel = io_ctl_op2_sel_0; // @[cpath.scala:58:7]
assign io_ctl_alu_fun = io_ctl_alu_fun_0; // @[cpath.scala:58:7]
assign io_ctl_wb_sel = io_ctl_wb_sel_0; // @[cpath.scala:58:7]
assign io_ctl_rf_wen = io_ctl_rf_wen_0; // @[cpath.scala:58:7]
assign io_ctl_mem_val = io_ctl_mem_val_0; // @[cpath.scala:58:7]
assign io_ctl_mem_fcn = io_ctl_mem_fcn_0; // @[cpath.scala:58:7]
assign io_ctl_mem_typ = io_ctl_mem_typ_0; // @[cpath.scala:58:7]
assign io_ctl_csr_cmd = io_ctl_csr_cmd_0; // @[cpath.scala:58:7]
assign io_ctl_fencei = io_ctl_fencei_0; // @[cpath.scala:58:7]
assign io_ctl_pipeline_kill = io_ctl_pipeline_kill_0; // @[cpath.scala:58:7]
assign io_ctl_mem_exception = io_ctl_mem_exception_0; // @[cpath.scala:58:7]
assign io_ctl_mem_exception_cause = io_ctl_mem_exception_cause_0; // @[cpath.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
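// Illustrative sizing (hypothetical values, not drawn from any real configuration):
//   ReservableListBufferParameters(numEntries = 32, numLists = 16, numBeats = 8)
// gives entryBits = 5, listBits = 4, beatBits = 3 -- simply log2Ceil of each count,
// with the single-element corner cases padded up to 1 bit to avoid zero-width wires.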
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
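// Illustrative encoding (assuming numTlTxns = 8, hence 3 index bits): a read with
// buffer index i gets source = Cat(0.U(1.W), i) = 0_iii, while a write with free-ID
// index j gets source = Cat(1.U(1.W), j) = 1_jjj. The MSB is stripped again below
// (see 'strippedResponseSourceId') to recover the per-direction index from a
// D-channel response.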
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
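// Worked example of the lowest-free-ID computation (hypothetical numTlTxns = 4):
//   usedWriteIds           = b0101
//   ~usedWriteIds          = b1010
//   leftOR(~usedWriteIds)  = b1110  (each free bit smeared towards the MSB)
//   (... << 1)             = b11100
//   ~(...) & ~usedWriteIds = b0010  -> one-hot pick of write ID 1, the lowest free slot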
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle the case where the write ack arrives before the write request is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
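// In other words, the D-channel ack names a write source whose 'used' bit has not been
// set yet (edgeOut.done(wOut) has not fired), so the ack has raced ahead of the final
// request beat; gating 'out.d.ready' and 'okB.valid' below simply back-pressures the
// ack until the write is marked as issued.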
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
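// Note on the (reg & ~clr) | set update idiom above: if the same bit were both set and
// cleared in one cycle, the set would win.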
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
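// Example (hypothetical 4-beat stored burst): 'beats' holds numBeats1 = 3, so a new
// burst loads unwindBeats1 = 3 and restarts nextBeatCounter at 0; each SRAM read then
// advances beatCounter 0 -> 1 -> 2 -> 3, and isUnwindBurstOver asserts while the last
// beat is being read out.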
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
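// 'x holdUnless en' behaves like Mux(en, x, RegEnable(x, en)): the SRAM output is
// captured on the cycle after a read (RegNext(dataMemReadEnable)) and then held while
// 'ioDataOut' back-pressures us -- readAndHold behaviour, but only for the one SRAM
// that was actually read.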
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
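// Illustrative lifecycle of the ReservableListBuffer above (hypothetical traffic,
// starting from an empty buffer):
//   1. Two reads with AXI ID 3 arrive: ioReserve fires twice for list 3, handing out
//      entries 0 and 1 and linking 0 -> 1 through the 'next' RAM.
//   2. The TileLink response for entry 1 returns first: it is out-of-order, so its
//      beats are stashed in the 'data' SRAM and dataIsPresent(1) is set.
//   3. The response for entry 0 returns: it is in-order, flows straight to ioDataOut,
//      and, since entry 1's data is already present, starts the unwind FSM, which
//      replays entry 1 from the SRAM so that list 3 drains in order.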
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved, we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_3( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File RegisterRouter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
import freechips.rocketchip.resources.{Device, Resource, ResourceBindings}
import freechips.rocketchip.prci.{NoCrossing}
import freechips.rocketchip.regmapper.{RegField, RegMapper, RegMapperParams, RegMapperInput, RegisterRouter}
import freechips.rocketchip.util.{BundleField, ControlKey, ElaborationArtefacts, GenRegDescsAnno}
import scala.math.min
class TLRegisterRouterExtraBundle(val sourceBits: Int, val sizeBits: Int) extends Bundle {
val source = UInt((sourceBits max 1).W)
val size = UInt((sizeBits max 1).W)
}
case object TLRegisterRouterExtra extends ControlKey[TLRegisterRouterExtraBundle]("tlrr_extra")
case class TLRegisterRouterExtraField(sourceBits: Int, sizeBits: Int) extends BundleField[TLRegisterRouterExtraBundle](TLRegisterRouterExtra, Output(new TLRegisterRouterExtraBundle(sourceBits, sizeBits)), x => {
x.size := 0.U
x.source := 0.U
})
/** TLRegisterNode is a specialized TL SinkNode that encapsulates MMIO registers.
 * It provides functionality for describing and outputting metadata about the registers in several formats.
* It also provides a concrete implementation of a regmap function that will be used
* to wire a map of internal registers associated with this node to the node's interconnect port.
*/
case class TLRegisterNode(
address: Seq[AddressSet],
device: Device,
deviceKey: String = "reg/control",
concurrency: Int = 0,
beatBytes: Int = 4,
undefZero: Boolean = true,
executable: Boolean = false)(
implicit valName: ValName)
extends SinkNode(TLImp)(Seq(TLSlavePortParameters.v1(
Seq(TLSlaveParameters.v1(
address = address,
resources = Seq(Resource(device, deviceKey)),
executable = executable,
supportsGet = TransferSizes(1, beatBytes),
supportsPutPartial = TransferSizes(1, beatBytes),
supportsPutFull = TransferSizes(1, beatBytes),
fifoId = Some(0))), // requests are handled in order
beatBytes = beatBytes,
minLatency = min(concurrency, 1)))) with TLFormatNode // the Queue adds at most one cycle
{
val size = 1 << log2Ceil(1 + address.map(_.max).max - address.map(_.base).min)
require (size >= beatBytes)
address.foreach { case a =>
require (a.widen(size-1).base == address.head.widen(size-1).base,
s"TLRegisterNode addresses (${address}) must be aligned to its size ${size}")
}
// Calling this method causes the matching TL2 bundle to be
// configured to route all requests to the listed RegFields.
def regmap(mapping: RegField.Map*) = {
val (bundleIn, edge) = this.in(0)
val a = bundleIn.a
val d = bundleIn.d
val fields = TLRegisterRouterExtraField(edge.bundle.sourceBits, edge.bundle.sizeBits) +: a.bits.params.echoFields
val params = RegMapperParams(log2Up(size/beatBytes), beatBytes, fields)
val in = Wire(Decoupled(new RegMapperInput(params)))
in.bits.read := a.bits.opcode === TLMessages.Get
in.bits.index := edge.addr_hi(a.bits)
in.bits.data := a.bits.data
in.bits.mask := a.bits.mask
Connectable.waiveUnmatched(in.bits.extra, a.bits.echo) match {
case (lhs, rhs) => lhs :<= rhs
}
val a_extra = in.bits.extra(TLRegisterRouterExtra)
a_extra.source := a.bits.source
a_extra.size := a.bits.size
// Invoke the register map builder
val out = RegMapper(beatBytes, concurrency, undefZero, in, mapping:_*)
// No flow control needed
in.valid := a.valid
a.ready := in.ready
d.valid := out.valid
out.ready := d.ready
// We must restore the size to enable width adapters to work
val d_extra = out.bits.extra(TLRegisterRouterExtra)
d.bits := edge.AccessAck(toSource = d_extra.source, lgSize = d_extra.size)
// avoid a Mux on the data bus by manually overriding two fields
d.bits.data := out.bits.data
Connectable.waiveUnmatched(d.bits.echo, out.bits.extra) match {
case (lhs, rhs) => lhs :<= rhs
}
d.bits.opcode := Mux(out.bits.read, TLMessages.AccessAckData, TLMessages.AccessAck)
// Tie off unused channels
bundleIn.b.valid := false.B
bundleIn.c.ready := true.B
bundleIn.e.ready := true.B
genRegDescsJson(mapping:_*)
}
def genRegDescsJson(mapping: RegField.Map*): Unit = {
// Dump out the register map for documentation purposes.
val base = address.head.base
val baseHex = s"0x${base.toInt.toHexString}"
val name = s"${device.describe(ResourceBindings()).name}.At${baseHex}"
val json = GenRegDescsAnno.serialize(base, name, mapping:_*)
var suffix = 0
while( ElaborationArtefacts.contains(s"${baseHex}.${suffix}.regmap.json")) {
suffix = suffix + 1
}
ElaborationArtefacts.add(s"${baseHex}.${suffix}.regmap.json", json)
val module = Module.currentModule.get.asInstanceOf[RawModule]
GenRegDescsAnno.anno(
module,
base,
mapping:_*)
}
}
/** Mix HasTLControlRegMap into any subclass of RegisterRouter to gain helper functions for attaching a device control register map to TileLink.
* - The intended use case is that controlNode will diplomatically publish a SW-visible device's memory-mapped control registers.
* - Use the clock crossing helper controlXing to externally connect controlNode to a TileLink interconnect.
* - Use the mapping helper function regmap to internally fill out the space of device control registers.
*/
trait HasTLControlRegMap { this: RegisterRouter =>
protected val controlNode = TLRegisterNode(
address = address,
device = device,
deviceKey = "reg/control",
concurrency = concurrency,
beatBytes = beatBytes,
undefZero = undefZero,
executable = executable)
// Externally, this helper should be used to connect the register control port to a bus
val controlXing: TLInwardClockCrossingHelper = this.crossIn(controlNode)
// Backwards-compatibility default node accessor with no clock crossing
lazy val node: TLInwardNode = controlXing(NoCrossing)
// Internally, this function should be used to populate the control port with registers
protected def regmap(mapping: RegField.Map*): Unit = { controlNode.regmap(mapping:_*) }
}
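// A minimal sketch (hypothetical device, not part of the original file) of the usage
// described above: extend RegisterRouter, mix in HasTLControlRegMap to publish the
// control node, and populate it with regmap from inside the module implementation.
// The device name, compat string, base address, and register offsets are assumptions.
import org.chipsalliance.diplomacy.lazymodule.LazyModuleImp
import freechips.rocketchip.regmapper.RegisterRouterParams
class ExampleCtrl(beatBytes: Int)(implicit p: Parameters)
    extends RegisterRouter(RegisterRouterParams("example-ctrl", Seq("example,ctrl0"), 0x4000, beatBytes = beatBytes))
    with HasTLControlRegMap {
  lazy val module = new LazyModuleImp(this) {
    val scratch = RegInit(0.U(32.W))                       // simple read/write scratch register
    regmap(
      0x00 -> Seq(RegField(32, scratch)),                  // RW register at offset 0x00
      0x04 -> Seq(RegField.r(32, "hdeadbeef".U(32.W)))     // RO identification value at 0x04
    )
  }
}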
File TileClockGater.scala:
package chipyard.clocking
import chisel3._
import chisel3.util._
import chisel3.experimental.Analog
import org.chipsalliance.cde.config._
import freechips.rocketchip.subsystem._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.prci._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.regmapper._
import freechips.rocketchip.subsystem._
/** This node adds clock gating control registers.
 * If deploying on a platform which does not support clock gating, deasserting the enable
 * flag will still generate the registers, preserving the same memory map and behavior, but
 * will not generate any clock gaters.
 */
class TileClockGater(address: BigInt, beatBytes: Int)(implicit p: Parameters, valName: ValName) extends LazyModule
{
val device = new SimpleDevice(s"clock-gater", Nil)
val clockNode = ClockGroupIdentityNode()
val tlNode = TLRegisterNode(Seq(AddressSet(address, 4096-1)), device, "reg/control", beatBytes=beatBytes)
lazy val module = new LazyModuleImp(this) {
val sources = clockNode.in.head._1.member.data.toSeq
val sinks = clockNode.out.head._1.member.elements.toSeq
val nSinks = sinks.size
val regs = (0 until nSinks).map({i =>
val sinkName = sinks(i)._1
val reg = withReset(sources(i).reset) { Module(new AsyncResetRegVec(w=1, init=1)) }
if (sinkName.contains("tile")) {
println(s"${(address+i*4).toString(16)}: Tile $sinkName clock gate")
sinks(i)._2.clock := ClockGate(sources(i).clock, reg.io.q.asBool)
sinks(i)._2.reset := sources(i).reset
} else {
sinks(i)._2 := sources(i)
}
reg
})
tlNode.regmap((0 until nSinks).map({i =>
i*4 -> Seq(RegField.rwReg(1, regs(i).io))
}): _*)
}
}
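// Resulting register map (as implied by the regmap call above, noted here for reference):
// each clock sink i gets a 1-bit read/write enable register at offset i*4, reset to 1
// (enabled). Sinks whose name contains "tile" are gated with ClockGate using that bit;
// all other sinks are passed through unmodified, so writes to their registers have no
// hardware effect.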
File MuxLiteral.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.log2Ceil
import scala.reflect.ClassTag
/* MuxLiteral creates a lookup table from a key to a list of values.
* Unlike MuxLookup, the table keys must be exclusive literals.
*/
object MuxLiteral
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (UInt, T), rest: (UInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(UInt, T)]): T =
MuxTable(index, default, cases.map { case (k, v) => (k.litValue, v) })
}
object MuxSeq
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: T, rest: T*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[T]): T =
MuxTable(index, default, cases.zipWithIndex.map { case (v, i) => (BigInt(i), v) })
}
object MuxTable
{
def apply[T <: Data:ClassTag](index: UInt, default: T, first: (BigInt, T), rest: (BigInt, T)*): T =
apply(index, default, first :: rest.toList)
def apply[T <: Data:ClassTag](index: UInt, default: T, cases: Seq[(BigInt, T)]): T = {
/* All keys must be >= 0 and distinct */
cases.foreach { case (k, _) => require (k >= 0) }
require (cases.map(_._1).distinct.size == cases.size)
/* Filter out any cases identical to the default */
val simple = cases.filter { case (k, v) => !default.isLit || !v.isLit || v.litValue != default.litValue }
val maxKey = (BigInt(0) +: simple.map(_._1)).max
val endIndex = BigInt(1) << log2Ceil(maxKey+1)
if (simple.isEmpty) {
default
} else if (endIndex <= 2*simple.size) {
/* The dense encoding case uses a Vec */
val table = Array.fill(endIndex.toInt) { default }
simple.foreach { case (k, v) => table(k.toInt) = v }
Mux(index >= endIndex.U, default, VecInit(table)(index))
} else {
/* The sparse encoding case uses switch */
val out = WireDefault(default)
simple.foldLeft(new chisel3.util.SwitchContext(index, None, Set.empty)) { case (acc, (k, v)) =>
acc.is (k.U) { out := v }
}
out
}
}
}
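/** A small usage sketch (not part of the original file; module name and widths are
  * illustrative). Because the keys are exclusive literals, MuxTable can lower the
  * MuxLiteral below to a dense Vec lookup (endIndex = 4 <= 2 * 2 cases) rather than a
  * priority chain; MuxSeq is the same table with implicit keys 0, 1, 2, ...
  */
class MuxLiteralSketch extends Module {
  val io = IO(new Bundle {
    val sel  = Input(UInt(2.W))
    val outA = Output(UInt(8.W))
    val outB = Output(UInt(8.W))
  })
  // keys 0 and 2 hit the table; every other index returns the default 0.U
  io.outA := MuxLiteral(io.sel, 0.U(8.W), 0.U -> 1.U(8.W), 2.U -> 42.U(8.W))
  // equivalent to indexing Seq(3.U, 5.U, 7.U) with io.sel, defaulting to 0.U for index 3
  io.outB := MuxSeq(io.sel, 0.U(8.W), 3.U(8.W), 5.U(8.W), 7.U(8.W))
}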
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
 * Instantiate this [[LazyModule]]; return the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes a directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
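/** A minimal sketch (hypothetical example, not part of the original file) of the
  * LazyModule / LazyModuleImp split described above: diplomacy runs on the outer
  * LazyModule, while ordinary Chisel hardware is elaborated inside the imp during the
  * second phase.
  */
class AdderLM(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyModuleImp(this) {
    val io = IO(new Bundle {
      val a   = Input(UInt(8.W))
      val b   = Input(UInt(8.W))
      val sum = Output(UInt(8.W))
    })
    io.sum := io.a + io.b // plain Chisel inside the imp
  }
}
// Elsewhere (inside another LazyModule): val adder = LazyModule(new AdderLM);
// the parent imp can then drive adder.module.io directly.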
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
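/** A minimal sketch (hypothetical example, not part of the original file) of a
  * LazyRawModuleImp user: the raw imp receives an explicit clock/reset pair on its own
  * IO and forwards it to childClock/childReset so that lazily instantiated children
  * (including anonymous Monitors) are clocked and reset correctly.
  */
class ExampleRawIsland(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    override def provideImplicitClockToLazyChildren: Boolean = true
    val io_clock = IO(Input(Clock()))
    val io_reset = IO(Input(Reset()))
    childClock := io_clock // overrides the disabled defaults above
    childReset := io_reset
  }
}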
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
 * whether to flip in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a set of parameters
 *   describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
 *   [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
 *   parameters.
 * @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a set of parameters describing
 *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
 * @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a set of transfers
 *   specified for a sink according to protocol.
 * @tparam BI
 *   Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend [[chisel3.Data]], which represents the real hardware.
 * @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a set of parameters
 *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
 * @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a set of parameters describing
 *   the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
 *   Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
 * @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a set of transfers
 *   specified for a source according to protocol.
 * @tparam BO
 *   Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: the source is processed by a function and the result is passed on to others
 *   - Arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
  // Generate a [[NodeHandle]] whose inward and outward nodes are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
      // resolveStar relies on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
      // query the port index range of this binding in the node on the other side.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: For compatibility, unconnected forwarded diplomatic signals are tied to DontCare.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: For compatibility, unconnected forwarded diplomatic signals are tied to DontCare.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
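// Usage note (illustrative, not from the original file): inside a LazyModuleImp, every
// element of someNode.in / someNode.out is a (bundle, edge) pair, i.e. the negotiated
// hardware interface together with the edge parameters that describe it. For a TileLink
// node one might write (names hypothetical):
//   val (tl, edge) = node.in.head
//   when (tl.a.fire) { /* edge.hasData(tl.a.bits), edge.address(tl.a.bits), ... */ }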
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
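  // Worked example (illustrative): with lgSize = 3 (an 8-byte access), UIntToOH1 yields
  // the mask 0b111, so isAligned holds exactly when address(2, 0) === 0.U.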
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
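  // Worked example (illustrative): with beatBytes = 8 (cutoff = 3), a Put of size = 5
  // (32 bytes) gives numBeats = 4 and numBeats1 = 3, while a data-less Get of any size
  // gives numBeats = 1 and numBeats1 = 0.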
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
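  // Worked example (illustrative): for a 4-beat burst (beats1 = 3) successive fire cycles
  // yield (first, last, count) = (1,0,0), (0,0,1), (0,0,2), (0,1,3), with done asserted on
  // the final beat; single-beat messages have first, last, and done all true at once.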
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
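  // Worked example (illustrative): an AcquireBlock/AcquirePerm with param NtoB does not
  // need T permissions, while NtoT and BtoT do; all Put/Arithmetic/Logical requests and
  // PREFETCH_WRITE hints need T, whereas Get and PREFETCH_READ hints do not.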
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
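// The counter above increments on the first beat of every request and decrements on the last
// beat of every response; when the edge does not support Acquire/Probe (bce is false), only
// the A and D channels are tracked.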
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
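// A minimal client-side usage sketch (the names `edge`, `out`, `addr`, and `want_read` are
// illustrative, not part of this file):
//
//   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   out.a.valid := want_read && legal
//   out.a.bits  := get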
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TileClockGater( // @[TileClockGater.scala:27:25]
input clock, // @[TileClockGater.scala:27:25]
input reset, // @[TileClockGater.scala:27:25]
output auto_clock_gater_in_1_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_1_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_clock_gater_in_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_clock_gater_in_1_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_clock_gater_in_1_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [11:0] auto_clock_gater_in_1_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [20:0] auto_clock_gater_in_1_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_clock_gater_in_1_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_clock_gater_in_1_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_1_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_clock_gater_in_1_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_clock_gater_in_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_clock_gater_in_1_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [11:0] auto_clock_gater_in_1_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_clock_gater_in_1_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_0_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25]
input auto_clock_gater_in_0_member_allClocks_uncore_reset, // @[LazyModuleImp.scala:107:25]
output auto_clock_gater_out_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25]
output auto_clock_gater_out_member_allClocks_uncore_reset // @[LazyModuleImp.scala:107:25]
);
wire out_front_valid; // @[RegisterRouter.scala:87:24]
wire out_front_ready; // @[RegisterRouter.scala:87:24]
wire out_bits_read; // @[RegisterRouter.scala:87:24]
wire [11:0] out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [8:0] in_bits_index; // @[RegisterRouter.scala:73:18]
wire in_bits_read; // @[RegisterRouter.scala:73:18]
wire auto_clock_gater_in_1_a_valid_0 = auto_clock_gater_in_1_a_valid; // @[TileClockGater.scala:27:25]
wire [2:0] auto_clock_gater_in_1_a_bits_opcode_0 = auto_clock_gater_in_1_a_bits_opcode; // @[TileClockGater.scala:27:25]
wire [2:0] auto_clock_gater_in_1_a_bits_param_0 = auto_clock_gater_in_1_a_bits_param; // @[TileClockGater.scala:27:25]
wire [1:0] auto_clock_gater_in_1_a_bits_size_0 = auto_clock_gater_in_1_a_bits_size; // @[TileClockGater.scala:27:25]
wire [11:0] auto_clock_gater_in_1_a_bits_source_0 = auto_clock_gater_in_1_a_bits_source; // @[TileClockGater.scala:27:25]
wire [20:0] auto_clock_gater_in_1_a_bits_address_0 = auto_clock_gater_in_1_a_bits_address; // @[TileClockGater.scala:27:25]
wire [7:0] auto_clock_gater_in_1_a_bits_mask_0 = auto_clock_gater_in_1_a_bits_mask; // @[TileClockGater.scala:27:25]
wire [63:0] auto_clock_gater_in_1_a_bits_data_0 = auto_clock_gater_in_1_a_bits_data; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_a_bits_corrupt_0 = auto_clock_gater_in_1_a_bits_corrupt; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_d_ready_0 = auto_clock_gater_in_1_d_ready; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_0_member_allClocks_uncore_clock_0 = auto_clock_gater_in_0_member_allClocks_uncore_clock; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_0_member_allClocks_uncore_reset_0 = auto_clock_gater_in_0_member_allClocks_uncore_reset; // @[TileClockGater.scala:27:25]
wire [1:0] _out_frontSel_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _out_backSel_T = 2'h1; // @[OneHot.scala:58:35]
wire [8:0] out_maskMatch = 9'h1FF; // @[RegisterRouter.scala:87:24]
wire out_frontSel_0 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_backSel_0 = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_rifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wifireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_wifireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_rofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_5 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_rofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_wofireMux_out = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_6 = 1'h1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_WIRE_0 = 1'h1; // @[MuxLiteral.scala:49:48]
wire out_wofireMux = 1'h1; // @[MuxLiteral.scala:49:10]
wire out_iready = 1'h1; // @[RegisterRouter.scala:87:24]
wire out_oready = 1'h1; // @[RegisterRouter.scala:87:24]
wire [2:0] clock_gaterIn_d_bits_d_opcode = 3'h0; // @[Edges.scala:792:17]
wire [63:0] clock_gaterIn_d_bits_d_data = 64'h0; // @[Edges.scala:792:17]
wire auto_clock_gater_in_1_d_bits_sink = 1'h0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_d_bits_denied = 1'h0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_d_bits_corrupt = 1'h0; // @[TileClockGater.scala:27:25]
wire clock_gaterIn_1_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire out_frontSel_1 = 1'h0; // @[RegisterRouter.scala:87:24]
wire out_backSel_1 = 1'h0; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_6 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wifireMux_T_7 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_rofireMux_T_6 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_wofireMux_T_7 = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T = 1'h0; // @[MuxLiteral.scala:49:17]
wire _out_out_bits_data_T_2 = 1'h0; // @[MuxLiteral.scala:49:17]
wire clock_gaterIn_d_bits_d_sink = 1'h0; // @[Edges.scala:792:17]
wire clock_gaterIn_d_bits_d_denied = 1'h0; // @[Edges.scala:792:17]
wire clock_gaterIn_d_bits_d_corrupt = 1'h0; // @[Edges.scala:792:17]
wire [1:0] auto_clock_gater_in_1_d_bits_param = 2'h0; // @[TileClockGater.scala:27:25]
wire clock_gaterIn_1_a_ready; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_1_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_d_bits_d_param = 2'h0; // @[Edges.scala:792:17]
wire clock_gaterIn_1_a_valid = auto_clock_gater_in_1_a_valid_0; // @[MixedNode.scala:551:17]
wire [2:0] clock_gaterIn_1_a_bits_opcode = auto_clock_gater_in_1_a_bits_opcode_0; // @[MixedNode.scala:551:17]
wire [2:0] clock_gaterIn_1_a_bits_param = auto_clock_gater_in_1_a_bits_param_0; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_1_a_bits_size = auto_clock_gater_in_1_a_bits_size_0; // @[MixedNode.scala:551:17]
wire [11:0] clock_gaterIn_1_a_bits_source = auto_clock_gater_in_1_a_bits_source_0; // @[MixedNode.scala:551:17]
wire [20:0] clock_gaterIn_1_a_bits_address = auto_clock_gater_in_1_a_bits_address_0; // @[MixedNode.scala:551:17]
wire [7:0] clock_gaterIn_1_a_bits_mask = auto_clock_gater_in_1_a_bits_mask_0; // @[MixedNode.scala:551:17]
wire [63:0] clock_gaterIn_1_a_bits_data = auto_clock_gater_in_1_a_bits_data_0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_a_bits_corrupt = auto_clock_gater_in_1_a_bits_corrupt_0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_d_ready = auto_clock_gater_in_1_d_ready_0; // @[MixedNode.scala:551:17]
wire clock_gaterIn_1_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] clock_gaterIn_1_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_1_d_bits_size; // @[MixedNode.scala:551:17]
wire [11:0] clock_gaterIn_1_d_bits_source; // @[MixedNode.scala:551:17]
wire [63:0] clock_gaterIn_1_d_bits_data; // @[MixedNode.scala:551:17]
wire clock_gaterIn_member_allClocks_uncore_clock = auto_clock_gater_in_0_member_allClocks_uncore_clock_0; // @[MixedNode.scala:551:17]
wire clock_gaterOut_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17]
wire clock_gaterIn_member_allClocks_uncore_reset = auto_clock_gater_in_0_member_allClocks_uncore_reset_0; // @[MixedNode.scala:551:17]
wire clock_gaterOut_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17]
wire auto_clock_gater_in_1_a_ready_0; // @[TileClockGater.scala:27:25]
wire [2:0] auto_clock_gater_in_1_d_bits_opcode_0; // @[TileClockGater.scala:27:25]
wire [1:0] auto_clock_gater_in_1_d_bits_size_0; // @[TileClockGater.scala:27:25]
wire [11:0] auto_clock_gater_in_1_d_bits_source_0; // @[TileClockGater.scala:27:25]
wire [63:0] auto_clock_gater_in_1_d_bits_data_0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_in_1_d_valid_0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_out_member_allClocks_uncore_clock_0; // @[TileClockGater.scala:27:25]
wire auto_clock_gater_out_member_allClocks_uncore_reset_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_out_member_allClocks_uncore_clock_0 = clock_gaterOut_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17]
assign auto_clock_gater_out_member_allClocks_uncore_reset_0 = clock_gaterOut_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17]
assign clock_gaterOut_member_allClocks_uncore_clock = clock_gaterIn_member_allClocks_uncore_clock; // @[MixedNode.scala:542:17, :551:17]
assign clock_gaterOut_member_allClocks_uncore_reset = clock_gaterIn_member_allClocks_uncore_reset; // @[MixedNode.scala:542:17, :551:17]
wire in_ready; // @[RegisterRouter.scala:73:18]
assign auto_clock_gater_in_1_a_ready_0 = clock_gaterIn_1_a_ready; // @[MixedNode.scala:551:17]
wire in_valid = clock_gaterIn_1_a_valid; // @[RegisterRouter.scala:73:18]
wire [1:0] in_bits_extra_tlrr_extra_size = clock_gaterIn_1_a_bits_size; // @[RegisterRouter.scala:73:18]
wire [11:0] in_bits_extra_tlrr_extra_source = clock_gaterIn_1_a_bits_source; // @[RegisterRouter.scala:73:18]
wire [7:0] in_bits_mask = clock_gaterIn_1_a_bits_mask; // @[RegisterRouter.scala:73:18]
wire [63:0] in_bits_data = clock_gaterIn_1_a_bits_data; // @[RegisterRouter.scala:73:18]
wire out_ready = clock_gaterIn_1_d_ready; // @[RegisterRouter.scala:87:24]
wire out_valid; // @[RegisterRouter.scala:87:24]
assign auto_clock_gater_in_1_d_valid_0 = clock_gaterIn_1_d_valid; // @[MixedNode.scala:551:17]
assign auto_clock_gater_in_1_d_bits_opcode_0 = clock_gaterIn_1_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] clock_gaterIn_d_bits_d_size; // @[Edges.scala:792:17]
assign auto_clock_gater_in_1_d_bits_size_0 = clock_gaterIn_1_d_bits_size; // @[MixedNode.scala:551:17]
wire [11:0] clock_gaterIn_d_bits_d_source; // @[Edges.scala:792:17]
assign auto_clock_gater_in_1_d_bits_source_0 = clock_gaterIn_1_d_bits_source; // @[MixedNode.scala:551:17]
wire [63:0] out_bits_data; // @[RegisterRouter.scala:87:24]
assign auto_clock_gater_in_1_d_bits_data_0 = clock_gaterIn_1_d_bits_data; // @[MixedNode.scala:551:17]
wire _out_in_ready_T; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_1_a_ready = in_ready; // @[RegisterRouter.scala:73:18]
wire _in_bits_read_T; // @[RegisterRouter.scala:74:36]
wire _out_front_valid_T = in_valid; // @[RegisterRouter.scala:73:18, :87:24]
wire out_front_bits_read = in_bits_read; // @[RegisterRouter.scala:73:18, :87:24]
wire [8:0] out_front_bits_index = in_bits_index; // @[RegisterRouter.scala:73:18, :87:24]
wire [63:0] out_front_bits_data = in_bits_data; // @[RegisterRouter.scala:73:18, :87:24]
wire [7:0] out_front_bits_mask = in_bits_mask; // @[RegisterRouter.scala:73:18, :87:24]
wire [11:0] out_front_bits_extra_tlrr_extra_source = in_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:73:18, :87:24]
wire [1:0] out_front_bits_extra_tlrr_extra_size = in_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:73:18, :87:24]
assign _in_bits_read_T = clock_gaterIn_1_a_bits_opcode == 3'h4; // @[RegisterRouter.scala:74:36]
assign in_bits_read = _in_bits_read_T; // @[RegisterRouter.scala:73:18, :74:36]
wire [17:0] _in_bits_index_T = clock_gaterIn_1_a_bits_address[20:3]; // @[Edges.scala:192:34]
assign in_bits_index = _in_bits_index_T[8:0]; // @[RegisterRouter.scala:73:18, :75:19]
wire _out_front_ready_T = out_ready; // @[RegisterRouter.scala:87:24]
wire _out_out_valid_T; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_1_d_valid = out_valid; // @[RegisterRouter.scala:87:24]
wire _clock_gaterIn_d_bits_opcode_T = out_bits_read; // @[RegisterRouter.scala:87:24, :105:25]
assign clock_gaterIn_1_d_bits_data = out_bits_data; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_d_bits_d_source = out_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
wire [1:0] out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_d_bits_d_size = out_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
assign _out_in_ready_T = out_front_ready; // @[RegisterRouter.scala:87:24]
assign _out_out_valid_T = out_front_valid; // @[RegisterRouter.scala:87:24]
assign out_bits_read = out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire [8:0] out_findex = out_front_bits_index; // @[RegisterRouter.scala:87:24]
wire [8:0] out_bindex = out_front_bits_index; // @[RegisterRouter.scala:87:24]
assign out_bits_extra_tlrr_extra_source = out_front_bits_extra_tlrr_extra_source; // @[RegisterRouter.scala:87:24]
assign out_bits_extra_tlrr_extra_size = out_front_bits_extra_tlrr_extra_size; // @[RegisterRouter.scala:87:24]
wire _out_T = out_findex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_T_1 = out_bindex == 9'h0; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_0 = _out_T_1; // @[MuxLiteral.scala:49:48]
wire out_rivalid_0; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire out_wivalid_0; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire out_roready_0; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire out_woready_0; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T = out_front_bits_mask[0]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_1 = out_front_bits_mask[1]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_2 = out_front_bits_mask[2]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_3 = out_front_bits_mask[3]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_4 = out_front_bits_mask[4]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_5 = out_front_bits_mask[5]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_6 = out_front_bits_mask[6]; // @[RegisterRouter.scala:87:24]
wire _out_frontMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire _out_backMask_T_7 = out_front_bits_mask[7]; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_8 = {8{_out_frontMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_9 = {8{_out_frontMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_10 = {8{_out_frontMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_11 = {8{_out_frontMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_12 = {8{_out_frontMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_13 = {8{_out_frontMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_14 = {8{_out_frontMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_frontMask_T_15 = {8{_out_frontMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_lo = {_out_frontMask_T_9, _out_frontMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_lo_hi = {_out_frontMask_T_11, _out_frontMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_lo = {out_frontMask_lo_hi, out_frontMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_lo = {_out_frontMask_T_13, _out_frontMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_frontMask_hi_hi = {_out_frontMask_T_15, _out_frontMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_frontMask_hi = {out_frontMask_hi_hi, out_frontMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_frontMask = {out_frontMask_hi, out_frontMask_lo}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_8 = {8{_out_backMask_T}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_9 = {8{_out_backMask_T_1}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_10 = {8{_out_backMask_T_2}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_11 = {8{_out_backMask_T_3}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_12 = {8{_out_backMask_T_4}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_13 = {8{_out_backMask_T_5}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_14 = {8{_out_backMask_T_6}}; // @[RegisterRouter.scala:87:24]
wire [7:0] _out_backMask_T_15 = {8{_out_backMask_T_7}}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_lo = {_out_backMask_T_9, _out_backMask_T_8}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_lo_hi = {_out_backMask_T_11, _out_backMask_T_10}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_lo = {out_backMask_lo_hi, out_backMask_lo_lo}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_lo = {_out_backMask_T_13, _out_backMask_T_12}; // @[RegisterRouter.scala:87:24]
wire [15:0] out_backMask_hi_hi = {_out_backMask_T_15, _out_backMask_T_14}; // @[RegisterRouter.scala:87:24]
wire [31:0] out_backMask_hi = {out_backMask_hi_hi, out_backMask_hi_lo}; // @[RegisterRouter.scala:87:24]
wire [63:0] out_backMask = {out_backMask_hi, out_backMask_lo}; // @[RegisterRouter.scala:87:24]
wire _out_rimask_T = out_frontMask[0]; // @[RegisterRouter.scala:87:24]
wire _out_wimask_T = out_frontMask[0]; // @[RegisterRouter.scala:87:24]
wire out_rimask = _out_rimask_T; // @[RegisterRouter.scala:87:24]
wire out_wimask = _out_wimask_T; // @[RegisterRouter.scala:87:24]
wire _out_romask_T = out_backMask[0]; // @[RegisterRouter.scala:87:24]
wire _out_womask_T = out_backMask[0]; // @[RegisterRouter.scala:87:24]
wire out_romask = _out_romask_T; // @[RegisterRouter.scala:87:24]
wire out_womask = _out_womask_T; // @[RegisterRouter.scala:87:24]
wire out_f_rivalid = out_rivalid_0 & out_rimask; // @[RegisterRouter.scala:87:24]
wire out_f_roready = out_roready_0 & out_romask; // @[RegisterRouter.scala:87:24]
wire out_f_wivalid = out_wivalid_0 & out_wimask; // @[RegisterRouter.scala:87:24]
wire out_f_woready = out_woready_0 & out_womask; // @[RegisterRouter.scala:87:24]
wire _out_T_2 = out_front_bits_data[0]; // @[RegisterRouter.scala:87:24]
wire _out_T_3 = ~out_rimask; // @[RegisterRouter.scala:87:24]
wire _out_T_4 = ~out_wimask; // @[RegisterRouter.scala:87:24]
wire _out_T_5 = ~out_romask; // @[RegisterRouter.scala:87:24]
wire _out_T_6 = ~out_womask; // @[RegisterRouter.scala:87:24]
wire _out_T_7; // @[RegisterRouter.scala:87:24]
wire _out_T_8 = _out_T_7; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_WIRE_1_0 = _out_T_8; // @[MuxLiteral.scala:49:48]
wire _GEN = in_valid & out_front_ready; // @[RegisterRouter.scala:73:18, :87:24]
wire _out_rifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T = _GEN; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T = _GEN; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_1 = _out_rifireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_2 = _out_rifireMux_T_1; // @[RegisterRouter.scala:87:24]
assign _out_rifireMux_T_3 = _out_rifireMux_T_2 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_rivalid_0 = _out_rifireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rifireMux_T_4 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_2 = _out_wifireMux_T & _out_wifireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_3 = _out_wifireMux_T_2; // @[RegisterRouter.scala:87:24]
assign _out_wifireMux_T_4 = _out_wifireMux_T_3 & _out_T; // @[RegisterRouter.scala:87:24]
assign out_wivalid_0 = _out_wifireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wifireMux_T_5 = ~_out_T; // @[RegisterRouter.scala:87:24]
wire _GEN_0 = out_front_valid & out_ready; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T = _GEN_0; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_1 = _out_rofireMux_T & out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_2 = _out_rofireMux_T_1; // @[RegisterRouter.scala:87:24]
assign _out_rofireMux_T_3 = _out_rofireMux_T_2 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_roready_0 = _out_rofireMux_T_3; // @[RegisterRouter.scala:87:24]
wire _out_rofireMux_T_4 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_1 = ~out_front_bits_read; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_2 = _out_wofireMux_T & _out_wofireMux_T_1; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_3 = _out_wofireMux_T_2; // @[RegisterRouter.scala:87:24]
assign _out_wofireMux_T_4 = _out_wofireMux_T_3 & _out_T_1; // @[RegisterRouter.scala:87:24]
assign out_woready_0 = _out_wofireMux_T_4; // @[RegisterRouter.scala:87:24]
wire _out_wofireMux_T_5 = ~_out_T_1; // @[RegisterRouter.scala:87:24]
assign in_ready = _out_in_ready_T; // @[RegisterRouter.scala:73:18, :87:24]
assign out_front_valid = _out_front_valid_T; // @[RegisterRouter.scala:87:24]
assign out_front_ready = _out_front_ready_T; // @[RegisterRouter.scala:87:24]
assign out_valid = _out_out_valid_T; // @[RegisterRouter.scala:87:24]
wire _out_out_bits_data_T_1 = _out_out_bits_data_WIRE_0; // @[MuxLiteral.scala:49:{10,48}]
wire _out_out_bits_data_T_3 = _out_out_bits_data_WIRE_1_0; // @[MuxLiteral.scala:49:{10,48}]
wire _out_out_bits_data_T_4 = _out_out_bits_data_T_1 & _out_out_bits_data_T_3; // @[MuxLiteral.scala:49:10]
assign out_bits_data = {63'h0, _out_out_bits_data_T_4}; // @[RegisterRouter.scala:87:24]
assign clock_gaterIn_1_d_bits_size = clock_gaterIn_d_bits_d_size; // @[Edges.scala:792:17]
assign clock_gaterIn_1_d_bits_source = clock_gaterIn_d_bits_d_source; // @[Edges.scala:792:17]
assign clock_gaterIn_1_d_bits_opcode = {2'h0, _clock_gaterIn_d_bits_opcode_T}; // @[RegisterRouter.scala:105:{19,25}]
TLMonitor_50 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (clock_gaterIn_1_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (clock_gaterIn_1_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (clock_gaterIn_1_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (clock_gaterIn_1_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (clock_gaterIn_1_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (clock_gaterIn_1_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (clock_gaterIn_1_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (clock_gaterIn_1_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (clock_gaterIn_1_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (clock_gaterIn_1_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (clock_gaterIn_1_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (clock_gaterIn_1_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (clock_gaterIn_1_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (clock_gaterIn_1_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (clock_gaterIn_1_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (clock_gaterIn_1_d_bits_data) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
AsyncResetRegVec_w1_i1 regs_0 ( // @[TileClockGater.scala:33:53]
.clock (clock),
.reset (clock_gaterIn_member_allClocks_uncore_reset), // @[MixedNode.scala:551:17]
.io_d (_out_T_2), // @[RegisterRouter.scala:87:24]
.io_q (_out_T_7),
.io_en (out_f_woready) // @[RegisterRouter.scala:87:24]
); // @[TileClockGater.scala:33:53]
assign auto_clock_gater_in_1_a_ready = auto_clock_gater_in_1_a_ready_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_valid = auto_clock_gater_in_1_d_valid_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_bits_opcode = auto_clock_gater_in_1_d_bits_opcode_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_bits_size = auto_clock_gater_in_1_d_bits_size_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_bits_source = auto_clock_gater_in_1_d_bits_source_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_in_1_d_bits_data = auto_clock_gater_in_1_d_bits_data_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_out_member_allClocks_uncore_clock = auto_clock_gater_out_member_allClocks_uncore_clock_0; // @[TileClockGater.scala:27:25]
assign auto_clock_gater_out_member_allClocks_uncore_reset = auto_clock_gater_out_member_allClocks_uncore_reset_0; // @[TileClockGater.scala:27:25]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
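// As a rough sketch of how a new datatype plugs in (illustrative only; the ops below just
// return don't-care values for the DummySInt bundle defined above):
//
//   implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
//     override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
//       override def *(t: DummySInt) = self.dontCare
//       override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
//       override def +(t: DummySInt) = self.dontCare
//       override def -(t: DummySInt) = self.dontCare
//       override def >>(u: UInt) = self.dontCare
//       override def >(t: DummySInt): Bool = false.B
//       override def identity = self.dontCare
//       override def withWidthOf(t: DummySInt) = self.dontCare
//       override def clippedToWidthOf(t: DummySInt) = self.dontCare
//       override def relu = self.dontCare
//       override def zero = self.dontCare
//       override def minimum = self.dontCare
//     }
//   }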
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
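// Worked example: for self = 11 (0b1011) and u = 2, point_five = 1 and zeros = 1, so the
// result is (11 >> 2) + 1 = 3 (2.75 rounds up to 3); for self = 2 and u = 2, both zeros and
// ones_digit are 0, so the 0.5 remainder is dropped and the result is 0.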
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
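// For example, clipping a wide value of 200 into an 8-bit SInt saturates to 127, and -200
// saturates to -128.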
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
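// The returned pair is (denominator in, quotient out): pushing a denominator into the first
// Decoupled interface eventually yields self / denominator on the second.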
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating-point sqrt unit, but we should use an integer sqrt instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat sqrt unit
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt unit
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
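// Worked example (editor's note, not in the original source): the raw pattern built
// below has sign 0, exponent field (bias - u), and an all-zero significand; for an
// FP32-style format (bias = 127), u = 2 gives an exponent field of 125, i.e. 2^-2 = 0.25.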
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
File AccumulatorMem.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
class AccumulatorReadReq[T <: Data: Arithmetic, U <: Data](n: Int, acc_t: T, scale_t: U) extends Bundle {
val addr = UInt(log2Ceil(n).W)
val scale = scale_t
val igelu_qb = acc_t.cloneType
val igelu_qc = acc_t.cloneType
val iexp_qln2 = acc_t.cloneType
val iexp_qln2_inv = acc_t.cloneType
val act = UInt(Activation.bitwidth.W) // TODO magic number
val full = Bool() // Whether or not we return the full bitwidth output
val fromDMA = Bool()
}
class AccumulatorReadResp[T <: Data: Arithmetic, U <: Data](fullDataType: Vec[Vec[T]], scale_t: U) extends Bundle {
val data = fullDataType.cloneType
val fromDMA = Bool()
val scale = scale_t.cloneType
val igelu_qb = fullDataType.head.head.cloneType
val igelu_qc = fullDataType.head.head.cloneType
val iexp_qln2 = fullDataType.head.head.cloneType
val iexp_qln2_inv = fullDataType.head.head.cloneType
val act = UInt(Activation.bitwidth.W) // TODO magic number
val acc_bank_id = UInt(2.W) // TODO magic number
}
class AccumulatorReadIO[T <: Data: Arithmetic, U <: Data](n: Int, fullDataType: Vec[Vec[T]], scale_t: U) extends Bundle {
val req = Decoupled(new AccumulatorReadReq[T, U](n, fullDataType.head.head.cloneType, scale_t))
val resp = Flipped(Decoupled(new AccumulatorReadResp[T, U](fullDataType, scale_t)))
}
class AccumulatorWriteReq[T <: Data: Arithmetic](n: Int, t: Vec[Vec[T]]) extends Bundle {
val addr = UInt(log2Up(n).W)
val data = t.cloneType
val acc = Bool()
val mask = Vec(t.getWidth / 8, Bool()) // TODO Use aligned_to here
}
class AccumulatorMemIO [T <: Data: Arithmetic, U <: Data](n: Int, t: Vec[Vec[T]], scale_t: U,
acc_sub_banks: Int, use_shared_ext_mem: Boolean
) extends Bundle {
val read = Flipped(new AccumulatorReadIO(n, t, scale_t))
val write = Flipped(Decoupled(new AccumulatorWriteReq(n, t)))
val ext_mem = if (use_shared_ext_mem) Some(Vec(acc_sub_banks, new ExtMemIO)) else None
val adder = new Bundle {
val valid = Output(Bool())
val op1 = Output(t.cloneType)
val op2 = Output(t.cloneType)
val sum = Input(t.cloneType)
}
}
class AccPipe[T <: Data : Arithmetic](latency: Int, t: T)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val op1 = Input(t.cloneType)
val op2 = Input(t.cloneType)
val sum = Output(t.cloneType)
})
import ev._
io.sum := ShiftRegister(io.op1 + io.op2, latency)
}
class AccPipeShared[T <: Data : Arithmetic](latency: Int, t: Vec[Vec[T]], banks: Int) extends Module {
val io = IO(new Bundle {
val in_sel = Input(Vec(banks, Bool()))
val ina = Input(Vec(banks, t.cloneType))
val inb = Input(Vec(banks, t.cloneType))
val out = Output(t.cloneType)
})
val ina = Mux1H(io.in_sel, io.ina)
val inb = Mux1H(io.in_sel, io.inb)
io.out := VecInit((ina zip inb).map { case (rv, wv) =>
VecInit((rv zip wv).map { case (re, we) =>
val m = Module(new AccPipe(latency, t.head.head.cloneType))
m.io.op1 := re
m.io.op2 := we
m.io.sum
})
})
}
class AccumulatorMem[T <: Data, U <: Data](
n: Int, t: Vec[Vec[T]], scale_func: (T, U) => T, scale_t: U,
acc_singleported: Boolean, acc_sub_banks: Int,
use_shared_ext_mem: Boolean,
acc_latency: Int, acc_type: T, is_dummy: Boolean
)
(implicit ev: Arithmetic[T]) extends Module {
// TODO Do writes in this module work with matrices of size 2? If we try to read from an address right after writing
// to it, then we might not get the written data. We might need some kind of cooldown counter after addresses in the
// accumulator have been written to for configurations with such small matrices
// TODO make a new aligned_to variable specifically for AccumulatorMem. We should assume that inputs are at least
// accType.getWidth/8 aligned, because it won't make sense to do matrix additions directly in the DMA otherwise.
import ev._
// TODO unify this with TwoPortSyncMemIO
val io = IO(new AccumulatorMemIO(n, t, scale_t, acc_sub_banks, use_shared_ext_mem))
require (acc_latency >= 2)
val pipelined_writes = Reg(Vec(acc_latency, Valid(new AccumulatorWriteReq(n, t))))
val oldest_pipelined_write = pipelined_writes(acc_latency-1)
pipelined_writes(0).valid := io.write.fire
pipelined_writes(0).bits := io.write.bits
for (i <- 1 until acc_latency) {
pipelined_writes(i) := pipelined_writes(i-1)
}
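// Editor's note (not in the original source): accumulating writes are handled as a
// read-modify-write. The old row is read when the write request is accepted (see the
// read-port arbitration below), io.adder sums the old and new data while the request
// moves down this pipeline, and the sum is committed once the request reaches
// oldest_pipelined_write.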
val rdata_for_adder = Wire(t)
rdata_for_adder := DontCare
val rdata_for_read_resp = Wire(t)
rdata_for_read_resp := DontCare
val adder_sum = io.adder.sum
io.adder.valid := pipelined_writes(0).valid && pipelined_writes(0).bits.acc
io.adder.op1 := rdata_for_adder
io.adder.op2 := pipelined_writes(0).bits.data
val block_read_req = WireInit(false.B)
val block_write_req = WireInit(false.B)
val mask_len = t.getWidth / 8
val mask_elem = UInt((t.getWidth / mask_len).W)
if (!acc_singleported && !is_dummy) {
require(!use_shared_ext_mem)
val mem = TwoPortSyncMem(n, t, mask_len) // TODO We assume byte-alignment here. Use aligned_to instead
mem.io.waddr := oldest_pipelined_write.bits.addr
mem.io.wen := oldest_pipelined_write.valid
mem.io.wdata := Mux(oldest_pipelined_write.bits.acc, adder_sum, oldest_pipelined_write.bits.data)
mem.io.mask := oldest_pipelined_write.bits.mask
rdata_for_adder := mem.io.rdata
rdata_for_read_resp := mem.io.rdata
mem.io.raddr := Mux(io.write.fire && io.write.bits.acc, io.write.bits.addr, io.read.req.bits.addr)
mem.io.ren := io.read.req.fire || (io.write.fire && io.write.bits.acc)
} else if (!is_dummy) {
val rmw_req = Wire(Decoupled(UInt()))
rmw_req.valid := io.write.valid && io.write.bits.acc
rmw_req.bits := io.write.bits.addr
rmw_req.ready := true.B
block_write_req := !rmw_req.ready
val only_read_req = Wire(Decoupled(UInt()))
only_read_req.valid := io.read.req.valid
only_read_req.bits := io.read.req.bits.addr
only_read_req.ready := true.B
block_read_req := !only_read_req.ready
for (i <- 0 until acc_sub_banks) {
def isThisBank(addr: UInt) = addr(log2Ceil(acc_sub_banks)-1,0) === i.U
def getBankIdx(addr: UInt) = addr >> log2Ceil(acc_sub_banks)
val (read, write) = if (use_shared_ext_mem) {
def read(addr: UInt, ren: Bool): Data = {
io.ext_mem.get(i).read_en := ren
io.ext_mem.get(i).read_addr := addr
io.ext_mem.get(i).read_data
}
io.ext_mem.get(i).write_en := false.B
io.ext_mem.get(i).write_addr := DontCare
io.ext_mem.get(i).write_data := DontCare
io.ext_mem.get(i).write_mask := DontCare
def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]) = {
io.ext_mem.get(i).write_en := true.B
io.ext_mem.get(i).write_addr := addr
io.ext_mem.get(i).write_data := wdata.asUInt
io.ext_mem.get(i).write_mask := wmask.asUInt
}
(read _, write _)
} else {
val mem = SyncReadMem(n / acc_sub_banks, Vec(mask_len, mask_elem))
def read(addr: UInt, ren: Bool): Data = mem.read(addr, ren)
def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]) = mem.write(addr, wdata, wmask)
(read _, write _)
}
val ren = WireInit(false.B)
val raddr = WireInit(getBankIdx(rmw_req.bits))
val nEntries = 3
// Writes arriving 2 cycles after a read lead to bad bank behavior,
// so add another buffer stage here
class W_Q_Entry[T <: Data](mask_len: Int, mask_elem: T) extends Bundle {
val valid = Bool()
val data = Vec(mask_len, mask_elem)
val mask = Vec(mask_len, Bool())
val addr = UInt(log2Ceil(n/acc_sub_banks).W)
}
val w_q = Reg(Vec(nEntries, new W_Q_Entry(mask_len, mask_elem)))
for (e <- w_q) {
when (e.valid) {
assert(!(
io.write.fire && io.write.bits.acc &&
isThisBank(io.write.bits.addr) && getBankIdx(io.write.bits.addr) === e.addr &&
((io.write.bits.mask.asUInt & e.mask.asUInt) =/= 0.U)
), "you cannot accumulate to an AccumulatorMem address until previous writes to that address have completed")
when (io.write.bits.acc && isThisBank(io.write.bits.addr) && getBankIdx(io.write.bits.addr) === e.addr) {
rmw_req.ready := false.B
}
when (isThisBank(io.read.req.bits.addr) && getBankIdx(io.read.req.bits.addr) === e.addr) {
only_read_req.ready := false.B
}
}
}
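// Editor's note (not in the original source): w_q_head and w_q_tail are one-hot ring
// pointers (e.g. 001 -> 010 -> 100 -> 001), which lets Mux1H pick the head entry and
// makes advancing a pointer a single rotate-left with wrap-around.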
val w_q_head = RegInit(1.U(nEntries.W))
val w_q_tail = RegInit(1.U(nEntries.W))
val w_q_full = (w_q_tail.asBools zip w_q.map(_.valid)).map({ case (h,v) => h && v }).reduce(_||_)
val w_q_empty = !(w_q_head.asBools zip w_q.map(_.valid)).map({ case (h,v) => h && v }).reduce(_||_)
val wen = WireInit(false.B)
val wdata = Mux1H(w_q_head.asBools, w_q.map(_.data))
val wmask = Mux1H(w_q_head.asBools, w_q.map(_.mask))
val waddr = Mux1H(w_q_head.asBools, w_q.map(_.addr))
when (wen) {
w_q_head := (w_q_head << 1).asUInt | w_q_head(nEntries-1)
for (i <- 0 until nEntries) {
when (w_q_head(i)) {
w_q(i).valid := false.B
}
}
}
val w_q_push = oldest_pipelined_write.valid && isThisBank(oldest_pipelined_write.bits.addr)
when (w_q_push) {
assert(!w_q_full || wen, "we ran out of acc-sub-bank write q entries")
w_q_tail := (w_q_tail << 1).asUInt | w_q_tail(nEntries-1)
for (i <- 0 until nEntries) {
when (w_q_tail(i)) {
w_q(i).valid := true.B
w_q(i).data := Mux(oldest_pipelined_write.bits.acc, adder_sum, oldest_pipelined_write.bits.data).asTypeOf(Vec(mask_len, mask_elem))
w_q(i).mask := oldest_pipelined_write.bits.mask
w_q(i).addr := getBankIdx(oldest_pipelined_write.bits.addr)
}
}
}
val bank_rdata = read(raddr, ren && !wen).asTypeOf(t)
when (RegNext(ren && rmw_req.valid && isThisBank(rmw_req.bits))) {
rdata_for_adder := bank_rdata
} .elsewhen (RegNext(ren)) {
rdata_for_read_resp := bank_rdata
}
when (wen) {
write(waddr, wdata, wmask)
}
// Three requestors, 1 slot
// Priority is (in descending order):
// 1. incoming reads for RMW
// 2. writes from RMW
// 3. incoming reads
when (rmw_req.fire && isThisBank(rmw_req.bits)) {
ren := true.B
when (isThisBank(only_read_req.bits)) {
only_read_req.ready := false.B
}
} .elsewhen (!w_q_empty) {
wen := true.B
when (isThisBank(only_read_req.bits)) {
only_read_req.ready := false.B
}
} .otherwise {
ren := isThisBank(only_read_req.bits) && only_read_req.fire
raddr := getBankIdx(only_read_req.bits)
}
when (reset.asBool) {
w_q.foreach(_.valid := false.B)
}
}
}
val q = Module(new Queue(new AccumulatorReadResp(t, scale_t), 1, true, true))
q.io.enq.bits.data := rdata_for_read_resp
if (is_dummy) {
rdata_for_read_resp := DontCare
rdata_for_adder := DontCare
}
q.io.enq.bits.scale := RegNext(io.read.req.bits.scale)
q.io.enq.bits.igelu_qb := RegNext(io.read.req.bits.igelu_qb)
q.io.enq.bits.igelu_qc := RegNext(io.read.req.bits.igelu_qc)
q.io.enq.bits.iexp_qln2 := RegNext(io.read.req.bits.iexp_qln2)
q.io.enq.bits.iexp_qln2_inv := RegNext(io.read.req.bits.iexp_qln2_inv)
q.io.enq.bits.act := RegNext(io.read.req.bits.act)
q.io.enq.bits.fromDMA := RegNext(io.read.req.bits.fromDMA)
q.io.enq.bits.acc_bank_id := DontCare
q.io.enq.valid := RegNext(io.read.req.fire)
val p = q.io.deq
io.read.resp.bits.data := p.bits.data
io.read.resp.bits.fromDMA := p.bits.fromDMA
io.read.resp.bits.igelu_qb := p.bits.igelu_qb
io.read.resp.bits.igelu_qc := p.bits.igelu_qc
io.read.resp.bits.iexp_qln2 := p.bits.iexp_qln2
io.read.resp.bits.iexp_qln2_inv := p.bits.iexp_qln2_inv
io.read.resp.bits.act := p.bits.act
io.read.resp.bits.scale := p.bits.scale
io.read.resp.bits.acc_bank_id := DontCare // This is set in Scratchpad
io.read.resp.valid := p.valid
p.ready := io.read.resp.ready
val q_will_be_empty = (q.io.count +& q.io.enq.fire) - q.io.deq.fire === 0.U
io.read.req.ready := q_will_be_empty && (
// Make sure we aren't accumulating, which would take over both ports
!(io.write.valid && io.write.bits.acc) &&
!pipelined_writes.map(r => r.valid && r.bits.addr === io.read.req.bits.addr).reduce(_||_) &&
!block_read_req
)
io.write.ready := !block_write_req &&
!pipelined_writes.map(r => r.valid && r.bits.addr === io.write.bits.addr && io.write.bits.acc).reduce(_||_)
when (reset.asBool) {
pipelined_writes.foreach(_.valid := false.B)
}
// assert(!(io.read.req.valid && io.write.en && io.write.acc), "reading and accumulating simultaneously is not supported")
assert(!(io.read.req.fire && io.write.fire && io.read.req.bits.addr === io.write.bits.addr), "reading from and writing to same address is not supported")
}
| module AccPipe_2( // @[AccumulatorMem.scala:63:7]
input clock, // @[AccumulatorMem.scala:63:7]
input reset, // @[AccumulatorMem.scala:63:7]
input [31:0] io_op1, // @[AccumulatorMem.scala:64:14]
input [31:0] io_op2, // @[AccumulatorMem.scala:64:14]
output [31:0] io_sum // @[AccumulatorMem.scala:64:14]
);
wire [31:0] io_op1_0 = io_op1; // @[AccumulatorMem.scala:63:7]
wire [31:0] io_op2_0 = io_op2; // @[AccumulatorMem.scala:63:7]
wire [31:0] io_sum_0; // @[AccumulatorMem.scala:63:7]
wire [32:0] _io_sum_T = {io_op1_0[31], io_op1_0} + {io_op2_0[31], io_op2_0}; // @[Arithmetic.scala:94:38]
wire [31:0] _io_sum_T_1 = _io_sum_T[31:0]; // @[Arithmetic.scala:94:38]
wire [31:0] _io_sum_T_2 = _io_sum_T_1; // @[Arithmetic.scala:94:38]
reg [31:0] io_sum_r; // @[AccumulatorMem.scala:70:26]
assign io_sum_0 = io_sum_r; // @[AccumulatorMem.scala:63:7, :70:26]
always @(posedge clock) // @[AccumulatorMem.scala:63:7]
io_sum_r <= _io_sum_T_2; // @[Arithmetic.scala:94:38]
assign io_sum = io_sum_0; // @[AccumulatorMem.scala:63:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
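// Worked example (editor's note, not in the original source): UIntToOH1(3.U, 8) is the
// thermometer mask b0000_0111; OH1ToOH of that mask isolates the next bit, b0000_1000;
// and OH1ToUInt recovers 3.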
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
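// Worked example (editor's note, not in the original source): for x = b00100,
// leftOR(x) = b11100 and rightOR(x) = b00111.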
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_TLBEntryData_166( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae_ptw, // @[package.scala:268:18]
input io_x_ae_final, // @[package.scala:268:18]
input io_x_ae_stage2, // @[package.scala:268:18]
input io_x_pf, // @[package.scala:268:18]
input io_x_gf, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_hw, // @[package.scala:268:18]
input io_x_hx, // @[package.scala:268:18]
input io_x_hr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_ppp, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage, // @[package.scala:268:18]
output [19:0] io_y_ppn, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_ae_ptw, // @[package.scala:268:18]
output io_y_ae_final, // @[package.scala:268:18]
output io_y_ae_stage2, // @[package.scala:268:18]
output io_y_pf, // @[package.scala:268:18]
output io_y_gf, // @[package.scala:268:18]
output io_y_sw, // @[package.scala:268:18]
output io_y_sx, // @[package.scala:268:18]
output io_y_sr, // @[package.scala:268:18]
output io_y_hw, // @[package.scala:268:18]
output io_y_hx, // @[package.scala:268:18]
output io_y_hr, // @[package.scala:268:18]
output io_y_pw, // @[package.scala:268:18]
output io_y_px, // @[package.scala:268:18]
output io_y_pr, // @[package.scala:268:18]
output io_y_ppp, // @[package.scala:268:18]
output io_y_pal, // @[package.scala:268:18]
output io_y_paa, // @[package.scala:268:18]
output io_y_eff, // @[package.scala:268:18]
output io_y_c // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30]
wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30]
wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30]
wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30]
wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30]
wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30]
wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_g = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30]
wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30]
wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30]
wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30]
wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30]
wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30]
wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30]
wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30]
wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30]
wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30]
wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30]
wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30]
assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30]
assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30]
assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30]
assign io_y_pf = io_y_pf_0; // @[package.scala:267:30]
assign io_y_gf = io_y_gf_0; // @[package.scala:267:30]
assign io_y_sw = io_y_sw_0; // @[package.scala:267:30]
assign io_y_sx = io_y_sx_0; // @[package.scala:267:30]
assign io_y_sr = io_y_sr_0; // @[package.scala:267:30]
assign io_y_hw = io_y_hw_0; // @[package.scala:267:30]
assign io_y_hx = io_y_hx_0; // @[package.scala:267:30]
assign io_y_hr = io_y_hr_0; // @[package.scala:267:30]
assign io_y_pw = io_y_pw_0; // @[package.scala:267:30]
assign io_y_px = io_y_px_0; // @[package.scala:267:30]
assign io_y_pr = io_y_pr_0; // @[package.scala:267:30]
assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30]
assign io_y_pal = io_y_pal_0; // @[package.scala:267:30]
assign io_y_paa = io_y_paa_0; // @[package.scala:267:30]
assign io_y_eff = io_y_eff_0; // @[package.scala:267:30]
assign io_y_c = io_y_c_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v3.common.{MicroOp}
import boom.v3.exu.{BrUpdateInfo}
/**
* Object to XOR fold a input register of fullLength into a compressedLength.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
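// Worked example (editor's note, not in the original source): Fold(hist, 8, 27) XORs the
// slices hist(7,0), hist(15,8), hist(23,16) and the zero-extended remainder hist(26,24).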
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask)
}
}
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
// Invert, OR, and invert again so that a PC wider than b keeps its upper bits
// (masking directly with ~(b-1).U would clear every bit of pc above log2Ceil(b)).
~(~pc | (b-1).U)
}
}
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U}
def apply(ip: UInt, isel: UInt): SInt = {
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt
}
}
/**
* Object to get the FP rounding mode out of a packed immediate.
*/
object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } }
/**
 * Object to get the FP function type from a packed immediate.
* Note: only works if !(IS_B or IS_S)
*/
object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } }
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
 * Object to return the position of the lowest set bit at or after the head (wrapping around if none).
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
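// Worked example (editor's note, not in the original source): with in = (T, F, T, F) and
// head = 1, only index 2 survives the i >= head mask, so the result is 2; with head = 3
// nothing at or above head is set, the search falls through to the unmasked copy of `in`,
// and the result wraps back to 0.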
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
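/**
 * Minimal usage sketch (editor's addition, not part of the original file): select the
 * older of two queue indices relative to the current head pointer. For example, with
 * head = 3 in an 8-entry queue, IsOlder(6.U, 1.U, 3.U) is true, since index 6 was
 * allocated before the pointer wrapped and index 1 after it.
 */
object PickOlder
{
  def apply(i0: UInt, i1: UInt, head: UInt): UInt = Mux(IsOlder(i0, i1, head), i0, i1)
}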
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
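// Worked example (editor's note, not in the original source): MaskLower(b00100) = b00111.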
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
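// Worked example (editor's note, not in the original source): MaskUpper(b00100) = b11100.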
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
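// Worked example (editor's note, not in the original source): SelectFirstN(b01101.U, 2)
// gives sels(0) = b00001 and sels(1) = b00100.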
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
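// Worked example (editor's note, not in the original source): with n = 4, k = 2 and input
// valids (1, 0, 1, 1), the scanned counts seen by inputs 0..3 are 01, 10, 10, 00, so input 0
// drives output 0, input 2 drives output 1, and input 3 sees an all-zero slot and is not
// ready this cycle.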
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v3.common.BoomModule()(p)
with boom.v3.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire)
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop)
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop))
io.deq.bits := out
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, out.uop)
// For flow queue behavior.
if (flow) {
when (io.empty) {
io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop)
io.deq.bits := io.enq.bits
io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
do_deq := false.B
when (io.deq.ready) { do_enq := false.B }
}
}
private val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
File rename-stage.scala:
//******************************************************************************
// Copyright (c) 2012 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Datapath: Rename Logic
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Supports 1-cycle and 2-cycle latencies. (aka, passthrough versus registers between ren1 and ren2).
// - ren1: read the map tables and allocate a new physical register from the freelist.
// - ren2: read the busy table for the physical operands.
//
// Ren1 data is provided as an output to be fed directly into the ROB.
package boom.v3.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.common._
import boom.v3.util._
/**
* IO bundle to interface with the Register Rename logic
*
* @param plWidth pipeline width
 * @param numPhysRegs number of physical registers
 * @param numWbPorts number of writeback ports
*/
class RenameStageIO(
val plWidth: Int,
val numPhysRegs: Int,
val numWbPorts: Int)
(implicit p: Parameters) extends BoomBundle
/**
* IO bundle to debug the rename stage
*/
class DebugRenameStageIO(val numPhysRegs: Int)(implicit p: Parameters) extends BoomBundle
{
val freelist = Bits(numPhysRegs.W)
val isprlist = Bits(numPhysRegs.W)
val busytable = UInt(numPhysRegs.W)
}
abstract class AbstractRenameStage(
plWidth: Int,
numPhysRegs: Int,
numWbPorts: Int)
(implicit p: Parameters) extends BoomModule
{
val io = IO(new Bundle {
val ren_stalls = Output(Vec(plWidth, Bool()))
val kill = Input(Bool())
val dec_fire = Input(Vec(plWidth, Bool())) // will commit state updates
val dec_uops = Input(Vec(plWidth, new MicroOp()))
// physical specifiers available AND busy/ready status available.
val ren2_mask = Vec(plWidth, Output(Bool())) // mask of valid instructions
val ren2_uops = Vec(plWidth, Output(new MicroOp()))
// branch resolution (execute)
val brupdate = Input(new BrUpdateInfo())
val dis_fire = Input(Vec(coreWidth, Bool()))
val dis_ready = Input(Bool())
// wakeup ports
val wakeups = Flipped(Vec(numWbPorts, Valid(new ExeUnitResp(xLen))))
// commit stage
val com_valids = Input(Vec(plWidth, Bool()))
val com_uops = Input(Vec(plWidth, new MicroOp()))
val rbk_valids = Input(Vec(plWidth, Bool()))
val rollback = Input(Bool())
val debug_rob_empty = Input(Bool())
val debug = Output(new DebugRenameStageIO(numPhysRegs))
})
io.ren_stalls.foreach(_ := false.B)
io.debug := DontCare
def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp
//-------------------------------------------------------------
// Pipeline State & Wires
// Stage 1
val ren1_fire = Wire(Vec(plWidth, Bool()))
val ren1_uops = Wire(Vec(plWidth, new MicroOp))
// Stage 2
val ren2_fire = io.dis_fire
val ren2_ready = io.dis_ready
val ren2_valids = Wire(Vec(plWidth, Bool()))
val ren2_uops = Wire(Vec(plWidth, new MicroOp))
val ren2_alloc_reqs = Wire(Vec(plWidth, Bool()))
//-------------------------------------------------------------
// pipeline registers
for (w <- 0 until plWidth) {
ren1_fire(w) := io.dec_fire(w)
ren1_uops(w) := io.dec_uops(w)
}
for (w <- 0 until plWidth) {
val r_valid = RegInit(false.B)
val r_uop = Reg(new MicroOp)
val next_uop = Wire(new MicroOp)
next_uop := r_uop
when (io.kill) {
r_valid := false.B
} .elsewhen (ren2_ready) {
r_valid := ren1_fire(w)
next_uop := ren1_uops(w)
} .otherwise {
r_valid := r_valid && !ren2_fire(w) // clear bit if uop gets dispatched
next_uop := r_uop
}
r_uop := GetNewUopAndBrMask(BypassAllocations(next_uop, ren2_uops, ren2_alloc_reqs), io.brupdate)
ren2_valids(w) := r_valid
ren2_uops(w) := r_uop
}
//-------------------------------------------------------------
// Outputs
io.ren2_mask := ren2_valids
}
/**
 * Rename stage that connects the map table, free list, and busy table.
* Can be used in both the FP pipeline and the normal execute pipeline.
*
* @param plWidth pipeline width
 * @param numPhysRegs number of physical registers
 * @param numWbPorts number of writeback ports
 * @param float does this rename stage serve the FP pipeline?
*/
class RenameStage(
plWidth: Int,
numPhysRegs: Int,
numWbPorts: Int,
float: Boolean)
(implicit p: Parameters) extends AbstractRenameStage(plWidth, numPhysRegs, numWbPorts)(p)
{
val pregSz = log2Ceil(numPhysRegs)
val rtype = if (float) RT_FLT else RT_FIX
//-------------------------------------------------------------
// Helper Functions
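// BypassAllocations: if an older uop in the same rename group is allocating a new
// physical register for a logical register that this uop reads (or writes), the map
// table read is stale, so the newly allocated pdst is bypassed into prs1/prs2/prs3
// (or stale_pdst) and the corresponding busy bit is set.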
def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp = {
val bypassed_uop = Wire(new MicroOp)
bypassed_uop := uop
val bypass_hits_rs1 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs1 }
val bypass_hits_rs2 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs2 }
val bypass_hits_rs3 = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.lrs3 }
val bypass_hits_dst = (older_uops zip alloc_reqs) map { case (r,a) => a && r.ldst === uop.ldst }
val bypass_sel_rs1 = PriorityEncoderOH(bypass_hits_rs1.reverse).reverse
val bypass_sel_rs2 = PriorityEncoderOH(bypass_hits_rs2.reverse).reverse
val bypass_sel_rs3 = PriorityEncoderOH(bypass_hits_rs3.reverse).reverse
val bypass_sel_dst = PriorityEncoderOH(bypass_hits_dst.reverse).reverse
val do_bypass_rs1 = bypass_hits_rs1.reduce(_||_)
val do_bypass_rs2 = bypass_hits_rs2.reduce(_||_)
val do_bypass_rs3 = bypass_hits_rs3.reduce(_||_)
val do_bypass_dst = bypass_hits_dst.reduce(_||_)
val bypass_pdsts = older_uops.map(_.pdst)
when (do_bypass_rs1) { bypassed_uop.prs1 := Mux1H(bypass_sel_rs1, bypass_pdsts) }
when (do_bypass_rs2) { bypassed_uop.prs2 := Mux1H(bypass_sel_rs2, bypass_pdsts) }
when (do_bypass_rs3) { bypassed_uop.prs3 := Mux1H(bypass_sel_rs3, bypass_pdsts) }
when (do_bypass_dst) { bypassed_uop.stale_pdst := Mux1H(bypass_sel_dst, bypass_pdsts) }
bypassed_uop.prs1_busy := uop.prs1_busy || do_bypass_rs1
bypassed_uop.prs2_busy := uop.prs2_busy || do_bypass_rs2
bypassed_uop.prs3_busy := uop.prs3_busy || do_bypass_rs3
if (!float) {
bypassed_uop.prs3 := DontCare
bypassed_uop.prs3_busy := false.B
}
bypassed_uop
}
//-------------------------------------------------------------
// Rename Structures
val maptable = Module(new RenameMapTable(
plWidth,
32,
numPhysRegs,
false,
float))
val freelist = Module(new RenameFreeList(
plWidth,
numPhysRegs,
if (float) 32 else 31))
val busytable = Module(new RenameBusyTable(
plWidth,
numPhysRegs,
numWbPorts,
false,
float))
val ren2_br_tags = Wire(Vec(plWidth, Valid(UInt(brTagSz.W))))
// Commit/Rollback
val com_valids = Wire(Vec(plWidth, Bool()))
val rbk_valids = Wire(Vec(plWidth, Bool()))
for (w <- 0 until plWidth) {
ren2_alloc_reqs(w) := ren2_uops(w).ldst_val && ren2_uops(w).dst_rtype === rtype && ren2_fire(w)
ren2_br_tags(w).valid := ren2_fire(w) && ren2_uops(w).allocate_brtag
com_valids(w) := io.com_uops(w).ldst_val && io.com_uops(w).dst_rtype === rtype && io.com_valids(w)
rbk_valids(w) := io.com_uops(w).ldst_val && io.com_uops(w).dst_rtype === rtype && io.rbk_valids(w)
ren2_br_tags(w).bits := ren2_uops(w).br_tag
}
//-------------------------------------------------------------
// Rename Table
// Maptable inputs.
val map_reqs = Wire(Vec(plWidth, new MapReq(lregSz)))
val remap_reqs = Wire(Vec(plWidth, new RemapReq(lregSz, pregSz)))
// Generate maptable requests.
for ((((ren1,ren2),com),w) <- (ren1_uops zip ren2_uops zip io.com_uops.reverse).zipWithIndex) {
map_reqs(w).lrs1 := ren1.lrs1
map_reqs(w).lrs2 := ren1.lrs2
map_reqs(w).lrs3 := ren1.lrs3
map_reqs(w).ldst := ren1.ldst
remap_reqs(w).ldst := Mux(io.rollback, com.ldst , ren2.ldst)
remap_reqs(w).pdst := Mux(io.rollback, com.stale_pdst, ren2.pdst)
}
ren2_alloc_reqs zip rbk_valids.reverse zip remap_reqs map {
case ((a,r),rr) => rr.valid := a || r}
// Hook up inputs.
maptable.io.map_reqs := map_reqs
maptable.io.remap_reqs := remap_reqs
maptable.io.ren_br_tags := ren2_br_tags
maptable.io.brupdate := io.brupdate
maptable.io.rollback := io.rollback
// Maptable outputs.
for ((uop, w) <- ren1_uops.zipWithIndex) {
val mappings = maptable.io.map_resps(w)
uop.prs1 := mappings.prs1
uop.prs2 := mappings.prs2
uop.prs3 := mappings.prs3 // only FP has 3rd operand
uop.stale_pdst := mappings.stale_pdst
}
//-------------------------------------------------------------
// Free List
// Freelist inputs.
freelist.io.reqs := ren2_alloc_reqs
freelist.io.dealloc_pregs zip com_valids zip rbk_valids map
{case ((d,c),r) => d.valid := c || r}
freelist.io.dealloc_pregs zip io.com_uops map
{case (d,c) => d.bits := Mux(io.rollback, c.pdst, c.stale_pdst)}
freelist.io.ren_br_tags := ren2_br_tags
freelist.io.brupdate := io.brupdate
freelist.io.debug.pipeline_empty := io.debug_rob_empty
assert (ren2_alloc_reqs zip freelist.io.alloc_pregs map {case (r,p) => !r || p.bits =/= 0.U} reduce (_&&_),
"[rename-stage] A uop is trying to allocate the zero physical register.")
// Freelist outputs.
for ((uop, w) <- ren2_uops.zipWithIndex) {
val preg = freelist.io.alloc_pregs(w).bits
uop.pdst := Mux(uop.ldst =/= 0.U || float.B, preg, 0.U)
}
//-------------------------------------------------------------
// Busy Table
busytable.io.ren_uops := ren2_uops // expects pdst to be set up.
busytable.io.rebusy_reqs := ren2_alloc_reqs
busytable.io.wb_valids := io.wakeups.map(_.valid)
busytable.io.wb_pdsts := io.wakeups.map(_.bits.uop.pdst)
assert (!(io.wakeups.map(x => x.valid && x.bits.uop.dst_rtype =/= rtype).reduce(_||_)),
"[rename] Wakeup has wrong rtype.")
for ((uop, w) <- ren2_uops.zipWithIndex) {
val busy = busytable.io.busy_resps(w)
uop.prs1_busy := uop.lrs1_rtype === rtype && busy.prs1_busy
uop.prs2_busy := uop.lrs2_rtype === rtype && busy.prs2_busy
uop.prs3_busy := uop.frs3_en && busy.prs3_busy
val valid = ren2_valids(w)
assert (!(valid && busy.prs1_busy && rtype === RT_FIX && uop.lrs1 === 0.U), "[rename] x0 is busy??")
assert (!(valid && busy.prs2_busy && rtype === RT_FIX && uop.lrs2 === 0.U), "[rename] x0 is busy??")
}
//-------------------------------------------------------------
// Outputs
for (w <- 0 until plWidth) {
val can_allocate = freelist.io.alloc_pregs(w).valid
// Push back against Decode stage if Rename1 can't proceed.
io.ren_stalls(w) := (ren2_uops(w).dst_rtype === rtype) && !can_allocate
val bypassed_uop = Wire(new MicroOp)
if (w > 0) bypassed_uop := BypassAllocations(ren2_uops(w), ren2_uops.slice(0,w), ren2_alloc_reqs.slice(0,w))
else bypassed_uop := ren2_uops(w)
io.ren2_uops(w) := GetNewUopAndBrMask(bypassed_uop, io.brupdate)
}
//-------------------------------------------------------------
// Debug signals
io.debug.freelist := freelist.io.debug.freelist
io.debug.isprlist := freelist.io.debug.isprlist
io.debug.busytable := busytable.io.debug.busytable
}
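// Illustrative sketch only (hypothetical module, not part of the generated design):
// BypassAllocations above resolves intra-group dependencies by reversing the hit
// vector before PriorityEncoderOH, so the youngest older uop that writes the same
// logical register supplies the bypassed physical specifier. The names below
// (BypassPrioritySketch, hits, pdsts) are invented for this example; it assumes the
// chisel3._ / chisel3.util._ imports at the top of this file.
class BypassPrioritySketch(n: Int, pregSz: Int) extends Module {
  val io = IO(new Bundle {
    val hits     = Input(Vec(n, Bool()))         // ldst-match per older uop, index 0 = oldest
    val pdsts    = Input(Vec(n, UInt(pregSz.W))) // newly allocated pdst per older uop
    val sel_pdst = Output(UInt(pregSz.W))
    val any_hit  = Output(Bool())
  })
  // Reverse, one-hot priority encode, reverse back: selects the youngest matching writer.
  // sel_pdst is only meaningful when any_hit is asserted, mirroring the do_bypass guards above.
  val sel = PriorityEncoderOH(io.hits.reverse).reverse
  io.sel_pdst := Mux1H(sel, io.pdsts)
  io.any_hit  := io.hits.reduce(_ || _)
}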
class PredRenameStage(
plWidth: Int,
numPhysRegs: Int,
numWbPorts: Int)
(implicit p: Parameters) extends AbstractRenameStage(plWidth, numPhysRegs, numWbPorts)(p)
{
def BypassAllocations(uop: MicroOp, older_uops: Seq[MicroOp], alloc_reqs: Seq[Bool]): MicroOp = {
uop
}
ren2_alloc_reqs := DontCare
val busy_table = RegInit(VecInit(0.U(ftqSz.W).asBools))
val to_busy = WireInit(VecInit(0.U(ftqSz.W).asBools))
val unbusy = WireInit(VecInit(0.U(ftqSz.W).asBools))
val current_ftq_idx = Reg(UInt(log2Ceil(ftqSz).W))
var next_ftq_idx = current_ftq_idx
for (w <- 0 until plWidth) {
io.ren2_uops(w) := ren2_uops(w)
val is_sfb_br = ren2_uops(w).is_sfb_br && ren2_fire(w)
val is_sfb_shadow = ren2_uops(w).is_sfb_shadow && ren2_fire(w)
val ftq_idx = ren2_uops(w).ftq_idx
when (is_sfb_br) {
io.ren2_uops(w).pdst := ftq_idx
to_busy(ftq_idx) := true.B
}
next_ftq_idx = Mux(is_sfb_br, ftq_idx, next_ftq_idx)
when (is_sfb_shadow) {
io.ren2_uops(w).ppred := next_ftq_idx
io.ren2_uops(w).ppred_busy := (busy_table(next_ftq_idx) || to_busy(next_ftq_idx)) && !unbusy(next_ftq_idx)
}
}
for (w <- 0 until numWbPorts) {
when (io.wakeups(w).valid) {
unbusy(io.wakeups(w).bits.uop.pdst) := true.B
}
}
current_ftq_idx := next_ftq_idx
busy_table := ((busy_table.asUInt | to_busy.asUInt) & ~unbusy.asUInt).asBools
}
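// Illustrative sketch only (hypothetical module, not part of the generated design):
// the predicate busy table above is a plain set/clear bit vector indexed by ftq_idx,
// updated as (busy | to_busy) & ~unbusy so a same-cycle wakeup clears the bit even
// if it is being set. The standalone module below shows that update rule in
// isolation; its name and ports (PredBusyTableSketch, set_*, clr_*) are invented for
// this example, and it assumes the chisel3._ / chisel3.util._ imports of this file.
class PredBusyTableSketch(entries: Int) extends Module {
  val io = IO(new Bundle {
    val set_valid = Input(Bool())
    val set_idx   = Input(UInt(log2Ceil(entries).W))
    val clr_valid = Input(Bool())
    val clr_idx   = Input(UInt(log2Ceil(entries).W))
    val busy      = Output(Vec(entries, Bool()))
  })
  val table  = RegInit(VecInit(Seq.fill(entries)(false.B)))
  val to_set = WireInit(VecInit(Seq.fill(entries)(false.B)))
  val to_clr = WireInit(VecInit(Seq.fill(entries)(false.B)))
  when (io.set_valid) { to_set(io.set_idx) := true.B }
  when (io.clr_valid) { to_clr(io.clr_idx) := true.B }
  // Same update rule as busy_table above: clear takes precedence over a same-cycle set.
  table   := ((table.asUInt | to_set.asUInt) & ~to_clr.asUInt).asBools
  io.busy := table
}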
File micro-op.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// MicroOp
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.common
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.exu.FUConstants
/**
* Extension to BoomBundle to add a MicroOp
*/
abstract trait HasBoomUOP extends BoomBundle
{
val uop = new MicroOp()
}
/**
* MicroOp passing through the pipeline
*/
class MicroOp(implicit p: Parameters) extends BoomBundle
with freechips.rocketchip.rocket.constants.MemoryOpConstants
with freechips.rocketchip.rocket.constants.ScalarOpConstants
{
val uopc = UInt(UOPC_SZ.W) // micro-op code
val inst = UInt(32.W)
val debug_inst = UInt(32.W)
val is_rvc = Bool()
val debug_pc = UInt(coreMaxAddrBits.W)
val iq_type = UInt(IQT_SZ.W) // which issue unit do we use?
val fu_code = UInt(FUConstants.FUC_SZ.W) // which functional unit do we use?
val ctrl = new CtrlSignals
// What is the next state of this uop in the issue window? useful
// for the compacting queue.
val iw_state = UInt(2.W)
// Has operand 1 or 2 been woken up speculatively by a load?
// Only integer operands are speculatively woken up,
// so we can ignore p3.
val iw_p1_poisoned = Bool()
val iw_p2_poisoned = Bool()
val is_br = Bool() // is this micro-op a (branch) vs a regular PC+4 inst?
val is_jalr = Bool() // is this a jump? (jal or jalr)
val is_jal = Bool() // is this a JAL (doesn't include JR)? used for branch unit
val is_sfb = Bool() // is this a sfb or in the shadow of a sfb
val br_mask = UInt(maxBrCount.W) // which branches are we being speculated under?
val br_tag = UInt(brTagSz.W)
// Index into FTQ to figure out our fetch PC.
val ftq_idx = UInt(log2Ceil(ftqSz).W)
// This inst straddles two fetch packets
val edge_inst = Bool()
// Low-order bits of our own PC. Combine with ftq[ftq_idx] to get PC.
// Aligned to a cache-line size, as that is the greater fetch granularity.
// TODO: Shouldn't this be aligned to fetch-width size?
val pc_lob = UInt(log2Ceil(icBlockBytes).W)
// Was this a branch that was predicted taken?
val taken = Bool()
val imm_packed = UInt(LONGEST_IMM_SZ.W) // densely pack the imm in decode...
// then translate and sign-extend in execute
val csr_addr = UInt(CSR_ADDR_SZ.W) // only used for critical path reasons in Exe
val rob_idx = UInt(robAddrSz.W)
val ldq_idx = UInt(ldqAddrSz.W)
val stq_idx = UInt(stqAddrSz.W)
val rxq_idx = UInt(log2Ceil(numRxqEntries).W)
val pdst = UInt(maxPregSz.W)
val prs1 = UInt(maxPregSz.W)
val prs2 = UInt(maxPregSz.W)
val prs3 = UInt(maxPregSz.W)
val ppred = UInt(log2Ceil(ftqSz).W)
val prs1_busy = Bool()
val prs2_busy = Bool()
val prs3_busy = Bool()
val ppred_busy = Bool()
val stale_pdst = UInt(maxPregSz.W)
val exception = Bool()
val exc_cause = UInt(xLen.W) // TODO compress this down, xlen is insanity
val bypassable = Bool() // can we bypass ALU results? (doesn't include loads, csr, etc...)
val mem_cmd = UInt(M_SZ.W) // sync primitives/cache flushes
val mem_size = UInt(2.W)
val mem_signed = Bool()
val is_fence = Bool()
val is_fencei = Bool()
val is_amo = Bool()
val uses_ldq = Bool()
val uses_stq = Bool()
val is_sys_pc2epc = Bool() // Is a ECall or Breakpoint -- both set EPC to PC.
val is_unique = Bool() // only allow this instruction in the pipeline, wait for STQ to
// drain, clear fetch after it (tell ROB to un-ready until empty)
val flush_on_commit = Bool() // some instructions need to flush the pipeline behind them
// Predication
def is_sfb_br = is_br && is_sfb && enableSFBOpt.B // Does this write a predicate
def is_sfb_shadow = !is_br && is_sfb && enableSFBOpt.B // Is this predicated
val ldst_is_rs1 = Bool() // If this is set and we are predicated off, copy rs1 to dst,
// else copy rs2 to dst
// logical specifiers (only used in Decode->Rename), except rollback (ldst)
val ldst = UInt(lregSz.W)
val lrs1 = UInt(lregSz.W)
val lrs2 = UInt(lregSz.W)
val lrs3 = UInt(lregSz.W)
val ldst_val = Bool() // is there a destination? invalid for stores, rd==x0, etc.
val dst_rtype = UInt(2.W)
val lrs1_rtype = UInt(2.W)
val lrs2_rtype = UInt(2.W)
val frs3_en = Bool()
// floating point information
val fp_val = Bool() // is a floating-point instruction (F- or D-extension)?
// If it's non-ld/st it will write back exception bits to the fcsr.
val fp_single = Bool() // single-precision floating point instruction (F-extension)
// frontend exception information
val xcpt_pf_if = Bool() // I-TLB page fault.
val xcpt_ae_if = Bool() // I$ access exception.
val xcpt_ma_if = Bool() // Misaligned fetch (jal/br jumping to misaligned addr).
val bp_debug_if = Bool() // Breakpoint
val bp_xcpt_if = Bool() // Breakpoint
// What prediction structure provides the prediction FROM this op
val debug_fsrc = UInt(BSRC_SZ.W)
// What prediction structure provides the prediction TO this op
val debug_tsrc = UInt(BSRC_SZ.W)
// Do we allocate a branch tag for this?
// SFB branches don't get a mask, they get a predicate bit
def allocate_brtag = (is_br && !is_sfb) || is_jalr
// Does this register write-back
def rf_wen = dst_rtype =/= RT_X
// Is it possible for this uop to misspeculate, preventing the commit of subsequent uops?
def unsafe = uses_ldq || (uses_stq && !is_fence) || is_br || is_jalr
def fu_code_is(_fu: UInt) = (fu_code & _fu) =/= 0.U
}
/**
* Control signals within a MicroOp
*
* TODO REFACTOR this, as this should no longer be true, as bypass occurs in stage before branch resolution
*/
class CtrlSignals extends Bundle()
{
val br_type = UInt(BR_N.getWidth.W)
val op1_sel = UInt(OP1_X.getWidth.W)
val op2_sel = UInt(OP2_X.getWidth.W)
val imm_sel = UInt(IS_X.getWidth.W)
val op_fcn = UInt(freechips.rocketchip.rocket.ALU.SZ_ALU_FN.W)
val fcn_dw = Bool()
val csr_cmd = UInt(freechips.rocketchip.rocket.CSR.SZ.W)
val is_load = Bool() // will invoke TLB address lookup
val is_sta = Bool() // will invoke TLB address lookup
val is_std = Bool()
}
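// Illustrative sketch only (hypothetical object, not part of the generated design):
// fu_code is a bit mask with one bit per functional-unit class, so fu_code_is above
// reduces to a single AND followed by a non-zero test, and allocate_brtag gives
// branch tags only to ordinary branches and JALRs (SFB branches use a predicate bit
// instead). The helper below restates both rules outside MicroOp; its name is
// invented for this example and it assumes the chisel3._ import at the top of this file.
object MicroOpPredicateSketch {
  // Functional-unit membership test, as in fu_code_is: one AND plus a non-zero check.
  def hasUnit(fu_code: UInt, unit_mask: UInt): Bool = (fu_code & unit_mask) =/= 0.U
  // Branch-tag allocation rule, as in allocate_brtag.
  def needsBrTag(is_br: Bool, is_sfb: Bool, is_jalr: Bool): Bool = (is_br && !is_sfb) || is_jalr
}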
module PredRenameStage_1( // @[rename-stage.scala:356:7]
input clock, // @[rename-stage.scala:356:7]
input reset, // @[rename-stage.scala:356:7]
input io_kill, // @[rename-stage.scala:60:14]
input io_dec_fire_0, // @[rename-stage.scala:60:14]
input [6:0] io_dec_uops_0_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_dec_uops_0_inst, // @[rename-stage.scala:60:14]
input [31:0] io_dec_uops_0_debug_inst, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_dec_uops_0_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_dec_uops_0_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_dec_uops_0_fu_code, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_br, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_jalr, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_jal, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_sfb, // @[rename-stage.scala:60:14]
input [7:0] io_dec_uops_0_br_mask, // @[rename-stage.scala:60:14]
input [2:0] io_dec_uops_0_br_tag, // @[rename-stage.scala:60:14]
input [3:0] io_dec_uops_0_ftq_idx, // @[rename-stage.scala:60:14]
input io_dec_uops_0_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_0_pc_lob, // @[rename-stage.scala:60:14]
input io_dec_uops_0_taken, // @[rename-stage.scala:60:14]
input [19:0] io_dec_uops_0_imm_packed, // @[rename-stage.scala:60:14]
input io_dec_uops_0_exception, // @[rename-stage.scala:60:14]
input [63:0] io_dec_uops_0_exc_cause, // @[rename-stage.scala:60:14]
input io_dec_uops_0_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_dec_uops_0_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_0_mem_size, // @[rename-stage.scala:60:14]
input io_dec_uops_0_mem_signed, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_fence, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_fencei, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_amo, // @[rename-stage.scala:60:14]
input io_dec_uops_0_uses_ldq, // @[rename-stage.scala:60:14]
input io_dec_uops_0_uses_stq, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_dec_uops_0_is_unique, // @[rename-stage.scala:60:14]
input io_dec_uops_0_flush_on_commit, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_0_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_0_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_0_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_dec_uops_0_lrs3, // @[rename-stage.scala:60:14]
input io_dec_uops_0_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_0_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_0_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_0_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_dec_uops_0_frs3_en, // @[rename-stage.scala:60:14]
input io_dec_uops_0_fp_val, // @[rename-stage.scala:60:14]
input io_dec_uops_0_fp_single, // @[rename-stage.scala:60:14]
input io_dec_uops_0_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_dec_uops_0_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_dec_uops_0_bp_debug_if, // @[rename-stage.scala:60:14]
input io_dec_uops_0_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_dec_uops_0_debug_fsrc, // @[rename-stage.scala:60:14]
input [7:0] io_brupdate_b1_resolve_mask, // @[rename-stage.scala:60:14]
input [7:0] io_brupdate_b1_mispredict_mask, // @[rename-stage.scala:60:14]
input [6:0] io_brupdate_b2_uop_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_brupdate_b2_uop_inst, // @[rename-stage.scala:60:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_brupdate_b2_uop_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_brupdate_b2_uop_fu_code, // @[rename-stage.scala:60:14]
input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[rename-stage.scala:60:14]
input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[rename-stage.scala:60:14]
input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[rename-stage.scala:60:14]
input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ctrl_fcn_dw, // @[rename-stage.scala:60:14]
input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ctrl_is_load, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ctrl_is_sta, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ctrl_is_std, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_iw_state, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_iw_p1_poisoned, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_iw_p2_poisoned, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_br, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_jalr, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_jal, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_sfb, // @[rename-stage.scala:60:14]
input [7:0] io_brupdate_b2_uop_br_mask, // @[rename-stage.scala:60:14]
input [2:0] io_brupdate_b2_uop_br_tag, // @[rename-stage.scala:60:14]
input [3:0] io_brupdate_b2_uop_ftq_idx, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_taken, // @[rename-stage.scala:60:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[rename-stage.scala:60:14]
input [11:0] io_brupdate_b2_uop_csr_addr, // @[rename-stage.scala:60:14]
input [4:0] io_brupdate_b2_uop_rob_idx, // @[rename-stage.scala:60:14]
input [2:0] io_brupdate_b2_uop_ldq_idx, // @[rename-stage.scala:60:14]
input [2:0] io_brupdate_b2_uop_stq_idx, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_pdst, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_prs1, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_prs2, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_prs3, // @[rename-stage.scala:60:14]
input [3:0] io_brupdate_b2_uop_ppred, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_prs1_busy, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_prs2_busy, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_prs3_busy, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ppred_busy, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_stale_pdst, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_exception, // @[rename-stage.scala:60:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_mem_signed, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_fence, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_fencei, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_amo, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_uses_ldq, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_uses_stq, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_is_unique, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_flush_on_commit, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_frs3_en, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_fp_val, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_fp_single, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_bp_debug_if, // @[rename-stage.scala:60:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[rename-stage.scala:60:14]
input io_brupdate_b2_valid, // @[rename-stage.scala:60:14]
input io_brupdate_b2_mispredict, // @[rename-stage.scala:60:14]
input io_brupdate_b2_taken, // @[rename-stage.scala:60:14]
input [2:0] io_brupdate_b2_cfi_type, // @[rename-stage.scala:60:14]
input [1:0] io_brupdate_b2_pc_sel, // @[rename-stage.scala:60:14]
input [39:0] io_brupdate_b2_jalr_target, // @[rename-stage.scala:60:14]
input [20:0] io_brupdate_b2_target_offset, // @[rename-stage.scala:60:14]
input io_dis_fire_0, // @[rename-stage.scala:60:14]
input io_dis_ready, // @[rename-stage.scala:60:14]
input io_com_valids_0, // @[rename-stage.scala:60:14]
input [6:0] io_com_uops_0_uopc, // @[rename-stage.scala:60:14]
input [31:0] io_com_uops_0_inst, // @[rename-stage.scala:60:14]
input [31:0] io_com_uops_0_debug_inst, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_rvc, // @[rename-stage.scala:60:14]
input [39:0] io_com_uops_0_debug_pc, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_0_iq_type, // @[rename-stage.scala:60:14]
input [9:0] io_com_uops_0_fu_code, // @[rename-stage.scala:60:14]
input [3:0] io_com_uops_0_ctrl_br_type, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_ctrl_op1_sel, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_0_ctrl_op2_sel, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_0_ctrl_imm_sel, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_0_ctrl_op_fcn, // @[rename-stage.scala:60:14]
input io_com_uops_0_ctrl_fcn_dw, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_0_ctrl_csr_cmd, // @[rename-stage.scala:60:14]
input io_com_uops_0_ctrl_is_load, // @[rename-stage.scala:60:14]
input io_com_uops_0_ctrl_is_sta, // @[rename-stage.scala:60:14]
input io_com_uops_0_ctrl_is_std, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_iw_state, // @[rename-stage.scala:60:14]
input io_com_uops_0_iw_p1_poisoned, // @[rename-stage.scala:60:14]
input io_com_uops_0_iw_p2_poisoned, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_br, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_jalr, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_jal, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_sfb, // @[rename-stage.scala:60:14]
input [7:0] io_com_uops_0_br_mask, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_0_br_tag, // @[rename-stage.scala:60:14]
input [3:0] io_com_uops_0_ftq_idx, // @[rename-stage.scala:60:14]
input io_com_uops_0_edge_inst, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_pc_lob, // @[rename-stage.scala:60:14]
input io_com_uops_0_taken, // @[rename-stage.scala:60:14]
input [19:0] io_com_uops_0_imm_packed, // @[rename-stage.scala:60:14]
input [11:0] io_com_uops_0_csr_addr, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_0_rob_idx, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_0_ldq_idx, // @[rename-stage.scala:60:14]
input [2:0] io_com_uops_0_stq_idx, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_rxq_idx, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_pdst, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_prs1, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_prs2, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_prs3, // @[rename-stage.scala:60:14]
input [3:0] io_com_uops_0_ppred, // @[rename-stage.scala:60:14]
input io_com_uops_0_prs1_busy, // @[rename-stage.scala:60:14]
input io_com_uops_0_prs2_busy, // @[rename-stage.scala:60:14]
input io_com_uops_0_prs3_busy, // @[rename-stage.scala:60:14]
input io_com_uops_0_ppred_busy, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_stale_pdst, // @[rename-stage.scala:60:14]
input io_com_uops_0_exception, // @[rename-stage.scala:60:14]
input [63:0] io_com_uops_0_exc_cause, // @[rename-stage.scala:60:14]
input io_com_uops_0_bypassable, // @[rename-stage.scala:60:14]
input [4:0] io_com_uops_0_mem_cmd, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_mem_size, // @[rename-stage.scala:60:14]
input io_com_uops_0_mem_signed, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_fence, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_fencei, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_amo, // @[rename-stage.scala:60:14]
input io_com_uops_0_uses_ldq, // @[rename-stage.scala:60:14]
input io_com_uops_0_uses_stq, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_sys_pc2epc, // @[rename-stage.scala:60:14]
input io_com_uops_0_is_unique, // @[rename-stage.scala:60:14]
input io_com_uops_0_flush_on_commit, // @[rename-stage.scala:60:14]
input io_com_uops_0_ldst_is_rs1, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_ldst, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_lrs1, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_lrs2, // @[rename-stage.scala:60:14]
input [5:0] io_com_uops_0_lrs3, // @[rename-stage.scala:60:14]
input io_com_uops_0_ldst_val, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_dst_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_lrs1_rtype, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_lrs2_rtype, // @[rename-stage.scala:60:14]
input io_com_uops_0_frs3_en, // @[rename-stage.scala:60:14]
input io_com_uops_0_fp_val, // @[rename-stage.scala:60:14]
input io_com_uops_0_fp_single, // @[rename-stage.scala:60:14]
input io_com_uops_0_xcpt_pf_if, // @[rename-stage.scala:60:14]
input io_com_uops_0_xcpt_ae_if, // @[rename-stage.scala:60:14]
input io_com_uops_0_xcpt_ma_if, // @[rename-stage.scala:60:14]
input io_com_uops_0_bp_debug_if, // @[rename-stage.scala:60:14]
input io_com_uops_0_bp_xcpt_if, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_debug_fsrc, // @[rename-stage.scala:60:14]
input [1:0] io_com_uops_0_debug_tsrc, // @[rename-stage.scala:60:14]
input io_rbk_valids_0, // @[rename-stage.scala:60:14]
input io_rollback, // @[rename-stage.scala:60:14]
input io_debug_rob_empty // @[rename-stage.scala:60:14]
);
wire [1:0] next_uop_debug_tsrc; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_debug_fsrc; // @[rename-stage.scala:123:24]
wire next_uop_bp_xcpt_if; // @[rename-stage.scala:123:24]
wire next_uop_bp_debug_if; // @[rename-stage.scala:123:24]
wire next_uop_xcpt_ma_if; // @[rename-stage.scala:123:24]
wire next_uop_xcpt_ae_if; // @[rename-stage.scala:123:24]
wire next_uop_xcpt_pf_if; // @[rename-stage.scala:123:24]
wire next_uop_fp_single; // @[rename-stage.scala:123:24]
wire next_uop_fp_val; // @[rename-stage.scala:123:24]
wire next_uop_frs3_en; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_lrs2_rtype; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_lrs1_rtype; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_dst_rtype; // @[rename-stage.scala:123:24]
wire next_uop_ldst_val; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_lrs3; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_lrs2; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_lrs1; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_ldst; // @[rename-stage.scala:123:24]
wire next_uop_ldst_is_rs1; // @[rename-stage.scala:123:24]
wire next_uop_flush_on_commit; // @[rename-stage.scala:123:24]
wire next_uop_is_unique; // @[rename-stage.scala:123:24]
wire next_uop_is_sys_pc2epc; // @[rename-stage.scala:123:24]
wire next_uop_uses_stq; // @[rename-stage.scala:123:24]
wire next_uop_uses_ldq; // @[rename-stage.scala:123:24]
wire next_uop_is_amo; // @[rename-stage.scala:123:24]
wire next_uop_is_fencei; // @[rename-stage.scala:123:24]
wire next_uop_is_fence; // @[rename-stage.scala:123:24]
wire next_uop_mem_signed; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_mem_size; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_mem_cmd; // @[rename-stage.scala:123:24]
wire next_uop_bypassable; // @[rename-stage.scala:123:24]
wire [63:0] next_uop_exc_cause; // @[rename-stage.scala:123:24]
wire next_uop_exception; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_stale_pdst; // @[rename-stage.scala:123:24]
wire next_uop_ppred_busy; // @[rename-stage.scala:123:24]
wire next_uop_prs3_busy; // @[rename-stage.scala:123:24]
wire next_uop_prs2_busy; // @[rename-stage.scala:123:24]
wire next_uop_prs1_busy; // @[rename-stage.scala:123:24]
wire [3:0] next_uop_ppred; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_prs3; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_prs2; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_prs1; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_pdst; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_rxq_idx; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_stq_idx; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_ldq_idx; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_rob_idx; // @[rename-stage.scala:123:24]
wire [11:0] next_uop_csr_addr; // @[rename-stage.scala:123:24]
wire [19:0] next_uop_imm_packed; // @[rename-stage.scala:123:24]
wire next_uop_taken; // @[rename-stage.scala:123:24]
wire [5:0] next_uop_pc_lob; // @[rename-stage.scala:123:24]
wire next_uop_edge_inst; // @[rename-stage.scala:123:24]
wire [3:0] next_uop_ftq_idx; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_br_tag; // @[rename-stage.scala:123:24]
wire next_uop_is_sfb; // @[rename-stage.scala:123:24]
wire next_uop_is_jal; // @[rename-stage.scala:123:24]
wire next_uop_is_jalr; // @[rename-stage.scala:123:24]
wire next_uop_is_br; // @[rename-stage.scala:123:24]
wire next_uop_iw_p2_poisoned; // @[rename-stage.scala:123:24]
wire next_uop_iw_p1_poisoned; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_iw_state; // @[rename-stage.scala:123:24]
wire [9:0] next_uop_fu_code; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_iq_type; // @[rename-stage.scala:123:24]
wire [39:0] next_uop_debug_pc; // @[rename-stage.scala:123:24]
wire next_uop_is_rvc; // @[rename-stage.scala:123:24]
wire [31:0] next_uop_debug_inst; // @[rename-stage.scala:123:24]
wire [31:0] next_uop_inst; // @[rename-stage.scala:123:24]
wire [6:0] next_uop_uopc; // @[rename-stage.scala:123:24]
wire next_uop_ctrl_is_std; // @[rename-stage.scala:123:24]
wire next_uop_ctrl_is_sta; // @[rename-stage.scala:123:24]
wire next_uop_ctrl_is_load; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_ctrl_csr_cmd; // @[rename-stage.scala:123:24]
wire next_uop_ctrl_fcn_dw; // @[rename-stage.scala:123:24]
wire [4:0] next_uop_ctrl_op_fcn; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_ctrl_imm_sel; // @[rename-stage.scala:123:24]
wire [2:0] next_uop_ctrl_op2_sel; // @[rename-stage.scala:123:24]
wire [1:0] next_uop_ctrl_op1_sel; // @[rename-stage.scala:123:24]
wire [3:0] next_uop_ctrl_br_type; // @[rename-stage.scala:123:24]
wire io_kill_0 = io_kill; // @[rename-stage.scala:356:7]
wire io_dec_fire_0_0 = io_dec_fire_0; // @[rename-stage.scala:356:7]
wire [6:0] io_dec_uops_0_uopc_0 = io_dec_uops_0_uopc; // @[rename-stage.scala:356:7]
wire [31:0] io_dec_uops_0_inst_0 = io_dec_uops_0_inst; // @[rename-stage.scala:356:7]
wire [31:0] io_dec_uops_0_debug_inst_0 = io_dec_uops_0_debug_inst; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_is_rvc_0 = io_dec_uops_0_is_rvc; // @[rename-stage.scala:356:7]
wire [39:0] io_dec_uops_0_debug_pc_0 = io_dec_uops_0_debug_pc; // @[rename-stage.scala:356:7]
wire [2:0] io_dec_uops_0_iq_type_0 = io_dec_uops_0_iq_type; // @[rename-stage.scala:356:7]
wire [9:0] io_dec_uops_0_fu_code_0 = io_dec_uops_0_fu_code; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_is_br_0 = io_dec_uops_0_is_br; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_is_jalr_0 = io_dec_uops_0_is_jalr; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_is_jal_0 = io_dec_uops_0_is_jal; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_is_sfb_0 = io_dec_uops_0_is_sfb; // @[rename-stage.scala:356:7]
wire [7:0] io_dec_uops_0_br_mask_0 = io_dec_uops_0_br_mask; // @[rename-stage.scala:356:7]
wire [2:0] io_dec_uops_0_br_tag_0 = io_dec_uops_0_br_tag; // @[rename-stage.scala:356:7]
wire [3:0] io_dec_uops_0_ftq_idx_0 = io_dec_uops_0_ftq_idx; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_edge_inst_0 = io_dec_uops_0_edge_inst; // @[rename-stage.scala:356:7]
wire [5:0] io_dec_uops_0_pc_lob_0 = io_dec_uops_0_pc_lob; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_taken_0 = io_dec_uops_0_taken; // @[rename-stage.scala:356:7]
wire [19:0] io_dec_uops_0_imm_packed_0 = io_dec_uops_0_imm_packed; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_exception_0 = io_dec_uops_0_exception; // @[rename-stage.scala:356:7]
wire [63:0] io_dec_uops_0_exc_cause_0 = io_dec_uops_0_exc_cause; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_bypassable_0 = io_dec_uops_0_bypassable; // @[rename-stage.scala:356:7]
wire [4:0] io_dec_uops_0_mem_cmd_0 = io_dec_uops_0_mem_cmd; // @[rename-stage.scala:356:7]
wire [1:0] io_dec_uops_0_mem_size_0 = io_dec_uops_0_mem_size; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_mem_signed_0 = io_dec_uops_0_mem_signed; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_is_fence_0 = io_dec_uops_0_is_fence; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_is_fencei_0 = io_dec_uops_0_is_fencei; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_is_amo_0 = io_dec_uops_0_is_amo; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_uses_ldq_0 = io_dec_uops_0_uses_ldq; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_uses_stq_0 = io_dec_uops_0_uses_stq; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_is_sys_pc2epc_0 = io_dec_uops_0_is_sys_pc2epc; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_is_unique_0 = io_dec_uops_0_is_unique; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_flush_on_commit_0 = io_dec_uops_0_flush_on_commit; // @[rename-stage.scala:356:7]
wire [5:0] io_dec_uops_0_ldst_0 = io_dec_uops_0_ldst; // @[rename-stage.scala:356:7]
wire [5:0] io_dec_uops_0_lrs1_0 = io_dec_uops_0_lrs1; // @[rename-stage.scala:356:7]
wire [5:0] io_dec_uops_0_lrs2_0 = io_dec_uops_0_lrs2; // @[rename-stage.scala:356:7]
wire [5:0] io_dec_uops_0_lrs3_0 = io_dec_uops_0_lrs3; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_ldst_val_0 = io_dec_uops_0_ldst_val; // @[rename-stage.scala:356:7]
wire [1:0] io_dec_uops_0_dst_rtype_0 = io_dec_uops_0_dst_rtype; // @[rename-stage.scala:356:7]
wire [1:0] io_dec_uops_0_lrs1_rtype_0 = io_dec_uops_0_lrs1_rtype; // @[rename-stage.scala:356:7]
wire [1:0] io_dec_uops_0_lrs2_rtype_0 = io_dec_uops_0_lrs2_rtype; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_frs3_en_0 = io_dec_uops_0_frs3_en; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_fp_val_0 = io_dec_uops_0_fp_val; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_fp_single_0 = io_dec_uops_0_fp_single; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_xcpt_pf_if_0 = io_dec_uops_0_xcpt_pf_if; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_xcpt_ae_if_0 = io_dec_uops_0_xcpt_ae_if; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_bp_debug_if_0 = io_dec_uops_0_bp_debug_if; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_bp_xcpt_if_0 = io_dec_uops_0_bp_xcpt_if; // @[rename-stage.scala:356:7]
wire [1:0] io_dec_uops_0_debug_fsrc_0 = io_dec_uops_0_debug_fsrc; // @[rename-stage.scala:356:7]
wire [7:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[rename-stage.scala:356:7]
wire [7:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[rename-stage.scala:356:7]
wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[rename-stage.scala:356:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[rename-stage.scala:356:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[rename-stage.scala:356:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[rename-stage.scala:356:7]
wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[rename-stage.scala:356:7]
wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[rename-stage.scala:356:7]
wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[rename-stage.scala:356:7]
wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[rename-stage.scala:356:7]
wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[rename-stage.scala:356:7]
wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[rename-stage.scala:356:7]
wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[rename-stage.scala:356:7]
wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[rename-stage.scala:356:7]
wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[rename-stage.scala:356:7]
wire [7:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[rename-stage.scala:356:7]
wire [2:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[rename-stage.scala:356:7]
wire [3:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[rename-stage.scala:356:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[rename-stage.scala:356:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[rename-stage.scala:356:7]
wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[rename-stage.scala:356:7]
wire [4:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[rename-stage.scala:356:7]
wire [2:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[rename-stage.scala:356:7]
wire [2:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[rename-stage.scala:356:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[rename-stage.scala:356:7]
wire [5:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[rename-stage.scala:356:7]
wire [5:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[rename-stage.scala:356:7]
wire [5:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[rename-stage.scala:356:7]
wire [5:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[rename-stage.scala:356:7]
wire [3:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[rename-stage.scala:356:7]
wire [5:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[rename-stage.scala:356:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[rename-stage.scala:356:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[rename-stage.scala:356:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[rename-stage.scala:356:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[rename-stage.scala:356:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[rename-stage.scala:356:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[rename-stage.scala:356:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[rename-stage.scala:356:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[rename-stage.scala:356:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[rename-stage.scala:356:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[rename-stage.scala:356:7]
wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[rename-stage.scala:356:7]
wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[rename-stage.scala:356:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[rename-stage.scala:356:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[rename-stage.scala:356:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[rename-stage.scala:356:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[rename-stage.scala:356:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[rename-stage.scala:356:7]
wire io_dis_fire_0_0 = io_dis_fire_0; // @[rename-stage.scala:356:7]
wire io_dis_ready_0 = io_dis_ready; // @[rename-stage.scala:356:7]
wire io_com_valids_0_0 = io_com_valids_0; // @[rename-stage.scala:356:7]
wire [6:0] io_com_uops_0_uopc_0 = io_com_uops_0_uopc; // @[rename-stage.scala:356:7]
wire [31:0] io_com_uops_0_inst_0 = io_com_uops_0_inst; // @[rename-stage.scala:356:7]
wire [31:0] io_com_uops_0_debug_inst_0 = io_com_uops_0_debug_inst; // @[rename-stage.scala:356:7]
wire io_com_uops_0_is_rvc_0 = io_com_uops_0_is_rvc; // @[rename-stage.scala:356:7]
wire [39:0] io_com_uops_0_debug_pc_0 = io_com_uops_0_debug_pc; // @[rename-stage.scala:356:7]
wire [2:0] io_com_uops_0_iq_type_0 = io_com_uops_0_iq_type; // @[rename-stage.scala:356:7]
wire [9:0] io_com_uops_0_fu_code_0 = io_com_uops_0_fu_code; // @[rename-stage.scala:356:7]
wire [3:0] io_com_uops_0_ctrl_br_type_0 = io_com_uops_0_ctrl_br_type; // @[rename-stage.scala:356:7]
wire [1:0] io_com_uops_0_ctrl_op1_sel_0 = io_com_uops_0_ctrl_op1_sel; // @[rename-stage.scala:356:7]
wire [2:0] io_com_uops_0_ctrl_op2_sel_0 = io_com_uops_0_ctrl_op2_sel; // @[rename-stage.scala:356:7]
wire [2:0] io_com_uops_0_ctrl_imm_sel_0 = io_com_uops_0_ctrl_imm_sel; // @[rename-stage.scala:356:7]
wire [4:0] io_com_uops_0_ctrl_op_fcn_0 = io_com_uops_0_ctrl_op_fcn; // @[rename-stage.scala:356:7]
wire io_com_uops_0_ctrl_fcn_dw_0 = io_com_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:356:7]
wire [2:0] io_com_uops_0_ctrl_csr_cmd_0 = io_com_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:356:7]
wire io_com_uops_0_ctrl_is_load_0 = io_com_uops_0_ctrl_is_load; // @[rename-stage.scala:356:7]
wire io_com_uops_0_ctrl_is_sta_0 = io_com_uops_0_ctrl_is_sta; // @[rename-stage.scala:356:7]
wire io_com_uops_0_ctrl_is_std_0 = io_com_uops_0_ctrl_is_std; // @[rename-stage.scala:356:7]
wire [1:0] io_com_uops_0_iw_state_0 = io_com_uops_0_iw_state; // @[rename-stage.scala:356:7]
wire io_com_uops_0_iw_p1_poisoned_0 = io_com_uops_0_iw_p1_poisoned; // @[rename-stage.scala:356:7]
wire io_com_uops_0_iw_p2_poisoned_0 = io_com_uops_0_iw_p2_poisoned; // @[rename-stage.scala:356:7]
wire io_com_uops_0_is_br_0 = io_com_uops_0_is_br; // @[rename-stage.scala:356:7]
wire io_com_uops_0_is_jalr_0 = io_com_uops_0_is_jalr; // @[rename-stage.scala:356:7]
wire io_com_uops_0_is_jal_0 = io_com_uops_0_is_jal; // @[rename-stage.scala:356:7]
wire io_com_uops_0_is_sfb_0 = io_com_uops_0_is_sfb; // @[rename-stage.scala:356:7]
wire [7:0] io_com_uops_0_br_mask_0 = io_com_uops_0_br_mask; // @[rename-stage.scala:356:7]
wire [2:0] io_com_uops_0_br_tag_0 = io_com_uops_0_br_tag; // @[rename-stage.scala:356:7]
wire [3:0] io_com_uops_0_ftq_idx_0 = io_com_uops_0_ftq_idx; // @[rename-stage.scala:356:7]
wire io_com_uops_0_edge_inst_0 = io_com_uops_0_edge_inst; // @[rename-stage.scala:356:7]
wire [5:0] io_com_uops_0_pc_lob_0 = io_com_uops_0_pc_lob; // @[rename-stage.scala:356:7]
wire io_com_uops_0_taken_0 = io_com_uops_0_taken; // @[rename-stage.scala:356:7]
wire [19:0] io_com_uops_0_imm_packed_0 = io_com_uops_0_imm_packed; // @[rename-stage.scala:356:7]
wire [11:0] io_com_uops_0_csr_addr_0 = io_com_uops_0_csr_addr; // @[rename-stage.scala:356:7]
wire [4:0] io_com_uops_0_rob_idx_0 = io_com_uops_0_rob_idx; // @[rename-stage.scala:356:7]
wire [2:0] io_com_uops_0_ldq_idx_0 = io_com_uops_0_ldq_idx; // @[rename-stage.scala:356:7]
wire [2:0] io_com_uops_0_stq_idx_0 = io_com_uops_0_stq_idx; // @[rename-stage.scala:356:7]
wire [1:0] io_com_uops_0_rxq_idx_0 = io_com_uops_0_rxq_idx; // @[rename-stage.scala:356:7]
wire [5:0] io_com_uops_0_pdst_0 = io_com_uops_0_pdst; // @[rename-stage.scala:356:7]
wire [5:0] io_com_uops_0_prs1_0 = io_com_uops_0_prs1; // @[rename-stage.scala:356:7]
wire [5:0] io_com_uops_0_prs2_0 = io_com_uops_0_prs2; // @[rename-stage.scala:356:7]
wire [5:0] io_com_uops_0_prs3_0 = io_com_uops_0_prs3; // @[rename-stage.scala:356:7]
wire [3:0] io_com_uops_0_ppred_0 = io_com_uops_0_ppred; // @[rename-stage.scala:356:7]
wire io_com_uops_0_prs1_busy_0 = io_com_uops_0_prs1_busy; // @[rename-stage.scala:356:7]
wire io_com_uops_0_prs2_busy_0 = io_com_uops_0_prs2_busy; // @[rename-stage.scala:356:7]
wire io_com_uops_0_prs3_busy_0 = io_com_uops_0_prs3_busy; // @[rename-stage.scala:356:7]
wire io_com_uops_0_ppred_busy_0 = io_com_uops_0_ppred_busy; // @[rename-stage.scala:356:7]
wire [5:0] io_com_uops_0_stale_pdst_0 = io_com_uops_0_stale_pdst; // @[rename-stage.scala:356:7]
wire io_com_uops_0_exception_0 = io_com_uops_0_exception; // @[rename-stage.scala:356:7]
wire [63:0] io_com_uops_0_exc_cause_0 = io_com_uops_0_exc_cause; // @[rename-stage.scala:356:7]
wire io_com_uops_0_bypassable_0 = io_com_uops_0_bypassable; // @[rename-stage.scala:356:7]
wire [4:0] io_com_uops_0_mem_cmd_0 = io_com_uops_0_mem_cmd; // @[rename-stage.scala:356:7]
wire [1:0] io_com_uops_0_mem_size_0 = io_com_uops_0_mem_size; // @[rename-stage.scala:356:7]
wire io_com_uops_0_mem_signed_0 = io_com_uops_0_mem_signed; // @[rename-stage.scala:356:7]
wire io_com_uops_0_is_fence_0 = io_com_uops_0_is_fence; // @[rename-stage.scala:356:7]
wire io_com_uops_0_is_fencei_0 = io_com_uops_0_is_fencei; // @[rename-stage.scala:356:7]
wire io_com_uops_0_is_amo_0 = io_com_uops_0_is_amo; // @[rename-stage.scala:356:7]
wire io_com_uops_0_uses_ldq_0 = io_com_uops_0_uses_ldq; // @[rename-stage.scala:356:7]
wire io_com_uops_0_uses_stq_0 = io_com_uops_0_uses_stq; // @[rename-stage.scala:356:7]
wire io_com_uops_0_is_sys_pc2epc_0 = io_com_uops_0_is_sys_pc2epc; // @[rename-stage.scala:356:7]
wire io_com_uops_0_is_unique_0 = io_com_uops_0_is_unique; // @[rename-stage.scala:356:7]
wire io_com_uops_0_flush_on_commit_0 = io_com_uops_0_flush_on_commit; // @[rename-stage.scala:356:7]
wire io_com_uops_0_ldst_is_rs1_0 = io_com_uops_0_ldst_is_rs1; // @[rename-stage.scala:356:7]
wire [5:0] io_com_uops_0_ldst_0 = io_com_uops_0_ldst; // @[rename-stage.scala:356:7]
wire [5:0] io_com_uops_0_lrs1_0 = io_com_uops_0_lrs1; // @[rename-stage.scala:356:7]
wire [5:0] io_com_uops_0_lrs2_0 = io_com_uops_0_lrs2; // @[rename-stage.scala:356:7]
wire [5:0] io_com_uops_0_lrs3_0 = io_com_uops_0_lrs3; // @[rename-stage.scala:356:7]
wire io_com_uops_0_ldst_val_0 = io_com_uops_0_ldst_val; // @[rename-stage.scala:356:7]
wire [1:0] io_com_uops_0_dst_rtype_0 = io_com_uops_0_dst_rtype; // @[rename-stage.scala:356:7]
wire [1:0] io_com_uops_0_lrs1_rtype_0 = io_com_uops_0_lrs1_rtype; // @[rename-stage.scala:356:7]
wire [1:0] io_com_uops_0_lrs2_rtype_0 = io_com_uops_0_lrs2_rtype; // @[rename-stage.scala:356:7]
wire io_com_uops_0_frs3_en_0 = io_com_uops_0_frs3_en; // @[rename-stage.scala:356:7]
wire io_com_uops_0_fp_val_0 = io_com_uops_0_fp_val; // @[rename-stage.scala:356:7]
wire io_com_uops_0_fp_single_0 = io_com_uops_0_fp_single; // @[rename-stage.scala:356:7]
wire io_com_uops_0_xcpt_pf_if_0 = io_com_uops_0_xcpt_pf_if; // @[rename-stage.scala:356:7]
wire io_com_uops_0_xcpt_ae_if_0 = io_com_uops_0_xcpt_ae_if; // @[rename-stage.scala:356:7]
wire io_com_uops_0_xcpt_ma_if_0 = io_com_uops_0_xcpt_ma_if; // @[rename-stage.scala:356:7]
wire io_com_uops_0_bp_debug_if_0 = io_com_uops_0_bp_debug_if; // @[rename-stage.scala:356:7]
wire io_com_uops_0_bp_xcpt_if_0 = io_com_uops_0_bp_xcpt_if; // @[rename-stage.scala:356:7]
wire [1:0] io_com_uops_0_debug_fsrc_0 = io_com_uops_0_debug_fsrc; // @[rename-stage.scala:356:7]
wire [1:0] io_com_uops_0_debug_tsrc_0 = io_com_uops_0_debug_tsrc; // @[rename-stage.scala:356:7]
wire io_rbk_valids_0_0 = io_rbk_valids_0; // @[rename-stage.scala:356:7]
wire io_rollback_0 = io_rollback; // @[rename-stage.scala:356:7]
wire io_debug_rob_empty_0 = io_debug_rob_empty; // @[rename-stage.scala:356:7]
wire [15:0] io_debug_freelist = 16'h0; // @[rename-stage.scala:356:7]
wire [15:0] io_debug_isprlist = 16'h0; // @[rename-stage.scala:356:7]
wire [15:0] io_debug_busytable = 16'h0; // @[rename-stage.scala:356:7]
wire [63:0] io_wakeups_0_bits_uop_exc_cause = 64'h0; // @[rename-stage.scala:60:14, :356:7]
wire [63:0] io_wakeups_0_bits_data = 64'h0; // @[rename-stage.scala:60:14, :356:7]
wire [63:0] io_wakeups_0_bits_fflags_bits_uop_exc_cause = 64'h0; // @[rename-stage.scala:60:14, :356:7]
wire [19:0] io_wakeups_0_bits_uop_imm_packed = 20'h0; // @[rename-stage.scala:60:14, :356:7]
wire [19:0] io_wakeups_0_bits_fflags_bits_uop_imm_packed = 20'h0; // @[rename-stage.scala:60:14, :356:7]
wire [7:0] io_wakeups_0_bits_uop_br_mask = 8'h0; // @[rename-stage.scala:60:14, :356:7, :402:{47,65}]
wire [7:0] io_wakeups_0_bits_fflags_bits_uop_br_mask = 8'h0; // @[rename-stage.scala:60:14, :356:7, :402:{47,65}]
wire [7:0] lo_1 = 8'h0; // @[rename-stage.scala:60:14, :356:7, :402:{47,65}]
wire [7:0] hi_1 = 8'h0; // @[rename-stage.scala:60:14, :356:7, :402:{47,65}]
wire [7:0] lo_2 = 8'h0; // @[rename-stage.scala:60:14, :356:7, :402:{47,65}]
wire [7:0] hi_2 = 8'h0; // @[rename-stage.scala:60:14, :356:7, :402:{47,65}]
wire [9:0] io_wakeups_0_bits_uop_fu_code = 10'h0; // @[rename-stage.scala:60:14, :356:7]
wire [9:0] io_wakeups_0_bits_fflags_bits_uop_fu_code = 10'h0; // @[rename-stage.scala:60:14, :356:7]
wire [39:0] io_wakeups_0_bits_uop_debug_pc = 40'h0; // @[rename-stage.scala:60:14, :356:7]
wire [39:0] io_wakeups_0_bits_fflags_bits_uop_debug_pc = 40'h0; // @[rename-stage.scala:60:14, :356:7]
wire [31:0] io_wakeups_0_bits_uop_inst = 32'h0; // @[rename-stage.scala:60:14, :356:7]
wire [31:0] io_wakeups_0_bits_uop_debug_inst = 32'h0; // @[rename-stage.scala:60:14, :356:7]
wire [31:0] io_wakeups_0_bits_fflags_bits_uop_inst = 32'h0; // @[rename-stage.scala:60:14, :356:7]
wire [31:0] io_wakeups_0_bits_fflags_bits_uop_debug_inst = 32'h0; // @[rename-stage.scala:60:14, :356:7]
wire [6:0] io_wakeups_0_bits_uop_uopc = 7'h0; // @[rename-stage.scala:60:14, :356:7]
wire [6:0] io_wakeups_0_bits_fflags_bits_uop_uopc = 7'h0; // @[rename-stage.scala:60:14, :356:7]
wire _io_ren2_uops_0_ppred_busy_T_1 = 1'h1; // @[rename-stage.scala:390:92]
wire [5:0] io_dec_uops_0_pdst = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_dec_uops_0_prs1 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_dec_uops_0_prs2 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_dec_uops_0_prs3 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_dec_uops_0_stale_pdst = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_uop_pc_lob = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_uop_pdst = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_uop_prs1 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_uop_prs2 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_uop_prs3 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_uop_stale_pdst = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_uop_ldst = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_uop_lrs1 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_uop_lrs2 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_uop_lrs3 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_pc_lob = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_pdst = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_prs1 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_prs2 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_prs3 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_stale_pdst = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_ldst = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs1 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs2 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] io_wakeups_0_bits_fflags_bits_uop_lrs3 = 6'h0; // @[rename-stage.scala:356:7]
wire [5:0] ren1_uops_0_pdst = 6'h0; // @[rename-stage.scala:101:29]
wire [5:0] ren1_uops_0_prs1 = 6'h0; // @[rename-stage.scala:101:29]
wire [5:0] ren1_uops_0_prs2 = 6'h0; // @[rename-stage.scala:101:29]
wire [5:0] ren1_uops_0_prs3 = 6'h0; // @[rename-stage.scala:101:29]
wire [5:0] ren1_uops_0_stale_pdst = 6'h0; // @[rename-stage.scala:101:29]
wire [11:0] io_dec_uops_0_csr_addr = 12'h0; // @[rename-stage.scala:356:7]
wire [11:0] io_wakeups_0_bits_uop_csr_addr = 12'h0; // @[rename-stage.scala:356:7]
wire [11:0] io_wakeups_0_bits_fflags_bits_uop_csr_addr = 12'h0; // @[rename-stage.scala:356:7]
wire [11:0] ren1_uops_0_csr_addr = 12'h0; // @[rename-stage.scala:101:29]
wire io_ren_stalls_0 = 1'h0; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_ctrl_is_load = 1'h0; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_ctrl_is_sta = 1'h0; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_ctrl_is_std = 1'h0; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_prs1_busy = 1'h0; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_prs2_busy = 1'h0; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_prs3_busy = 1'h0; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_ppred_busy = 1'h0; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:356:7]
wire io_dec_uops_0_xcpt_ma_if = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_valid = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_is_rvc = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_ctrl_is_load = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_ctrl_is_sta = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_ctrl_is_std = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_is_br = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_is_jalr = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_is_jal = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_is_sfb = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_edge_inst = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_taken = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_prs1_busy = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_prs2_busy = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_prs3_busy = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_ppred_busy = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_exception = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_bypassable = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_mem_signed = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_is_fence = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_is_fencei = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_is_amo = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_uses_ldq = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_uses_stq = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_is_sys_pc2epc = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_is_unique = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_flush_on_commit = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_ldst_val = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_frs3_en = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_fp_val = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_fp_single = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_xcpt_pf_if = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_xcpt_ae_if = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_xcpt_ma_if = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_bp_debug_if = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_uop_bp_xcpt_if = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_predicated = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_valid = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_rvc = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_load = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_sta = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_ctrl_is_std = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_br = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_jalr = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_jal = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_sfb = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_edge_inst = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_taken = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_prs1_busy = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_prs2_busy = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_prs3_busy = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_ppred_busy = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_exception = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_bypassable = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_mem_signed = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_fence = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_fencei = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_amo = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_uses_ldq = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_uses_stq = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_sys_pc2epc = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_is_unique = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_flush_on_commit = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_ldst_val = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_frs3_en = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_fp_val = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_fp_single = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_xcpt_pf_if = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_xcpt_ae_if = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_xcpt_ma_if = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_bp_debug_if = 1'h0; // @[rename-stage.scala:356:7]
wire io_wakeups_0_bits_fflags_bits_uop_bp_xcpt_if = 1'h0; // @[rename-stage.scala:356:7]
wire ren1_uops_0_ctrl_fcn_dw = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_ctrl_is_load = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_ctrl_is_sta = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_ctrl_is_std = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_iw_p1_poisoned = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_iw_p2_poisoned = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_prs1_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_prs2_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_prs3_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_ppred_busy = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_ldst_is_rs1 = 1'h0; // @[rename-stage.scala:101:29]
wire ren1_uops_0_xcpt_ma_if = 1'h0; // @[rename-stage.scala:101:29]
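  // Busy-table bookkeeping is constant in this instance: the allocation request and the
  // to_busy / unbusy vectors below are all tied to zero.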
wire ren2_alloc_reqs_0 = 1'h0; // @[rename-stage.scala:109:29]
wire _busy_table_WIRE_0 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_1 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_2 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_3 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_4 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_5 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_6 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_7 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_8 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_9 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_10 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_11 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_12 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_13 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_14 = 1'h0; // @[rename-stage.scala:368:35]
wire _busy_table_WIRE_15 = 1'h0; // @[rename-stage.scala:368:35]
wire _to_busy_WIRE_0 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_1 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_2 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_3 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_4 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_5 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_6 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_7 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_8 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_9 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_10 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_11 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_12 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_13 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_14 = 1'h0; // @[rename-stage.scala:369:33]
wire _to_busy_WIRE_15 = 1'h0; // @[rename-stage.scala:369:33]
wire to_busy_0 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_1 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_2 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_3 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_4 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_5 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_6 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_7 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_8 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_9 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_10 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_11 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_12 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_13 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_14 = 1'h0; // @[rename-stage.scala:369:25]
wire to_busy_15 = 1'h0; // @[rename-stage.scala:369:25]
wire _unbusy_WIRE_0 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_1 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_2 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_3 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_4 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_5 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_6 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_7 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_8 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_9 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_10 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_11 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_12 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_13 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_14 = 1'h0; // @[rename-stage.scala:370:32]
wire _unbusy_WIRE_15 = 1'h0; // @[rename-stage.scala:370:32]
wire unbusy_0 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_1 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_2 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_3 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_4 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_5 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_6 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_7 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_8 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_9 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_10 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_11 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_12 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_13 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_14 = 1'h0; // @[rename-stage.scala:370:24]
wire unbusy_15 = 1'h0; // @[rename-stage.scala:370:24]
wire _is_sfb_br_T_1 = 1'h0; // @[micro-op.scala:109:42]
wire is_sfb_br = 1'h0; // @[rename-stage.scala:378:44]
wire _is_sfb_shadow_T_2 = 1'h0; // @[micro-op.scala:110:43]
wire is_sfb_shadow = 1'h0; // @[rename-stage.scala:379:52]
wire [4:0] io_dec_uops_0_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:356:7]
wire [4:0] io_dec_uops_0_rob_idx = 5'h0; // @[rename-stage.scala:356:7]
wire [4:0] io_wakeups_0_bits_uop_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:356:7]
wire [4:0] io_wakeups_0_bits_uop_rob_idx = 5'h0; // @[rename-stage.scala:356:7]
wire [4:0] io_wakeups_0_bits_uop_mem_cmd = 5'h0; // @[rename-stage.scala:356:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:356:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_rob_idx = 5'h0; // @[rename-stage.scala:356:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_uop_mem_cmd = 5'h0; // @[rename-stage.scala:356:7]
wire [4:0] io_wakeups_0_bits_fflags_bits_flags = 5'h0; // @[rename-stage.scala:356:7]
wire [4:0] ren1_uops_0_ctrl_op_fcn = 5'h0; // @[rename-stage.scala:101:29]
wire [4:0] ren1_uops_0_rob_idx = 5'h0; // @[rename-stage.scala:101:29]
wire [2:0] io_dec_uops_0_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_dec_uops_0_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_dec_uops_0_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_dec_uops_0_ldq_idx = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_dec_uops_0_stq_idx = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_uop_iq_type = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_uop_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_uop_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_uop_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_uop_br_tag = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_uop_ldq_idx = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_uop_stq_idx = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_iq_type = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_br_tag = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_ldq_idx = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] io_wakeups_0_bits_fflags_bits_uop_stq_idx = 3'h0; // @[rename-stage.scala:356:7]
wire [2:0] ren1_uops_0_ctrl_op2_sel = 3'h0; // @[rename-stage.scala:101:29]
wire [2:0] ren1_uops_0_ctrl_imm_sel = 3'h0; // @[rename-stage.scala:101:29]
wire [2:0] ren1_uops_0_ctrl_csr_cmd = 3'h0; // @[rename-stage.scala:101:29]
wire [2:0] ren1_uops_0_ldq_idx = 3'h0; // @[rename-stage.scala:101:29]
wire [2:0] ren1_uops_0_stq_idx = 3'h0; // @[rename-stage.scala:101:29]
wire [1:0] io_dec_uops_0_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_dec_uops_0_iw_state = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_dec_uops_0_rxq_idx = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_dec_uops_0_debug_tsrc = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_uop_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_uop_iw_state = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_uop_rxq_idx = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_uop_mem_size = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_uop_dst_rtype = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_uop_lrs1_rtype = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_uop_lrs2_rtype = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_uop_debug_fsrc = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_uop_debug_tsrc = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_iw_state = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_rxq_idx = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_mem_size = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_dst_rtype = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs1_rtype = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_lrs2_rtype = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_fsrc = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] io_wakeups_0_bits_fflags_bits_uop_debug_tsrc = 2'h0; // @[rename-stage.scala:356:7]
wire [1:0] ren1_uops_0_ctrl_op1_sel = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_0_iw_state = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_0_rxq_idx = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] ren1_uops_0_debug_tsrc = 2'h0; // @[rename-stage.scala:101:29]
wire [1:0] lo_lo_lo_1 = 2'h0; // @[rename-stage.scala:402:47]
wire [1:0] lo_lo_hi_1 = 2'h0; // @[rename-stage.scala:402:47]
wire [1:0] lo_hi_lo_1 = 2'h0; // @[rename-stage.scala:402:47]
wire [1:0] lo_hi_hi_1 = 2'h0; // @[rename-stage.scala:402:47]
wire [1:0] hi_lo_lo_1 = 2'h0; // @[rename-stage.scala:402:47]
wire [1:0] hi_lo_hi_1 = 2'h0; // @[rename-stage.scala:402:47]
wire [1:0] hi_hi_lo_1 = 2'h0; // @[rename-stage.scala:402:47]
wire [1:0] hi_hi_hi_1 = 2'h0; // @[rename-stage.scala:402:47]
wire [1:0] lo_lo_lo_2 = 2'h0; // @[rename-stage.scala:402:65]
wire [1:0] lo_lo_hi_2 = 2'h0; // @[rename-stage.scala:402:65]
wire [1:0] lo_hi_lo_2 = 2'h0; // @[rename-stage.scala:402:65]
wire [1:0] lo_hi_hi_2 = 2'h0; // @[rename-stage.scala:402:65]
wire [1:0] hi_lo_lo_2 = 2'h0; // @[rename-stage.scala:402:65]
wire [1:0] hi_lo_hi_2 = 2'h0; // @[rename-stage.scala:402:65]
wire [1:0] hi_hi_lo_2 = 2'h0; // @[rename-stage.scala:402:65]
wire [1:0] hi_hi_hi_2 = 2'h0; // @[rename-stage.scala:402:65]
wire [3:0] io_dec_uops_0_ctrl_br_type = 4'h0; // @[rename-stage.scala:356:7]
wire [3:0] io_dec_uops_0_ppred = 4'h0; // @[rename-stage.scala:356:7]
wire [3:0] io_wakeups_0_bits_uop_ctrl_br_type = 4'h0; // @[rename-stage.scala:356:7]
wire [3:0] io_wakeups_0_bits_uop_ftq_idx = 4'h0; // @[rename-stage.scala:356:7]
wire [3:0] io_wakeups_0_bits_uop_ppred = 4'h0; // @[rename-stage.scala:356:7]
wire [3:0] io_wakeups_0_bits_fflags_bits_uop_ctrl_br_type = 4'h0; // @[rename-stage.scala:356:7]
wire [3:0] io_wakeups_0_bits_fflags_bits_uop_ftq_idx = 4'h0; // @[rename-stage.scala:356:7]
wire [3:0] io_wakeups_0_bits_fflags_bits_uop_ppred = 4'h0; // @[rename-stage.scala:356:7]
wire [3:0] ren1_uops_0_ctrl_br_type = 4'h0; // @[rename-stage.scala:101:29]
wire [3:0] ren1_uops_0_ppred = 4'h0; // @[rename-stage.scala:101:29]
wire [3:0] lo_lo_1 = 4'h0; // @[rename-stage.scala:402:47]
wire [3:0] lo_hi_1 = 4'h0; // @[rename-stage.scala:402:47]
wire [3:0] hi_lo_1 = 4'h0; // @[rename-stage.scala:402:47]
wire [3:0] hi_hi_1 = 4'h0; // @[rename-stage.scala:402:47]
wire [3:0] lo_lo_2 = 4'h0; // @[rename-stage.scala:402:65]
wire [3:0] lo_hi_2 = 4'h0; // @[rename-stage.scala:402:65]
wire [3:0] hi_lo_2 = 4'h0; // @[rename-stage.scala:402:65]
wire [3:0] hi_hi_2 = 4'h0; // @[rename-stage.scala:402:65]
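  // Rename stage 1: ren1_fire and the ren1_uops_0 fields are combinational copies of the
  // decode-stage inputs (io_dec_fire_0 / io_dec_uops_0).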
wire ren1_fire_0 = io_dec_fire_0_0; // @[rename-stage.scala:100:29, :356:7]
wire [6:0] ren1_uops_0_uopc = io_dec_uops_0_uopc_0; // @[rename-stage.scala:101:29, :356:7]
wire [31:0] ren1_uops_0_inst = io_dec_uops_0_inst_0; // @[rename-stage.scala:101:29, :356:7]
wire [31:0] ren1_uops_0_debug_inst = io_dec_uops_0_debug_inst_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_is_rvc = io_dec_uops_0_is_rvc_0; // @[rename-stage.scala:101:29, :356:7]
wire [39:0] ren1_uops_0_debug_pc = io_dec_uops_0_debug_pc_0; // @[rename-stage.scala:101:29, :356:7]
wire [2:0] ren1_uops_0_iq_type = io_dec_uops_0_iq_type_0; // @[rename-stage.scala:101:29, :356:7]
wire [9:0] ren1_uops_0_fu_code = io_dec_uops_0_fu_code_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_is_br = io_dec_uops_0_is_br_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_is_jalr = io_dec_uops_0_is_jalr_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_is_jal = io_dec_uops_0_is_jal_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_is_sfb = io_dec_uops_0_is_sfb_0; // @[rename-stage.scala:101:29, :356:7]
wire [7:0] ren1_uops_0_br_mask = io_dec_uops_0_br_mask_0; // @[rename-stage.scala:101:29, :356:7]
wire [2:0] ren1_uops_0_br_tag = io_dec_uops_0_br_tag_0; // @[rename-stage.scala:101:29, :356:7]
wire [3:0] ren1_uops_0_ftq_idx = io_dec_uops_0_ftq_idx_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_edge_inst = io_dec_uops_0_edge_inst_0; // @[rename-stage.scala:101:29, :356:7]
wire [5:0] ren1_uops_0_pc_lob = io_dec_uops_0_pc_lob_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_taken = io_dec_uops_0_taken_0; // @[rename-stage.scala:101:29, :356:7]
wire [19:0] ren1_uops_0_imm_packed = io_dec_uops_0_imm_packed_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_exception = io_dec_uops_0_exception_0; // @[rename-stage.scala:101:29, :356:7]
wire [63:0] ren1_uops_0_exc_cause = io_dec_uops_0_exc_cause_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_bypassable = io_dec_uops_0_bypassable_0; // @[rename-stage.scala:101:29, :356:7]
wire [4:0] ren1_uops_0_mem_cmd = io_dec_uops_0_mem_cmd_0; // @[rename-stage.scala:101:29, :356:7]
wire [1:0] ren1_uops_0_mem_size = io_dec_uops_0_mem_size_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_mem_signed = io_dec_uops_0_mem_signed_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_is_fence = io_dec_uops_0_is_fence_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_is_fencei = io_dec_uops_0_is_fencei_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_is_amo = io_dec_uops_0_is_amo_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_uses_ldq = io_dec_uops_0_uses_ldq_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_uses_stq = io_dec_uops_0_uses_stq_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_is_sys_pc2epc = io_dec_uops_0_is_sys_pc2epc_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_is_unique = io_dec_uops_0_is_unique_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_flush_on_commit = io_dec_uops_0_flush_on_commit_0; // @[rename-stage.scala:101:29, :356:7]
wire [5:0] ren1_uops_0_ldst = io_dec_uops_0_ldst_0; // @[rename-stage.scala:101:29, :356:7]
wire [5:0] ren1_uops_0_lrs1 = io_dec_uops_0_lrs1_0; // @[rename-stage.scala:101:29, :356:7]
wire [5:0] ren1_uops_0_lrs2 = io_dec_uops_0_lrs2_0; // @[rename-stage.scala:101:29, :356:7]
wire [5:0] ren1_uops_0_lrs3 = io_dec_uops_0_lrs3_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_ldst_val = io_dec_uops_0_ldst_val_0; // @[rename-stage.scala:101:29, :356:7]
wire [1:0] ren1_uops_0_dst_rtype = io_dec_uops_0_dst_rtype_0; // @[rename-stage.scala:101:29, :356:7]
wire [1:0] ren1_uops_0_lrs1_rtype = io_dec_uops_0_lrs1_rtype_0; // @[rename-stage.scala:101:29, :356:7]
wire [1:0] ren1_uops_0_lrs2_rtype = io_dec_uops_0_lrs2_rtype_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_frs3_en = io_dec_uops_0_frs3_en_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_fp_val = io_dec_uops_0_fp_val_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_fp_single = io_dec_uops_0_fp_single_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_xcpt_pf_if = io_dec_uops_0_xcpt_pf_if_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_xcpt_ae_if = io_dec_uops_0_xcpt_ae_if_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_bp_debug_if = io_dec_uops_0_bp_debug_if_0; // @[rename-stage.scala:101:29, :356:7]
wire ren1_uops_0_bp_xcpt_if = io_dec_uops_0_bp_xcpt_if_0; // @[rename-stage.scala:101:29, :356:7]
wire [1:0] ren1_uops_0_debug_fsrc = io_dec_uops_0_debug_fsrc_0; // @[rename-stage.scala:101:29, :356:7]
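  // Rename stage 2: forward declarations for the registered uop; these are driven further
  // below by the r_valid / r_uop_* pipeline registers.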
wire ren2_valids_0; // @[rename-stage.scala:107:29]
wire [6:0] ren2_uops_0_uopc; // @[rename-stage.scala:108:29]
wire [31:0] ren2_uops_0_inst; // @[rename-stage.scala:108:29]
wire [31:0] ren2_uops_0_debug_inst; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_rvc; // @[rename-stage.scala:108:29]
wire [39:0] ren2_uops_0_debug_pc; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_0_iq_type; // @[rename-stage.scala:108:29]
wire [9:0] ren2_uops_0_fu_code; // @[rename-stage.scala:108:29]
wire [3:0] ren2_uops_0_ctrl_br_type; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_ctrl_op1_sel; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_0_ctrl_op2_sel; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_0_ctrl_imm_sel; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_0_ctrl_op_fcn; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ctrl_is_load; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ctrl_is_sta; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ctrl_is_std; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_iw_state; // @[rename-stage.scala:108:29]
wire ren2_uops_0_iw_p1_poisoned; // @[rename-stage.scala:108:29]
wire ren2_uops_0_iw_p2_poisoned; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_br; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_jalr; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_jal; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29]
wire [7:0] ren2_uops_0_br_mask; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_0_br_tag; // @[rename-stage.scala:108:29]
wire [3:0] ren2_uops_0_ftq_idx; // @[rename-stage.scala:108:29]
wire ren2_uops_0_edge_inst; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_pc_lob; // @[rename-stage.scala:108:29]
wire ren2_uops_0_taken; // @[rename-stage.scala:108:29]
wire [19:0] ren2_uops_0_imm_packed; // @[rename-stage.scala:108:29]
wire [11:0] ren2_uops_0_csr_addr; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_0_rob_idx; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_0_ldq_idx; // @[rename-stage.scala:108:29]
wire [2:0] ren2_uops_0_stq_idx; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_rxq_idx; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_pdst; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_prs1; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_prs2; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_prs3; // @[rename-stage.scala:108:29]
wire [3:0] ren2_uops_0_ppred; // @[rename-stage.scala:108:29]
wire ren2_uops_0_prs1_busy; // @[rename-stage.scala:108:29]
wire ren2_uops_0_prs2_busy; // @[rename-stage.scala:108:29]
wire ren2_uops_0_prs3_busy; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ppred_busy; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_stale_pdst; // @[rename-stage.scala:108:29]
wire ren2_uops_0_exception; // @[rename-stage.scala:108:29]
wire [63:0] ren2_uops_0_exc_cause; // @[rename-stage.scala:108:29]
wire ren2_uops_0_bypassable; // @[rename-stage.scala:108:29]
wire [4:0] ren2_uops_0_mem_cmd; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_mem_size; // @[rename-stage.scala:108:29]
wire ren2_uops_0_mem_signed; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_fence; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_fencei; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_amo; // @[rename-stage.scala:108:29]
wire ren2_uops_0_uses_ldq; // @[rename-stage.scala:108:29]
wire ren2_uops_0_uses_stq; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_sys_pc2epc; // @[rename-stage.scala:108:29]
wire ren2_uops_0_is_unique; // @[rename-stage.scala:108:29]
wire ren2_uops_0_flush_on_commit; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ldst_is_rs1; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_ldst; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_lrs1; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_lrs2; // @[rename-stage.scala:108:29]
wire [5:0] ren2_uops_0_lrs3; // @[rename-stage.scala:108:29]
wire ren2_uops_0_ldst_val; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_dst_rtype; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_lrs1_rtype; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_lrs2_rtype; // @[rename-stage.scala:108:29]
wire ren2_uops_0_frs3_en; // @[rename-stage.scala:108:29]
wire ren2_uops_0_fp_val; // @[rename-stage.scala:108:29]
wire ren2_uops_0_fp_single; // @[rename-stage.scala:108:29]
wire ren2_uops_0_xcpt_pf_if; // @[rename-stage.scala:108:29]
wire ren2_uops_0_xcpt_ae_if; // @[rename-stage.scala:108:29]
wire ren2_uops_0_xcpt_ma_if; // @[rename-stage.scala:108:29]
wire ren2_uops_0_bp_debug_if; // @[rename-stage.scala:108:29]
wire ren2_uops_0_bp_xcpt_if; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_debug_fsrc; // @[rename-stage.scala:108:29]
wire [1:0] ren2_uops_0_debug_tsrc; // @[rename-stage.scala:108:29]
wire io_ren2_mask_0; // @[rename-stage.scala:356:7]
wire [3:0] io_ren2_uops_0_ctrl_br_type; // @[rename-stage.scala:356:7]
wire [1:0] io_ren2_uops_0_ctrl_op1_sel; // @[rename-stage.scala:356:7]
wire [2:0] io_ren2_uops_0_ctrl_op2_sel; // @[rename-stage.scala:356:7]
wire [2:0] io_ren2_uops_0_ctrl_imm_sel; // @[rename-stage.scala:356:7]
wire [4:0] io_ren2_uops_0_ctrl_op_fcn; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:356:7]
wire [2:0] io_ren2_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_ctrl_is_load; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_ctrl_is_sta; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_ctrl_is_std; // @[rename-stage.scala:356:7]
wire [6:0] io_ren2_uops_0_uopc; // @[rename-stage.scala:356:7]
wire [31:0] io_ren2_uops_0_inst; // @[rename-stage.scala:356:7]
wire [31:0] io_ren2_uops_0_debug_inst; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_is_rvc; // @[rename-stage.scala:356:7]
wire [39:0] io_ren2_uops_0_debug_pc; // @[rename-stage.scala:356:7]
wire [2:0] io_ren2_uops_0_iq_type; // @[rename-stage.scala:356:7]
wire [9:0] io_ren2_uops_0_fu_code; // @[rename-stage.scala:356:7]
wire [1:0] io_ren2_uops_0_iw_state; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_iw_p1_poisoned; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_iw_p2_poisoned; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_is_br; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_is_jalr; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_is_jal; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_is_sfb; // @[rename-stage.scala:356:7]
wire [7:0] io_ren2_uops_0_br_mask; // @[rename-stage.scala:356:7]
wire [2:0] io_ren2_uops_0_br_tag; // @[rename-stage.scala:356:7]
wire [3:0] io_ren2_uops_0_ftq_idx; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_edge_inst; // @[rename-stage.scala:356:7]
wire [5:0] io_ren2_uops_0_pc_lob; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_taken; // @[rename-stage.scala:356:7]
wire [19:0] io_ren2_uops_0_imm_packed; // @[rename-stage.scala:356:7]
wire [11:0] io_ren2_uops_0_csr_addr; // @[rename-stage.scala:356:7]
wire [4:0] io_ren2_uops_0_rob_idx; // @[rename-stage.scala:356:7]
wire [2:0] io_ren2_uops_0_ldq_idx; // @[rename-stage.scala:356:7]
wire [2:0] io_ren2_uops_0_stq_idx; // @[rename-stage.scala:356:7]
wire [1:0] io_ren2_uops_0_rxq_idx; // @[rename-stage.scala:356:7]
wire [5:0] io_ren2_uops_0_pdst; // @[rename-stage.scala:356:7]
wire [5:0] io_ren2_uops_0_prs1; // @[rename-stage.scala:356:7]
wire [5:0] io_ren2_uops_0_prs2; // @[rename-stage.scala:356:7]
wire [5:0] io_ren2_uops_0_prs3; // @[rename-stage.scala:356:7]
wire [3:0] io_ren2_uops_0_ppred; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_prs1_busy; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_prs2_busy; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_prs3_busy; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_ppred_busy; // @[rename-stage.scala:356:7]
wire [5:0] io_ren2_uops_0_stale_pdst; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_exception; // @[rename-stage.scala:356:7]
wire [63:0] io_ren2_uops_0_exc_cause; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_bypassable; // @[rename-stage.scala:356:7]
wire [4:0] io_ren2_uops_0_mem_cmd; // @[rename-stage.scala:356:7]
wire [1:0] io_ren2_uops_0_mem_size; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_mem_signed; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_is_fence; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_is_fencei; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_is_amo; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_uses_ldq; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_uses_stq; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_is_sys_pc2epc; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_is_unique; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_flush_on_commit; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_ldst_is_rs1; // @[rename-stage.scala:356:7]
wire [5:0] io_ren2_uops_0_ldst; // @[rename-stage.scala:356:7]
wire [5:0] io_ren2_uops_0_lrs1; // @[rename-stage.scala:356:7]
wire [5:0] io_ren2_uops_0_lrs2; // @[rename-stage.scala:356:7]
wire [5:0] io_ren2_uops_0_lrs3; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_ldst_val; // @[rename-stage.scala:356:7]
wire [1:0] io_ren2_uops_0_dst_rtype; // @[rename-stage.scala:356:7]
wire [1:0] io_ren2_uops_0_lrs1_rtype; // @[rename-stage.scala:356:7]
wire [1:0] io_ren2_uops_0_lrs2_rtype; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_frs3_en; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_fp_val; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_fp_single; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_xcpt_pf_if; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_xcpt_ae_if; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_xcpt_ma_if; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_bp_debug_if; // @[rename-stage.scala:356:7]
wire io_ren2_uops_0_bp_xcpt_if; // @[rename-stage.scala:356:7]
wire [1:0] io_ren2_uops_0_debug_fsrc; // @[rename-stage.scala:356:7]
wire [1:0] io_ren2_uops_0_debug_tsrc; // @[rename-stage.scala:356:7]
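  // Drive the io_ren2_* outputs from the internal stage-2 signals.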
assign io_ren2_mask_0 = ren2_valids_0; // @[rename-stage.scala:107:29, :356:7]
assign io_ren2_uops_0_uopc = ren2_uops_0_uopc; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_inst = ren2_uops_0_inst; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_debug_inst = ren2_uops_0_debug_inst; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_is_rvc = ren2_uops_0_is_rvc; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_debug_pc = ren2_uops_0_debug_pc; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_iq_type = ren2_uops_0_iq_type; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_fu_code = ren2_uops_0_fu_code; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ctrl_br_type = ren2_uops_0_ctrl_br_type; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ctrl_op1_sel = ren2_uops_0_ctrl_op1_sel; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ctrl_op2_sel = ren2_uops_0_ctrl_op2_sel; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ctrl_imm_sel = ren2_uops_0_ctrl_imm_sel; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ctrl_op_fcn = ren2_uops_0_ctrl_op_fcn; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ctrl_fcn_dw = ren2_uops_0_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ctrl_csr_cmd = ren2_uops_0_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ctrl_is_load = ren2_uops_0_ctrl_is_load; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ctrl_is_sta = ren2_uops_0_ctrl_is_sta; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ctrl_is_std = ren2_uops_0_ctrl_is_std; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_iw_state = ren2_uops_0_iw_state; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_iw_p1_poisoned = ren2_uops_0_iw_p1_poisoned; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_iw_p2_poisoned = ren2_uops_0_iw_p2_poisoned; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_is_br = ren2_uops_0_is_br; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_is_jalr = ren2_uops_0_is_jalr; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_is_jal = ren2_uops_0_is_jal; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_is_sfb = ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_br_mask = ren2_uops_0_br_mask; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_br_tag = ren2_uops_0_br_tag; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ftq_idx = ren2_uops_0_ftq_idx; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_edge_inst = ren2_uops_0_edge_inst; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_pc_lob = ren2_uops_0_pc_lob; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_taken = ren2_uops_0_taken; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_imm_packed = ren2_uops_0_imm_packed; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_csr_addr = ren2_uops_0_csr_addr; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_rob_idx = ren2_uops_0_rob_idx; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ldq_idx = ren2_uops_0_ldq_idx; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_stq_idx = ren2_uops_0_stq_idx; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_rxq_idx = ren2_uops_0_rxq_idx; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_pdst = ren2_uops_0_pdst; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_prs1 = ren2_uops_0_prs1; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_prs2 = ren2_uops_0_prs2; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_prs3 = ren2_uops_0_prs3; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ppred = ren2_uops_0_ppred; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_prs1_busy = ren2_uops_0_prs1_busy; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_prs2_busy = ren2_uops_0_prs2_busy; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_prs3_busy = ren2_uops_0_prs3_busy; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ppred_busy = ren2_uops_0_ppred_busy; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_stale_pdst = ren2_uops_0_stale_pdst; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_exception = ren2_uops_0_exception; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_exc_cause = ren2_uops_0_exc_cause; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_bypassable = ren2_uops_0_bypassable; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_mem_cmd = ren2_uops_0_mem_cmd; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_mem_size = ren2_uops_0_mem_size; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_mem_signed = ren2_uops_0_mem_signed; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_is_fence = ren2_uops_0_is_fence; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_is_fencei = ren2_uops_0_is_fencei; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_is_amo = ren2_uops_0_is_amo; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_uses_ldq = ren2_uops_0_uses_ldq; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_uses_stq = ren2_uops_0_uses_stq; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_is_sys_pc2epc = ren2_uops_0_is_sys_pc2epc; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_is_unique = ren2_uops_0_is_unique; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_flush_on_commit = ren2_uops_0_flush_on_commit; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ldst_is_rs1 = ren2_uops_0_ldst_is_rs1; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ldst = ren2_uops_0_ldst; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_lrs1 = ren2_uops_0_lrs1; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_lrs2 = ren2_uops_0_lrs2; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_lrs3 = ren2_uops_0_lrs3; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_ldst_val = ren2_uops_0_ldst_val; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_dst_rtype = ren2_uops_0_dst_rtype; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_lrs1_rtype = ren2_uops_0_lrs1_rtype; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_lrs2_rtype = ren2_uops_0_lrs2_rtype; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_frs3_en = ren2_uops_0_frs3_en; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_fp_val = ren2_uops_0_fp_val; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_fp_single = ren2_uops_0_fp_single; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_xcpt_pf_if = ren2_uops_0_xcpt_pf_if; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_xcpt_ae_if = ren2_uops_0_xcpt_ae_if; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_xcpt_ma_if = ren2_uops_0_xcpt_ma_if; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_bp_debug_if = ren2_uops_0_bp_debug_if; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_bp_xcpt_if = ren2_uops_0_bp_xcpt_if; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_debug_fsrc = ren2_uops_0_debug_fsrc; // @[rename-stage.scala:108:29, :356:7]
assign io_ren2_uops_0_debug_tsrc = ren2_uops_0_debug_tsrc; // @[rename-stage.scala:108:29, :356:7]
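  // Stage-1 -> stage-2 pipeline registers (rename-stage.scala:121-122): r_valid and the
  // r_uop_* registers hold the uop presented on io_ren2 above.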
reg r_valid; // @[rename-stage.scala:121:27]
assign ren2_valids_0 = r_valid; // @[rename-stage.scala:107:29, :121:27]
reg [6:0] r_uop_uopc; // @[rename-stage.scala:122:23]
assign ren2_uops_0_uopc = r_uop_uopc; // @[rename-stage.scala:108:29, :122:23]
reg [31:0] r_uop_inst; // @[rename-stage.scala:122:23]
assign ren2_uops_0_inst = r_uop_inst; // @[rename-stage.scala:108:29, :122:23]
reg [31:0] r_uop_debug_inst; // @[rename-stage.scala:122:23]
assign ren2_uops_0_debug_inst = r_uop_debug_inst; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_rvc; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_rvc = r_uop_is_rvc; // @[rename-stage.scala:108:29, :122:23]
reg [39:0] r_uop_debug_pc; // @[rename-stage.scala:122:23]
assign ren2_uops_0_debug_pc = r_uop_debug_pc; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_iq_type; // @[rename-stage.scala:122:23]
assign ren2_uops_0_iq_type = r_uop_iq_type; // @[rename-stage.scala:108:29, :122:23]
reg [9:0] r_uop_fu_code; // @[rename-stage.scala:122:23]
assign ren2_uops_0_fu_code = r_uop_fu_code; // @[rename-stage.scala:108:29, :122:23]
reg [3:0] r_uop_ctrl_br_type; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_br_type = r_uop_ctrl_br_type; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_ctrl_op1_sel; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_op1_sel = r_uop_ctrl_op1_sel; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_ctrl_op2_sel; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_op2_sel = r_uop_ctrl_op2_sel; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_ctrl_imm_sel; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_imm_sel = r_uop_ctrl_imm_sel; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_ctrl_op_fcn; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_op_fcn = r_uop_ctrl_op_fcn; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_ctrl_fcn_dw; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_fcn_dw = r_uop_ctrl_fcn_dw; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_ctrl_csr_cmd; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_csr_cmd = r_uop_ctrl_csr_cmd; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_ctrl_is_load; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_is_load = r_uop_ctrl_is_load; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_ctrl_is_sta; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_is_sta = r_uop_ctrl_is_sta; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_ctrl_is_std; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ctrl_is_std = r_uop_ctrl_is_std; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_iw_state; // @[rename-stage.scala:122:23]
assign ren2_uops_0_iw_state = r_uop_iw_state; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_iw_p1_poisoned; // @[rename-stage.scala:122:23]
assign ren2_uops_0_iw_p1_poisoned = r_uop_iw_p1_poisoned; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_iw_p2_poisoned; // @[rename-stage.scala:122:23]
assign ren2_uops_0_iw_p2_poisoned = r_uop_iw_p2_poisoned; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_br; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_br = r_uop_is_br; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_jalr; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_jalr = r_uop_is_jalr; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_jal; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_jal = r_uop_is_jal; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_sfb; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_sfb = r_uop_is_sfb; // @[rename-stage.scala:108:29, :122:23]
reg [7:0] r_uop_br_mask; // @[rename-stage.scala:122:23]
assign ren2_uops_0_br_mask = r_uop_br_mask; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_br_tag; // @[rename-stage.scala:122:23]
assign ren2_uops_0_br_tag = r_uop_br_tag; // @[rename-stage.scala:108:29, :122:23]
reg [3:0] r_uop_ftq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ftq_idx = r_uop_ftq_idx; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_edge_inst; // @[rename-stage.scala:122:23]
assign ren2_uops_0_edge_inst = r_uop_edge_inst; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_pc_lob; // @[rename-stage.scala:122:23]
assign ren2_uops_0_pc_lob = r_uop_pc_lob; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_taken; // @[rename-stage.scala:122:23]
assign ren2_uops_0_taken = r_uop_taken; // @[rename-stage.scala:108:29, :122:23]
reg [19:0] r_uop_imm_packed; // @[rename-stage.scala:122:23]
assign ren2_uops_0_imm_packed = r_uop_imm_packed; // @[rename-stage.scala:108:29, :122:23]
reg [11:0] r_uop_csr_addr; // @[rename-stage.scala:122:23]
assign ren2_uops_0_csr_addr = r_uop_csr_addr; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_rob_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_0_rob_idx = r_uop_rob_idx; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_ldq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ldq_idx = r_uop_ldq_idx; // @[rename-stage.scala:108:29, :122:23]
reg [2:0] r_uop_stq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_0_stq_idx = r_uop_stq_idx; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_rxq_idx; // @[rename-stage.scala:122:23]
assign ren2_uops_0_rxq_idx = r_uop_rxq_idx; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_pdst; // @[rename-stage.scala:122:23]
assign ren2_uops_0_pdst = r_uop_pdst; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_prs1; // @[rename-stage.scala:122:23]
assign ren2_uops_0_prs1 = r_uop_prs1; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_prs2; // @[rename-stage.scala:122:23]
assign ren2_uops_0_prs2 = r_uop_prs2; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_prs3; // @[rename-stage.scala:122:23]
assign ren2_uops_0_prs3 = r_uop_prs3; // @[rename-stage.scala:108:29, :122:23]
reg [3:0] r_uop_ppred; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ppred = r_uop_ppred; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_prs1_busy; // @[rename-stage.scala:122:23]
assign ren2_uops_0_prs1_busy = r_uop_prs1_busy; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_prs2_busy; // @[rename-stage.scala:122:23]
assign ren2_uops_0_prs2_busy = r_uop_prs2_busy; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_prs3_busy; // @[rename-stage.scala:122:23]
assign ren2_uops_0_prs3_busy = r_uop_prs3_busy; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_ppred_busy; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ppred_busy = r_uop_ppred_busy; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_stale_pdst; // @[rename-stage.scala:122:23]
assign ren2_uops_0_stale_pdst = r_uop_stale_pdst; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_exception; // @[rename-stage.scala:122:23]
assign ren2_uops_0_exception = r_uop_exception; // @[rename-stage.scala:108:29, :122:23]
reg [63:0] r_uop_exc_cause; // @[rename-stage.scala:122:23]
assign ren2_uops_0_exc_cause = r_uop_exc_cause; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_bypassable; // @[rename-stage.scala:122:23]
assign ren2_uops_0_bypassable = r_uop_bypassable; // @[rename-stage.scala:108:29, :122:23]
reg [4:0] r_uop_mem_cmd; // @[rename-stage.scala:122:23]
assign ren2_uops_0_mem_cmd = r_uop_mem_cmd; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_mem_size; // @[rename-stage.scala:122:23]
assign ren2_uops_0_mem_size = r_uop_mem_size; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_mem_signed; // @[rename-stage.scala:122:23]
assign ren2_uops_0_mem_signed = r_uop_mem_signed; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_fence; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_fence = r_uop_is_fence; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_fencei; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_fencei = r_uop_is_fencei; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_amo; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_amo = r_uop_is_amo; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_uses_ldq; // @[rename-stage.scala:122:23]
assign ren2_uops_0_uses_ldq = r_uop_uses_ldq; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_uses_stq; // @[rename-stage.scala:122:23]
assign ren2_uops_0_uses_stq = r_uop_uses_stq; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_sys_pc2epc; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_sys_pc2epc = r_uop_is_sys_pc2epc; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_is_unique; // @[rename-stage.scala:122:23]
assign ren2_uops_0_is_unique = r_uop_is_unique; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_flush_on_commit; // @[rename-stage.scala:122:23]
assign ren2_uops_0_flush_on_commit = r_uop_flush_on_commit; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_ldst_is_rs1; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ldst_is_rs1 = r_uop_ldst_is_rs1; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_ldst; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ldst = r_uop_ldst; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_lrs1; // @[rename-stage.scala:122:23]
assign ren2_uops_0_lrs1 = r_uop_lrs1; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_lrs2; // @[rename-stage.scala:122:23]
assign ren2_uops_0_lrs2 = r_uop_lrs2; // @[rename-stage.scala:108:29, :122:23]
reg [5:0] r_uop_lrs3; // @[rename-stage.scala:122:23]
assign ren2_uops_0_lrs3 = r_uop_lrs3; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_ldst_val; // @[rename-stage.scala:122:23]
assign ren2_uops_0_ldst_val = r_uop_ldst_val; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_dst_rtype; // @[rename-stage.scala:122:23]
assign ren2_uops_0_dst_rtype = r_uop_dst_rtype; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_lrs1_rtype; // @[rename-stage.scala:122:23]
assign ren2_uops_0_lrs1_rtype = r_uop_lrs1_rtype; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_lrs2_rtype; // @[rename-stage.scala:122:23]
assign ren2_uops_0_lrs2_rtype = r_uop_lrs2_rtype; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_frs3_en; // @[rename-stage.scala:122:23]
assign ren2_uops_0_frs3_en = r_uop_frs3_en; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_fp_val; // @[rename-stage.scala:122:23]
assign ren2_uops_0_fp_val = r_uop_fp_val; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_fp_single; // @[rename-stage.scala:122:23]
assign ren2_uops_0_fp_single = r_uop_fp_single; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_xcpt_pf_if; // @[rename-stage.scala:122:23]
assign ren2_uops_0_xcpt_pf_if = r_uop_xcpt_pf_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_xcpt_ae_if; // @[rename-stage.scala:122:23]
assign ren2_uops_0_xcpt_ae_if = r_uop_xcpt_ae_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_xcpt_ma_if; // @[rename-stage.scala:122:23]
assign ren2_uops_0_xcpt_ma_if = r_uop_xcpt_ma_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_bp_debug_if; // @[rename-stage.scala:122:23]
assign ren2_uops_0_bp_debug_if = r_uop_bp_debug_if; // @[rename-stage.scala:108:29, :122:23]
reg r_uop_bp_xcpt_if; // @[rename-stage.scala:122:23]
assign ren2_uops_0_bp_xcpt_if = r_uop_bp_xcpt_if; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_debug_fsrc; // @[rename-stage.scala:122:23]
assign ren2_uops_0_debug_fsrc = r_uop_debug_fsrc; // @[rename-stage.scala:108:29, :122:23]
reg [1:0] r_uop_debug_tsrc; // @[rename-stage.scala:122:23]
assign ren2_uops_0_debug_tsrc = r_uop_debug_tsrc; // @[rename-stage.scala:108:29, :122:23]
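  // r_uop_newuop_* aliases the next_uop_* wires (util.scala:73) whose values are loaded into
  // the r_uop_* registers; note that br_mask is not among these aliases (presumably because
  // the branch mask is updated separately by the branch-mask logic).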
wire [6:0] r_uop_newuop_uopc = next_uop_uopc; // @[util.scala:73:26]
wire [31:0] r_uop_newuop_inst = next_uop_inst; // @[util.scala:73:26]
wire [31:0] r_uop_newuop_debug_inst = next_uop_debug_inst; // @[util.scala:73:26]
wire r_uop_newuop_is_rvc = next_uop_is_rvc; // @[util.scala:73:26]
wire [39:0] r_uop_newuop_debug_pc = next_uop_debug_pc; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_iq_type = next_uop_iq_type; // @[util.scala:73:26]
wire [9:0] r_uop_newuop_fu_code = next_uop_fu_code; // @[util.scala:73:26]
wire [3:0] r_uop_newuop_ctrl_br_type = next_uop_ctrl_br_type; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_ctrl_op1_sel = next_uop_ctrl_op1_sel; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_ctrl_op2_sel = next_uop_ctrl_op2_sel; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_ctrl_imm_sel = next_uop_ctrl_imm_sel; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_ctrl_op_fcn = next_uop_ctrl_op_fcn; // @[util.scala:73:26]
wire r_uop_newuop_ctrl_fcn_dw = next_uop_ctrl_fcn_dw; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_ctrl_csr_cmd = next_uop_ctrl_csr_cmd; // @[util.scala:73:26]
wire r_uop_newuop_ctrl_is_load = next_uop_ctrl_is_load; // @[util.scala:73:26]
wire r_uop_newuop_ctrl_is_sta = next_uop_ctrl_is_sta; // @[util.scala:73:26]
wire r_uop_newuop_ctrl_is_std = next_uop_ctrl_is_std; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_iw_state = next_uop_iw_state; // @[util.scala:73:26]
wire r_uop_newuop_iw_p1_poisoned = next_uop_iw_p1_poisoned; // @[util.scala:73:26]
wire r_uop_newuop_iw_p2_poisoned = next_uop_iw_p2_poisoned; // @[util.scala:73:26]
wire r_uop_newuop_is_br = next_uop_is_br; // @[util.scala:73:26]
wire r_uop_newuop_is_jalr = next_uop_is_jalr; // @[util.scala:73:26]
wire r_uop_newuop_is_jal = next_uop_is_jal; // @[util.scala:73:26]
wire r_uop_newuop_is_sfb = next_uop_is_sfb; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_br_tag = next_uop_br_tag; // @[util.scala:73:26]
wire [3:0] r_uop_newuop_ftq_idx = next_uop_ftq_idx; // @[util.scala:73:26]
wire r_uop_newuop_edge_inst = next_uop_edge_inst; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_pc_lob = next_uop_pc_lob; // @[util.scala:73:26]
wire r_uop_newuop_taken = next_uop_taken; // @[util.scala:73:26]
wire [19:0] r_uop_newuop_imm_packed = next_uop_imm_packed; // @[util.scala:73:26]
wire [11:0] r_uop_newuop_csr_addr = next_uop_csr_addr; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_rob_idx = next_uop_rob_idx; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_ldq_idx = next_uop_ldq_idx; // @[util.scala:73:26]
wire [2:0] r_uop_newuop_stq_idx = next_uop_stq_idx; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_rxq_idx = next_uop_rxq_idx; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_pdst = next_uop_pdst; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_prs1 = next_uop_prs1; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_prs2 = next_uop_prs2; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_prs3 = next_uop_prs3; // @[util.scala:73:26]
wire [3:0] r_uop_newuop_ppred = next_uop_ppred; // @[util.scala:73:26]
wire r_uop_newuop_prs1_busy = next_uop_prs1_busy; // @[util.scala:73:26]
wire r_uop_newuop_prs2_busy = next_uop_prs2_busy; // @[util.scala:73:26]
wire r_uop_newuop_prs3_busy = next_uop_prs3_busy; // @[util.scala:73:26]
wire r_uop_newuop_ppred_busy = next_uop_ppred_busy; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_stale_pdst = next_uop_stale_pdst; // @[util.scala:73:26]
wire r_uop_newuop_exception = next_uop_exception; // @[util.scala:73:26]
wire [63:0] r_uop_newuop_exc_cause = next_uop_exc_cause; // @[util.scala:73:26]
wire r_uop_newuop_bypassable = next_uop_bypassable; // @[util.scala:73:26]
wire [4:0] r_uop_newuop_mem_cmd = next_uop_mem_cmd; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_mem_size = next_uop_mem_size; // @[util.scala:73:26]
wire r_uop_newuop_mem_signed = next_uop_mem_signed; // @[util.scala:73:26]
wire r_uop_newuop_is_fence = next_uop_is_fence; // @[util.scala:73:26]
wire r_uop_newuop_is_fencei = next_uop_is_fencei; // @[util.scala:73:26]
wire r_uop_newuop_is_amo = next_uop_is_amo; // @[util.scala:73:26]
wire r_uop_newuop_uses_ldq = next_uop_uses_ldq; // @[util.scala:73:26]
wire r_uop_newuop_uses_stq = next_uop_uses_stq; // @[util.scala:73:26]
wire r_uop_newuop_is_sys_pc2epc = next_uop_is_sys_pc2epc; // @[util.scala:73:26]
wire r_uop_newuop_is_unique = next_uop_is_unique; // @[util.scala:73:26]
wire r_uop_newuop_flush_on_commit = next_uop_flush_on_commit; // @[util.scala:73:26]
wire r_uop_newuop_ldst_is_rs1 = next_uop_ldst_is_rs1; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_ldst = next_uop_ldst; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_lrs1 = next_uop_lrs1; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_lrs2 = next_uop_lrs2; // @[util.scala:73:26]
wire [5:0] r_uop_newuop_lrs3 = next_uop_lrs3; // @[util.scala:73:26]
wire r_uop_newuop_ldst_val = next_uop_ldst_val; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_dst_rtype = next_uop_dst_rtype; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_lrs1_rtype = next_uop_lrs1_rtype; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_lrs2_rtype = next_uop_lrs2_rtype; // @[util.scala:73:26]
wire r_uop_newuop_frs3_en = next_uop_frs3_en; // @[util.scala:73:26]
wire r_uop_newuop_fp_val = next_uop_fp_val; // @[util.scala:73:26]
wire r_uop_newuop_fp_single = next_uop_fp_single; // @[util.scala:73:26]
wire r_uop_newuop_xcpt_pf_if = next_uop_xcpt_pf_if; // @[util.scala:73:26]
wire r_uop_newuop_xcpt_ae_if = next_uop_xcpt_ae_if; // @[util.scala:73:26]
wire r_uop_newuop_xcpt_ma_if = next_uop_xcpt_ma_if; // @[util.scala:73:26]
wire r_uop_newuop_bp_debug_if = next_uop_bp_debug_if; // @[util.scala:73:26]
wire r_uop_newuop_bp_xcpt_if = next_uop_bp_xcpt_if; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_debug_fsrc = next_uop_debug_fsrc; // @[util.scala:73:26]
wire [1:0] r_uop_newuop_debug_tsrc = next_uop_debug_tsrc; // @[util.scala:73:26]
wire [7:0] next_uop_br_mask; // @[rename-stage.scala:123:24]
wire _r_valid_T = ~io_dis_fire_0_0; // @[rename-stage.scala:133:29, :356:7]
wire _r_valid_T_1 = r_valid & _r_valid_T; // @[rename-stage.scala:121:27, :133:{26,29}]
wire _GEN = io_kill_0 | ~io_dis_ready_0; // @[rename-stage.scala:125:14, :127:20, :129:30, :356:7]
assign next_uop_uopc = _GEN ? r_uop_uopc : ren1_uops_0_uopc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_inst = _GEN ? r_uop_inst : ren1_uops_0_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_debug_inst = _GEN ? r_uop_debug_inst : ren1_uops_0_debug_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_rvc = _GEN ? r_uop_is_rvc : ren1_uops_0_is_rvc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_debug_pc = _GEN ? r_uop_debug_pc : ren1_uops_0_debug_pc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_iq_type = _GEN ? r_uop_iq_type : ren1_uops_0_iq_type; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_fu_code = _GEN ? r_uop_fu_code : ren1_uops_0_fu_code; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_br_type = _GEN ? r_uop_ctrl_br_type : 4'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_op1_sel = _GEN ? r_uop_ctrl_op1_sel : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_op2_sel = _GEN ? r_uop_ctrl_op2_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_imm_sel = _GEN ? r_uop_ctrl_imm_sel : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_op_fcn = _GEN ? r_uop_ctrl_op_fcn : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_fcn_dw = _GEN & r_uop_ctrl_fcn_dw; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_csr_cmd = _GEN ? r_uop_ctrl_csr_cmd : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_is_load = _GEN & r_uop_ctrl_is_load; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_is_sta = _GEN & r_uop_ctrl_is_sta; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ctrl_is_std = _GEN & r_uop_ctrl_is_std; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_iw_state = _GEN ? r_uop_iw_state : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_iw_p1_poisoned = _GEN & r_uop_iw_p1_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_iw_p2_poisoned = _GEN & r_uop_iw_p2_poisoned; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_br = _GEN ? r_uop_is_br : ren1_uops_0_is_br; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_jalr = _GEN ? r_uop_is_jalr : ren1_uops_0_is_jalr; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_jal = _GEN ? r_uop_is_jal : ren1_uops_0_is_jal; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_sfb = _GEN ? r_uop_is_sfb : ren1_uops_0_is_sfb; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_br_mask = _GEN ? r_uop_br_mask : ren1_uops_0_br_mask; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_br_tag = _GEN ? r_uop_br_tag : ren1_uops_0_br_tag; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ftq_idx = _GEN ? r_uop_ftq_idx : ren1_uops_0_ftq_idx; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_edge_inst = _GEN ? r_uop_edge_inst : ren1_uops_0_edge_inst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_pc_lob = _GEN ? r_uop_pc_lob : ren1_uops_0_pc_lob; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_taken = _GEN ? r_uop_taken : ren1_uops_0_taken; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_imm_packed = _GEN ? r_uop_imm_packed : ren1_uops_0_imm_packed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_csr_addr = _GEN ? r_uop_csr_addr : 12'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_rob_idx = _GEN ? r_uop_rob_idx : 5'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ldq_idx = _GEN ? r_uop_ldq_idx : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_stq_idx = _GEN ? r_uop_stq_idx : 3'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_rxq_idx = _GEN ? r_uop_rxq_idx : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_pdst = _GEN ? r_uop_pdst : 6'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_prs1 = _GEN ? r_uop_prs1 : 6'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_prs2 = _GEN ? r_uop_prs2 : 6'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_prs3 = _GEN ? r_uop_prs3 : 6'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ppred = _GEN ? r_uop_ppred : 4'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_prs1_busy = _GEN & r_uop_prs1_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_prs2_busy = _GEN & r_uop_prs2_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_prs3_busy = _GEN & r_uop_prs3_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ppred_busy = _GEN & r_uop_ppred_busy; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_stale_pdst = _GEN ? r_uop_stale_pdst : 6'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_exception = _GEN ? r_uop_exception : ren1_uops_0_exception; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_exc_cause = _GEN ? r_uop_exc_cause : ren1_uops_0_exc_cause; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_bypassable = _GEN ? r_uop_bypassable : ren1_uops_0_bypassable; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_mem_cmd = _GEN ? r_uop_mem_cmd : ren1_uops_0_mem_cmd; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_mem_size = _GEN ? r_uop_mem_size : ren1_uops_0_mem_size; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_mem_signed = _GEN ? r_uop_mem_signed : ren1_uops_0_mem_signed; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_fence = _GEN ? r_uop_is_fence : ren1_uops_0_is_fence; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_fencei = _GEN ? r_uop_is_fencei : ren1_uops_0_is_fencei; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_amo = _GEN ? r_uop_is_amo : ren1_uops_0_is_amo; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_uses_ldq = _GEN ? r_uop_uses_ldq : ren1_uops_0_uses_ldq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_uses_stq = _GEN ? r_uop_uses_stq : ren1_uops_0_uses_stq; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_sys_pc2epc = _GEN ? r_uop_is_sys_pc2epc : ren1_uops_0_is_sys_pc2epc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_is_unique = _GEN ? r_uop_is_unique : ren1_uops_0_is_unique; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_flush_on_commit = _GEN ? r_uop_flush_on_commit : ren1_uops_0_flush_on_commit; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ldst_is_rs1 = _GEN & r_uop_ldst_is_rs1; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ldst = _GEN ? r_uop_ldst : ren1_uops_0_ldst; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_lrs1 = _GEN ? r_uop_lrs1 : ren1_uops_0_lrs1; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_lrs2 = _GEN ? r_uop_lrs2 : ren1_uops_0_lrs2; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_lrs3 = _GEN ? r_uop_lrs3 : ren1_uops_0_lrs3; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_ldst_val = _GEN ? r_uop_ldst_val : ren1_uops_0_ldst_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_dst_rtype = _GEN ? r_uop_dst_rtype : ren1_uops_0_dst_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_lrs1_rtype = _GEN ? r_uop_lrs1_rtype : ren1_uops_0_lrs1_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_lrs2_rtype = _GEN ? r_uop_lrs2_rtype : ren1_uops_0_lrs2_rtype; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_frs3_en = _GEN ? r_uop_frs3_en : ren1_uops_0_frs3_en; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_fp_val = _GEN ? r_uop_fp_val : ren1_uops_0_fp_val; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_fp_single = _GEN ? r_uop_fp_single : ren1_uops_0_fp_single; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_xcpt_pf_if = _GEN ? r_uop_xcpt_pf_if : ren1_uops_0_xcpt_pf_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_xcpt_ae_if = _GEN ? r_uop_xcpt_ae_if : ren1_uops_0_xcpt_ae_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_xcpt_ma_if = _GEN & r_uop_xcpt_ma_if; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_bp_debug_if = _GEN ? r_uop_bp_debug_if : ren1_uops_0_bp_debug_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_bp_xcpt_if = _GEN ? r_uop_bp_xcpt_if : ren1_uops_0_bp_xcpt_if; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_debug_fsrc = _GEN ? r_uop_debug_fsrc : ren1_uops_0_debug_fsrc; // @[rename-stage.scala:101:29, :122:23, :123:24, :125:14, :127:20, :129:30]
assign next_uop_debug_tsrc = _GEN ? r_uop_debug_tsrc : 2'h0; // @[rename-stage.scala:122:23, :123:24, :125:14, :127:20, :129:30]
wire [7:0] _r_uop_newuop_br_mask_T_1; // @[util.scala:74:35]
wire [7:0] r_uop_newuop_br_mask; // @[util.scala:73:26]
wire [7:0] _r_uop_newuop_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:74:37]
assign _r_uop_newuop_br_mask_T_1 = next_uop_br_mask & _r_uop_newuop_br_mask_T; // @[util.scala:74:{35,37}]
assign r_uop_newuop_br_mask = _r_uop_newuop_br_mask_T_1; // @[util.scala:73:26, :74:35]
reg busy_table_0; // @[rename-stage.scala:368:27]
wire _io_ren2_uops_0_ppred_busy_T = busy_table_0; // @[rename-stage.scala:368:27, :390:63]
reg busy_table_1; // @[rename-stage.scala:368:27]
reg busy_table_2; // @[rename-stage.scala:368:27]
reg busy_table_3; // @[rename-stage.scala:368:27]
reg busy_table_4; // @[rename-stage.scala:368:27]
reg busy_table_5; // @[rename-stage.scala:368:27]
reg busy_table_6; // @[rename-stage.scala:368:27]
reg busy_table_7; // @[rename-stage.scala:368:27]
reg busy_table_8; // @[rename-stage.scala:368:27]
reg busy_table_9; // @[rename-stage.scala:368:27]
reg busy_table_10; // @[rename-stage.scala:368:27]
reg busy_table_11; // @[rename-stage.scala:368:27]
reg busy_table_12; // @[rename-stage.scala:368:27]
reg busy_table_13; // @[rename-stage.scala:368:27]
reg busy_table_14; // @[rename-stage.scala:368:27]
reg busy_table_15; // @[rename-stage.scala:368:27]
wire _is_sfb_br_T = ren2_uops_0_is_br & ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29]
wire _is_sfb_shadow_T = ~ren2_uops_0_is_br; // @[rename-stage.scala:108:29]
wire _is_sfb_shadow_T_1 = _is_sfb_shadow_T & ren2_uops_0_is_sfb; // @[rename-stage.scala:108:29]
wire _io_ren2_uops_0_ppred_busy_T_2 = _io_ren2_uops_0_ppred_busy_T; // @[rename-stage.scala:390:{63,89}]
wire [1:0] lo_lo_lo = {busy_table_1, busy_table_0}; // @[rename-stage.scala:368:27, :402:30]
wire [1:0] lo_lo_hi = {busy_table_3, busy_table_2}; // @[rename-stage.scala:368:27, :402:30]
wire [3:0] lo_lo = {lo_lo_hi, lo_lo_lo}; // @[rename-stage.scala:402:30]
wire [1:0] lo_hi_lo = {busy_table_5, busy_table_4}; // @[rename-stage.scala:368:27, :402:30]
wire [1:0] lo_hi_hi = {busy_table_7, busy_table_6}; // @[rename-stage.scala:368:27, :402:30]
wire [3:0] lo_hi = {lo_hi_hi, lo_hi_lo}; // @[rename-stage.scala:402:30]
wire [7:0] lo = {lo_hi, lo_lo}; // @[rename-stage.scala:402:30]
wire [1:0] hi_lo_lo = {busy_table_9, busy_table_8}; // @[rename-stage.scala:368:27, :402:30]
wire [1:0] hi_lo_hi = {busy_table_11, busy_table_10}; // @[rename-stage.scala:368:27, :402:30]
wire [3:0] hi_lo = {hi_lo_hi, hi_lo_lo}; // @[rename-stage.scala:402:30]
wire [1:0] hi_hi_lo = {busy_table_13, busy_table_12}; // @[rename-stage.scala:368:27, :402:30]
wire [1:0] hi_hi_hi = {busy_table_15, busy_table_14}; // @[rename-stage.scala:368:27, :402:30]
wire [3:0] hi_hi = {hi_hi_hi, hi_hi_lo}; // @[rename-stage.scala:402:30]
wire [7:0] hi = {hi_hi, hi_lo}; // @[rename-stage.scala:402:30]
always @(posedge clock) begin // @[rename-stage.scala:356:7]
if (reset) begin // @[rename-stage.scala:356:7]
r_valid <= 1'h0; // @[rename-stage.scala:121:27]
busy_table_0 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_1 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_2 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_3 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_4 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_5 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_6 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_7 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_8 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_9 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_10 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_11 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_12 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_13 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_14 <= 1'h0; // @[rename-stage.scala:368:27]
busy_table_15 <= 1'h0; // @[rename-stage.scala:368:27]
end
else begin // @[rename-stage.scala:356:7]
r_valid <= ~io_kill_0 & (io_dis_ready_0 ? ren1_fire_0 : _r_valid_T_1); // @[rename-stage.scala:100:29, :121:27, :127:20, :128:15, :129:30, :130:15, :133:{15,26}, :356:7]
busy_table_0 <= lo[0]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_1 <= lo[1]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_2 <= lo[2]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_3 <= lo[3]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_4 <= lo[4]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_5 <= lo[5]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_6 <= lo[6]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_7 <= lo[7]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_8 <= hi[0]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_9 <= hi[1]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_10 <= hi[2]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_11 <= hi[3]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_12 <= hi[4]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_13 <= hi[5]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_14 <= hi[6]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
busy_table_15 <= hi[7]; // @[rename-stage.scala:368:27, :402:{30,55,73}]
end
r_uop_uopc <= r_uop_newuop_uopc; // @[util.scala:73:26]
r_uop_inst <= r_uop_newuop_inst; // @[util.scala:73:26]
r_uop_debug_inst <= r_uop_newuop_debug_inst; // @[util.scala:73:26]
r_uop_is_rvc <= r_uop_newuop_is_rvc; // @[util.scala:73:26]
r_uop_debug_pc <= r_uop_newuop_debug_pc; // @[util.scala:73:26]
r_uop_iq_type <= r_uop_newuop_iq_type; // @[util.scala:73:26]
r_uop_fu_code <= r_uop_newuop_fu_code; // @[util.scala:73:26]
r_uop_ctrl_br_type <= r_uop_newuop_ctrl_br_type; // @[util.scala:73:26]
r_uop_ctrl_op1_sel <= r_uop_newuop_ctrl_op1_sel; // @[util.scala:73:26]
r_uop_ctrl_op2_sel <= r_uop_newuop_ctrl_op2_sel; // @[util.scala:73:26]
r_uop_ctrl_imm_sel <= r_uop_newuop_ctrl_imm_sel; // @[util.scala:73:26]
r_uop_ctrl_op_fcn <= r_uop_newuop_ctrl_op_fcn; // @[util.scala:73:26]
r_uop_ctrl_fcn_dw <= r_uop_newuop_ctrl_fcn_dw; // @[util.scala:73:26]
r_uop_ctrl_csr_cmd <= r_uop_newuop_ctrl_csr_cmd; // @[util.scala:73:26]
r_uop_ctrl_is_load <= r_uop_newuop_ctrl_is_load; // @[util.scala:73:26]
r_uop_ctrl_is_sta <= r_uop_newuop_ctrl_is_sta; // @[util.scala:73:26]
r_uop_ctrl_is_std <= r_uop_newuop_ctrl_is_std; // @[util.scala:73:26]
r_uop_iw_state <= r_uop_newuop_iw_state; // @[util.scala:73:26]
r_uop_iw_p1_poisoned <= r_uop_newuop_iw_p1_poisoned; // @[util.scala:73:26]
r_uop_iw_p2_poisoned <= r_uop_newuop_iw_p2_poisoned; // @[util.scala:73:26]
r_uop_is_br <= r_uop_newuop_is_br; // @[util.scala:73:26]
r_uop_is_jalr <= r_uop_newuop_is_jalr; // @[util.scala:73:26]
r_uop_is_jal <= r_uop_newuop_is_jal; // @[util.scala:73:26]
r_uop_is_sfb <= r_uop_newuop_is_sfb; // @[util.scala:73:26]
r_uop_br_mask <= r_uop_newuop_br_mask; // @[util.scala:73:26]
r_uop_br_tag <= r_uop_newuop_br_tag; // @[util.scala:73:26]
r_uop_ftq_idx <= r_uop_newuop_ftq_idx; // @[util.scala:73:26]
r_uop_edge_inst <= r_uop_newuop_edge_inst; // @[util.scala:73:26]
r_uop_pc_lob <= r_uop_newuop_pc_lob; // @[util.scala:73:26]
r_uop_taken <= r_uop_newuop_taken; // @[util.scala:73:26]
r_uop_imm_packed <= r_uop_newuop_imm_packed; // @[util.scala:73:26]
r_uop_csr_addr <= r_uop_newuop_csr_addr; // @[util.scala:73:26]
r_uop_rob_idx <= r_uop_newuop_rob_idx; // @[util.scala:73:26]
r_uop_ldq_idx <= r_uop_newuop_ldq_idx; // @[util.scala:73:26]
r_uop_stq_idx <= r_uop_newuop_stq_idx; // @[util.scala:73:26]
r_uop_rxq_idx <= r_uop_newuop_rxq_idx; // @[util.scala:73:26]
r_uop_pdst <= r_uop_newuop_pdst; // @[util.scala:73:26]
r_uop_prs1 <= r_uop_newuop_prs1; // @[util.scala:73:26]
r_uop_prs2 <= r_uop_newuop_prs2; // @[util.scala:73:26]
r_uop_prs3 <= r_uop_newuop_prs3; // @[util.scala:73:26]
r_uop_ppred <= r_uop_newuop_ppred; // @[util.scala:73:26]
r_uop_prs1_busy <= r_uop_newuop_prs1_busy; // @[util.scala:73:26]
r_uop_prs2_busy <= r_uop_newuop_prs2_busy; // @[util.scala:73:26]
r_uop_prs3_busy <= r_uop_newuop_prs3_busy; // @[util.scala:73:26]
r_uop_ppred_busy <= r_uop_newuop_ppred_busy; // @[util.scala:73:26]
r_uop_stale_pdst <= r_uop_newuop_stale_pdst; // @[util.scala:73:26]
r_uop_exception <= r_uop_newuop_exception; // @[util.scala:73:26]
r_uop_exc_cause <= r_uop_newuop_exc_cause; // @[util.scala:73:26]
r_uop_bypassable <= r_uop_newuop_bypassable; // @[util.scala:73:26]
r_uop_mem_cmd <= r_uop_newuop_mem_cmd; // @[util.scala:73:26]
r_uop_mem_size <= r_uop_newuop_mem_size; // @[util.scala:73:26]
r_uop_mem_signed <= r_uop_newuop_mem_signed; // @[util.scala:73:26]
r_uop_is_fence <= r_uop_newuop_is_fence; // @[util.scala:73:26]
r_uop_is_fencei <= r_uop_newuop_is_fencei; // @[util.scala:73:26]
r_uop_is_amo <= r_uop_newuop_is_amo; // @[util.scala:73:26]
r_uop_uses_ldq <= r_uop_newuop_uses_ldq; // @[util.scala:73:26]
r_uop_uses_stq <= r_uop_newuop_uses_stq; // @[util.scala:73:26]
r_uop_is_sys_pc2epc <= r_uop_newuop_is_sys_pc2epc; // @[util.scala:73:26]
r_uop_is_unique <= r_uop_newuop_is_unique; // @[util.scala:73:26]
r_uop_flush_on_commit <= r_uop_newuop_flush_on_commit; // @[util.scala:73:26]
r_uop_ldst_is_rs1 <= r_uop_newuop_ldst_is_rs1; // @[util.scala:73:26]
r_uop_ldst <= r_uop_newuop_ldst; // @[util.scala:73:26]
r_uop_lrs1 <= r_uop_newuop_lrs1; // @[util.scala:73:26]
r_uop_lrs2 <= r_uop_newuop_lrs2; // @[util.scala:73:26]
r_uop_lrs3 <= r_uop_newuop_lrs3; // @[util.scala:73:26]
r_uop_ldst_val <= r_uop_newuop_ldst_val; // @[util.scala:73:26]
r_uop_dst_rtype <= r_uop_newuop_dst_rtype; // @[util.scala:73:26]
r_uop_lrs1_rtype <= r_uop_newuop_lrs1_rtype; // @[util.scala:73:26]
r_uop_lrs2_rtype <= r_uop_newuop_lrs2_rtype; // @[util.scala:73:26]
r_uop_frs3_en <= r_uop_newuop_frs3_en; // @[util.scala:73:26]
r_uop_fp_val <= r_uop_newuop_fp_val; // @[util.scala:73:26]
r_uop_fp_single <= r_uop_newuop_fp_single; // @[util.scala:73:26]
r_uop_xcpt_pf_if <= r_uop_newuop_xcpt_pf_if; // @[util.scala:73:26]
r_uop_xcpt_ae_if <= r_uop_newuop_xcpt_ae_if; // @[util.scala:73:26]
r_uop_xcpt_ma_if <= r_uop_newuop_xcpt_ma_if; // @[util.scala:73:26]
r_uop_bp_debug_if <= r_uop_newuop_bp_debug_if; // @[util.scala:73:26]
r_uop_bp_xcpt_if <= r_uop_newuop_bp_xcpt_if; // @[util.scala:73:26]
r_uop_debug_fsrc <= r_uop_newuop_debug_fsrc; // @[util.scala:73:26]
r_uop_debug_tsrc <= r_uop_newuop_debug_tsrc; // @[util.scala:73:26]
  end // always @(posedge)
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
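// Illustrative usage sketch, not part of the upstream file: delay a Bool by two
// cycles with a reset value of false.B and a suggested register name. The module
// name "ShiftRegInitExample" is an assumption made only for this example.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Creates two registers (suggested names delayed_1 and delayed_0), each reset to false.B;
  // delayed_1 samples the input and delayed_0 drives the output.
  io.q := ShiftRegInit(io.d, n = 2, init = false.B, name = Some("delayed"))
}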
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
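// Illustrative usage sketch, not part of the upstream file: push a 1-bit signal
// through a depth-3 asynchronously reset register chain, the kind of structure a
// backend flow can later replace or constrain. The module name and the "sync"
// string are assumptions made only for this example.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Elaborates an AsyncResetShiftReg_w1_d3_i0 wrapper whose instance is suggested the name "sync".
  io.q := AsyncResetShiftReg(io.d, depth = 3, init = 0, name = Some("sync"))
}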
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
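// Illustrative sketch, not part of the upstream file: how the derived fields fall
// out of the parameters. The value names below are assumptions for this example.
object AsyncQueueParamsExample {
  val wide = AsyncQueueParams(depth = 8, sync = 3, safe = true, narrow = false)
  // wide.bits == 3 (log2Ceil(8)), wide.wires == 8: the full mem vector crosses over.
  val slim = AsyncQueueParams(depth = 8, narrow = true)
  // slim.wires == 1: the read mux moves to the source side and an index is sent back.
  val one = AsyncQueueParams.singleton()
  // one.depth == 1, one.bits == 0: a single-entry crossing never needs narrow.
}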
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
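// Illustrative sketch, not part of the upstream file: the binary-to-Gray mapping
// used above (b ^ (b >> 1)), modeled in plain Scala. Consecutive codes differ in
// exactly one bit, which is what makes the pointer safe to synchronize across
// clock domains. The object and method names are assumptions for this example.
object GrayCodeExample {
  def toGray(b: Int): Int = b ^ (b >> 1)
  // A 3-bit counter steps through 0,1,3,2,6,7,5,4 and then wraps back to 0.
  val sequence: Seq[Int] = (0 until 8).map(toGray)
}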
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset allowed to happen, but write side cannot know that dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
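// Illustrative sketch, not part of the upstream file: the mem address derived from
// a Gray-coded pointer, modeled in plain Scala for depth = 4 (bits = 2). The sink
// applies the same expression to ridx, so both sides agree on which slot a given
// pointer value names. The helper names are assumptions for this example.
object AsyncQueueIndexExample {
  def gray(b: Int): Int = b ^ (b >> 1)
  def index(g: Int, bits: Int): Int =
    (g & ((1 << bits) - 1)) ^ (((g >> bits) & 1) << (bits - 1))
  // Visits addresses 0,1,3,2 and then repeats: each of the four slots is written
  // exactly once per wrap of the (bits+1)-wide pointer.
  val addrs: Seq[Int] = (0 until 8).map(b => index(gray(b), 2))
}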
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected valued is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_48( // @[AsyncQueue.scala:58:7]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in = 1'h1; // @[ShiftReg.scala:45:23]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_61 io_out_source_valid_0 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File BankBinder.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}
case class BankBinderNode(mask: BigInt)(implicit valName: ValName) extends TLCustomNode
{
private val bit = mask & -mask
val maxXfer = TransferSizes(1, if (bit == 0 || bit > 4096) 4096 else bit.toInt)
val ids = AddressSet.enumerateMask(mask)
def resolveStar(iKnown: Int, oKnown: Int, iStars: Int, oStars: Int): (Int, Int) = {
val ports = ids.size
val oStar = if (oStars == 0) 0 else (ports - oKnown) / oStars
val iStar = if (iStars == 0) 0 else (ports - iKnown) / iStars
require (ports == iKnown + iStar*iStars, s"${name} must have ${ports} inputs, but has ${iKnown} + ${iStar}*${iStars} (at ${lazyModule.line})")
require (ports == oKnown + oStar*oStars, s"${name} must have ${ports} outputs, but has ${oKnown} + ${oStar}*${oStars} (at ${lazyModule.line})")
(iStar, oStar)
}
def mapParamsD(n: Int, p: Seq[TLMasterPortParameters]): Seq[TLMasterPortParameters] =
(p zip ids) map { case (cp, id) => cp.v1copy(clients = cp.clients.map { c => c.v1copy(
visibility = c.visibility.flatMap { a => a.intersect(AddressSet(id, ~mask))},
supportsProbe = c.supports.probe intersect maxXfer,
supportsArithmetic = c.supports.arithmetic intersect maxXfer,
supportsLogical = c.supports.logical intersect maxXfer,
supportsGet = c.supports.get intersect maxXfer,
supportsPutFull = c.supports.putFull intersect maxXfer,
supportsPutPartial = c.supports.putPartial intersect maxXfer,
supportsHint = c.supports.hint intersect maxXfer)})}
def mapParamsU(n: Int, p: Seq[TLSlavePortParameters]): Seq[TLSlavePortParameters] =
(p zip ids) map { case (mp, id) => mp.v1copy(managers = mp.managers.flatMap { m =>
val addresses = m.address.flatMap(a => a.intersect(AddressSet(id, ~mask)))
if (addresses.nonEmpty)
Some(m.v1copy(
address = addresses,
supportsAcquireT = m.supportsAcquireT intersect maxXfer,
supportsAcquireB = m.supportsAcquireB intersect maxXfer,
supportsArithmetic = m.supportsArithmetic intersect maxXfer,
supportsLogical = m.supportsLogical intersect maxXfer,
supportsGet = m.supportsGet intersect maxXfer,
supportsPutFull = m.supportsPutFull intersect maxXfer,
supportsPutPartial = m.supportsPutPartial intersect maxXfer,
supportsHint = m.supportsHint intersect maxXfer))
else None
})}
}
/* A BankBinder is used to divide contiguous memory regions into banks, suitable for a cache */
class BankBinder(mask: BigInt)(implicit p: Parameters) extends LazyModule
{
val node = BankBinderNode(mask)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out <> in
}
}
}
object BankBinder
{
def apply(mask: BigInt)(implicit p: Parameters): TLNode = {
val binder = LazyModule(new BankBinder(mask))
binder.node
}
def apply(nBanks: Int, granularity: Int)(implicit p: Parameters): TLNode = {
if (nBanks > 0) apply(granularity * (nBanks-1))
else TLTempNode()
}
}
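// Illustrative sketch, not part of the upstream file: the mask produced by the
// (nBanks, granularity) form and the per-port ids it enumerates, for four 4 KiB
// banks. The object and value names are assumptions made only for this example.
object BankBinderMaskExample {
  val mask: BigInt = BigInt(4096) * (4 - 1)             // 0x3000 for 4 banks of 4 KiB
  val ids: Seq[BigInt] = AddressSet.enumerateMask(mask) // the bank ids 0x0, 0x1000, 0x2000, 0x3000
  // Each of the four ports then only sees addresses matching AddressSet(id, ~mask).
}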
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
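// Illustrative sketch, not part of the upstream file: a trivial LazyModule that
// exposes one buffered TileLink node with the default BufferParams, meant to be
// spliced into an edge elsewhere with := on both sides. The class name is an
// assumption made only for this example.
class TLBufferExample(implicit p: Parameters) extends LazyModule {
  // One stage of default buffering on every channel of whatever edge this node joins.
  val node = TLBuffer(BufferParams.default)
  lazy val module = new LazyModuleImp(this) { }
}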
File Filter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, RegionType, TransferSizes}
class TLFilter(
mfilter: TLFilter.ManagerFilter = TLFilter.mIdentity,
cfilter: TLFilter.ClientFilter = TLFilter.cIdentity
)(implicit p: Parameters) extends LazyModule
{
val node = new TLAdapterNode(
clientFn = { cp => cp.v1copy(clients = cp.clients.flatMap { c =>
val out = cfilter(c)
out.map { o => // Confirm the filter only REMOVES capability
require (c.sourceId.contains(o.sourceId))
require (c.supports.probe.contains(o.supports.probe))
require (c.supports.arithmetic.contains(o.supports.arithmetic))
require (c.supports.logical.contains(o.supports.logical))
require (c.supports.get.contains(o.supports.get))
require (c.supports.putFull.contains(o.supports.putFull))
require (c.supports.putPartial.contains(o.supports.putPartial))
require (c.supports.hint.contains(o.supports.hint))
require (!c.requestFifo || o.requestFifo)
}
out
})},
managerFn = { mp =>
val managers = mp.managers.flatMap { m =>
val out = mfilter(m)
out.map { o => // Confirm the filter only REMOVES capability
o.address.foreach { a => require (m.address.map(_.contains(a)).reduce(_||_)) }
require (o.regionType <= m.regionType)
// we allow executable to be changed both ways
require (m.supportsAcquireT.contains(o.supportsAcquireT))
require (m.supportsAcquireB.contains(o.supportsAcquireB))
require (m.supportsArithmetic.contains(o.supportsArithmetic))
require (m.supportsLogical.contains(o.supportsLogical))
require (m.supportsGet.contains(o.supportsGet))
require (m.supportsPutFull.contains(o.supportsPutFull))
require (m.supportsPutPartial.contains(o.supportsPutPartial))
require (m.supportsHint.contains(o.supportsHint))
require (!o.fifoId.isDefined || m.fifoId == o.fifoId)
}
out
}
mp.v1copy(managers = managers,
endSinkId = if (managers.exists(_.supportsAcquireB)) mp.endSinkId else 0)
}
) {
override def circuitIdentity = true
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out <> in
// In case the inner interface removes Acquire, tie-off the channels
if (!edgeIn.manager.anySupportAcquireB) {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLFilter
{
type ManagerFilter = TLSlaveParameters => Option[TLSlaveParameters]
type ClientFilter = TLMasterParameters => Option[TLMasterParameters]
// preserve manager visibility
def mIdentity: ManagerFilter = { m => Some(m) }
// preserve client visibility
def cIdentity: ClientFilter = { c => Some(c) }
// make only the intersected address sets visible
def mSelectIntersect(select: AddressSet): ManagerFilter = { m =>
val filtered = m.address.map(_.intersect(select)).flatten
val alignment = select.alignment /* alignment 0 means 'select' selected everything */
transferSizeHelper(m, filtered, alignment)
}
// make everything except the intersected address sets visible
def mSubtract(excepts: Seq[AddressSet]): ManagerFilter = { m =>
val filtered = excepts.foldLeft(m.address) { (a,e) => a.flatMap(_.subtract(e)) }
val alignment: BigInt = if (filtered.isEmpty) 0 else filtered.map(_.alignment).min
transferSizeHelper(m, filtered, alignment)
}
def mSubtract(except: AddressSet): ManagerFilter = { m =>
mSubtract(Seq(except))(m)
}
// adjust supported transfer sizes based on filtered intersection
private def transferSizeHelper(m: TLSlaveParameters, filtered: Seq[AddressSet], alignment: BigInt): Option[TLSlaveParameters] = {
val maxTransfer = 1 << 30
val capTransfer = if (alignment == 0 || alignment > maxTransfer) maxTransfer else alignment.toInt
val cap = TransferSizes(1, capTransfer)
if (filtered.isEmpty) { None } else {
Some(m.v1copy(
address = filtered,
supportsAcquireT = m.supportsAcquireT .intersect(cap),
supportsAcquireB = m.supportsAcquireB .intersect(cap),
supportsArithmetic = m.supportsArithmetic.intersect(cap),
supportsLogical = m.supportsLogical .intersect(cap),
supportsGet = m.supportsGet .intersect(cap),
supportsPutFull = m.supportsPutFull .intersect(cap),
supportsPutPartial = m.supportsPutPartial.intersect(cap),
supportsHint = m.supportsHint .intersect(cap)))
}
}
// hide any fully contained address sets
def mHideContained(containedBy: AddressSet): ManagerFilter = { m =>
val filtered = m.address.filterNot(containedBy.contains(_))
if (filtered.isEmpty) None else Some(m.v1copy(address = filtered))
}
// hide all cacheable managers
def mHideCacheable: ManagerFilter = { m =>
if (m.supportsAcquireB) None else Some(m)
}
// make visible only cacheable managers
def mSelectCacheable: ManagerFilter = { m =>
if (m.supportsAcquireB) Some(m) else None
}
// cacheable managers cannot be acquired from
def mMaskCacheable: ManagerFilter = { m =>
if (m.supportsAcquireB) {
Some(m.v1copy(
regionType = RegionType.UNCACHED,
supportsAcquireB = TransferSizes.none,
supportsAcquireT = TransferSizes.none,
alwaysGrantsT = false))
} else { Some(m) }
}
// only cacheable managers are visible, but cannot be acquired from
def mSelectAndMaskCacheable: ManagerFilter = { m =>
if (m.supportsAcquireB) {
Some(m.v1copy(
regionType = RegionType.UNCACHED,
supportsAcquireB = TransferSizes.none,
supportsAcquireT = TransferSizes.none,
alwaysGrantsT = false))
} else { None }
}
// hide all caching clients
def cHideCaching: ClientFilter = { c =>
if (c.supports.probe) None else Some(c)
}
  // only caching clients are visible
def cSelectCaching: ClientFilter = { c =>
if (c.supports.probe) Some(c) else None
}
// removes resources from managers
def mResourceRemover: ManagerFilter = { m =>
Some(m.v2copy(resources=Nil))
}
// default application applies neither type of filter unless overridden
def apply(
mfilter: ManagerFilter = TLFilter.mIdentity,
cfilter: ClientFilter = TLFilter.cIdentity
)(implicit p: Parameters): TLNode =
{
val filter = LazyModule(new TLFilter(mfilter, cfilter))
filter.node
}
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
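// Illustrative sketch (not part of the original source): rotations wrap the shifted-out bits around.
//   "b0001_0010".U(8.W).rotateRight(4)    // 0b0010_0001
//   "b0001_0010".U(8.W).rotateLeft(3.U)   // 0b1001_0000 (dynamic amount decomposed into power-of-2 rotates)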
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
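// Illustrative sketch (not part of the original source): modular pointer arithmetic for a queue of depth 6,
// where ptr is a hypothetical UInt already known to be < 6:
//   ptr.addWrap(1.U, 6)   // 5.U.addWrap(1.U, 6) yields 0.U
//   ptr.subWrap(1.U, 6)   // 0.U.subWrap(1.U, 6) yields 5.U
// For a power-of-2 n the modulo reduces to simple truncation of the low bits.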
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
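// Illustrative sketch (not part of the original source): OH1 is the "one-hot minus one" (thermometer) encoding.
//   UIntToOH1(3.U, 8)      // 0b0000_0111
//   OH1ToOH("b0111".U)     // 0b0000_1000
//   OH1ToUInt("b0111".U)   // 3.U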
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
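// Illustrative sketch (not part of the original source): each set bit is smeared toward the MSB (leftOR)
// or toward the LSB (rightOR).
//   leftOR("b0010_0100".U(8.W))    // 0b1111_1100
//   rightOR("b0010_0100".U(8.W))   // 0b0011_1111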
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
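// Illustrative sketch (not part of the original source): keys appear in first-encounter order, unlike Seq.groupBy.
//   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2)   // Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4))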
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File ClockDomain.scala:
package freechips.rocketchip.prci
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing
{
def clockBundle: ClockBundle
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
childClock := clockBundle.clock
childReset := clockBundle.reset
override def provideImplicitClockToLazyChildren = true
// these are just for backwards compatibility with external devices
// that were manually wiring themselves to the domain's clock/reset input:
val clock = IO(Output(chiselTypeOf(clockBundle.clock)))
val reset = IO(Output(chiselTypeOf(clockBundle.reset)))
clock := clockBundle.clock
reset := clockBundle.reset
}
}
abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing
class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain
{
def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name))
val clockNode = ClockSinkNode(Seq(clockSinkParams))
def clockBundle = clockNode.in.head._1
override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString
}
class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain
{
def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name))
val clockNode = ClockSourceNode(Seq(clockSourceParams))
def clockBundle = clockNode.out.head._1
override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString
}
abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing
File Jbar.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.AddressSet
class TLJbar(policy: TLArbiter.Policy = TLArbiter.roundRobin)(implicit p: Parameters) extends LazyModule
{
val node: TLJunctionNode = new TLJunctionNode(
clientFn = { seq =>
Seq.fill(node.dRatio)(seq(0).v1copy(
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
))
},
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
Seq.fill(node.uRatio)(seq(0).v1copy(
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
require (port.beatBytes == seq(0).beatBytes,
s"Xbar data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
val fifoIdMapper = fifoIdFactory()
port.managers map { manager => manager.v1copy(
fifoId = manager.fifoId.map(fifoIdMapper(_))
)}
}
))
}) {
override def circuitIdentity = uRatio == 1 && dRatio == 1
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
node.inoutGrouped.foreach { case (in, out) => TLXbar.circuit(policy, in, out) }
}
}
object TLJbar
{
def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin)(implicit p: Parameters) = {
val jbar = LazyModule(new TLJbar(policy))
jbar.node
}
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLJbarTestImp(nClients: Int, nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val jbar = LazyModule(new TLJbar)
val fuzzers = Seq.fill(nClients) {
val fuzzer = LazyModule(new TLFuzzer(txns))
jbar.node :*= TLXbar() := TLDelayer(0.1) := fuzzer.node
fuzzer
}
for (n <- 0 until nManagers) {
TLRAM(AddressSet(0x0+0x400*n, 0x3ff)) := TLFragmenter(4, 256) := TLDelayer(0.1) := jbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzzers.map(_.module.io.finished).reduce(_ && _)
}
}
class TLJbarTest(nClients: Int, nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLJbarTestImp(nClients, nManagers, txns)).module)
io.finished := dut.io.finished
dut.io.start := io.start
}
File ClockGroup.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.prci
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.resources.FixedClockResource
case class ClockGroupingNode(groupName: String)(implicit valName: ValName)
extends MixedNexusNode(ClockGroupImp, ClockImp)(
dFn = { _ => ClockSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) })
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupingNode(groupName)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
require (node.in.size == 1)
require (in.member.size == out.size)
(in.member.data zip out) foreach { case (i, o) => o := i }
}
}
object ClockGroup
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node
}
case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))})
{
override def circuitIdentity = outputs.size == 1
}
class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupAggregateNode(groupName)
override lazy val desiredName = s"ClockGroupAggregator_$groupName"
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in.unzip
val (out, _) = node.out.unzip
val outputs = out.flatMap(_.member.data)
require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1")
require (in.head.member.size == outputs.size)
in.head.member.data.zip(outputs).foreach { case (i, o) => o := i }
}
}
object ClockGroupAggregator
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node
}
class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule
{
val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val (out, _) = node.out.unzip
out.map { out: ClockGroupBundle =>
out.member.data.foreach { o =>
o.clock := clock; o.reset := reset }
}
}
}
object SimpleClockGroupSource
{
def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node
}
case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName)
extends NexusNode(ClockImp)(
dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) },
uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) },
inputRequiresOutput = false) {
def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new FixedClockResource(name, t.freqMHz, prefix)))
}
class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule
{
val node = new FixedClockBroadcastNode(fixedClockOpt) {
override def circuitIdentity = outputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
val (in, _) = node.in(0)
val (out, _) = node.out.unzip
override def desiredName = s"FixedClockBroadcast_${out.size}"
require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock")
out.foreach { _ := in }
}
}
object FixedClockBroadcast
{
def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node
}
case class PRCIClockGroupNode()(implicit valName: ValName)
extends NexusNode(ClockGroupImp)(
dFn = { _ => ClockGroupSourceParameters() },
uFn = { _ => ClockGroupSinkParameters("prci", Nil) },
outputRequiresInput = false)
File WidthWidget.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.AddressSet
import freechips.rocketchip.util.{Repeater, UIntToOH1}
// innerBeatBytes => the new client-facing bus width
class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule
{
private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes
val node = new TLAdapterNode(
clientFn = { case c => c },
managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){
override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired)
}
override lazy val desiredName = s"TLWidthWidget$innerBeatBytes"
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = outBytes / inBytes
val keepBits = log2Ceil(outBytes)
val dropBits = log2Ceil(inBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR }
val corrupt_reg = RegInit(false.B)
val corrupt_in = edgeIn.corrupt(in.bits)
val corrupt_out = corrupt_in || corrupt_reg
when (in.fire) {
count := count + 1.U
corrupt_reg := corrupt_out
when (last) {
count := 0.U
corrupt_reg := false.B
}
}
def helper(idata: UInt): UInt = {
// rdata is X until the first time a multi-beat write occurs.
// Prevent the X from leaking outside by jamming the mux control until
// the first time rdata is written (and hence no longer X).
val rdata_written_once = RegInit(false.B)
val masked_enable = enable.map(_ || !rdata_written_once)
val odata = Seq.fill(ratio) { WireInit(idata) }
val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata)))
val pdata = rdata :+ idata
val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) }
when (in.fire && !last) {
rdata_written_once := true.B
(rdata zip mdata) foreach { case (r, m) => r := m }
}
Cat(mdata.reverse)
}
in.ready := out.ready || !last
out.valid := in.valid && last
out.bits := in.bits
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits)))
edgeOut.corrupt(out.bits) := corrupt_out
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleC, i: TLBundleC) => ()
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossible bundle combination in WidthWidget")
}
}
def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = inBytes / outBytes
val keepBits = log2Ceil(inBytes)
val dropBits = log2Ceil(outBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
when (out.fire) {
count := count + 1.U
when (last) { count := 0.U }
}
// For sub-beat transfer, extract which part matters
val sel = in.bits match {
case a: TLBundleA => a.address(keepBits-1, dropBits)
case b: TLBundleB => b.address(keepBits-1, dropBits)
case c: TLBundleC => c.address(keepBits-1, dropBits)
case d: TLBundleD => {
val sel = sourceMap(d.source)
val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer
hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway
}
}
val index = sel | count
def helper(idata: UInt, width: Int): UInt = {
val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) }
mux(index)
}
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8))
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1)
case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1)
case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossible bundle combination in WidthWidget")
}
// Repeat the input if we're not last
!last
}
def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) {
// nothing to do; pass it through
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
} else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) {
// split input to output
val repeat = Wire(Bool())
val repeated = Repeater(in, repeat)
val cated = Wire(chiselTypeOf(repeated))
cated <> repeated
edgeIn.data(cated.bits) := Cat(
edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8),
edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0))
repeat := split(edgeIn, cated, edgeOut, out, sourceMap)
} else {
// merge input to output
merge(edgeIn, in, edgeOut, out)
}
}
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// If the master is narrower than the slave, the D channel must be narrowed.
// This is tricky, because the D channel has no address data.
// Thus, you don't know which part of a sub-beat transfer to extract.
// To fix this, we record the relevant address bits for all sources.
// The assumption is that this sort of situation happens only where
// you connect a narrow master to the system bus, so there are few sources.
def sourceMap(source_bits: UInt) = {
val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits
require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes)
val keepBits = log2Ceil(edgeOut.manager.beatBytes)
val dropBits = log2Ceil(edgeIn.manager.beatBytes)
val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W)))
val a_sel = in.a.bits.address(keepBits-1, dropBits)
when (in.a.fire) {
if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning
sources(0) := a_sel
} else {
sources(in.a.bits.source) := a_sel
}
}
// depopulate unused source registers:
edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U }
val bypass = in.a.valid && in.a.bits.source === source
if (edgeIn.manager.minLatency > 0) sources(source)
else Mux(bypass, a_sel, sources(source))
}
splice(edgeIn, in.a, edgeOut, out.a, sourceMap)
splice(edgeOut, out.d, edgeIn, in.d, sourceMap)
if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) {
splice(edgeOut, out.b, edgeIn, in.b, sourceMap)
splice(edgeIn, in.c, edgeOut, out.c, sourceMap)
out.e.valid := in.e.valid
out.e.bits := in.e.bits
in.e.ready := out.e.ready
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLWidthWidget
{
def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode =
{
val widget = LazyModule(new TLWidthWidget(innerBeatBytes))
widget.node
}
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes)
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("WidthWidget"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
(ram.node
:= TLDelayer(0.1)
:= TLFragmenter(4, 256)
:= TLWidthWidget(second)
:= TLWidthWidget(first)
:= TLDelayer(0.1)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module)
dut.io.start := DontCare
io.finished := dut.io.finished
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and the unconnected [[Dangle]]s from this module and its
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range of ids [start, end); may be empty (start == end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
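// Illustrative sketch (not part of the original source) of the TransferSizes algebra above:
//   TransferSizes(4, 8).intersect(TransferSizes(8, 32))   // TransferSizes(8, 8)
//   TransferSizes(4, 8).mincover(TransferSizes(32, 64))   // TransferSizes(4, 64), which also contains 16,
//                                                         // a size contained by neither operand (not a union)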
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
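// Illustrative sketch (not part of the original source): a misaligned region is decomposed into
// power-of-2-aligned AddressSets.
//   AddressSet.misaligned(0x100, 0x300)   // Seq(AddressSet(0x100, 0xff), AddressSet(0x200, 0x1ff))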
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
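// Illustrative sketch (not part of the original source):
//   AddressSet.enumerateBits(0x5)   // Seq(1, 4): the individual set bits of the mask
//   AddressSet.enumerateMask(0x5)   // Seq(0, 1, 4, 5): every value reachable under the mask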
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
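// Illustrative sketch (not part of the original source) of the predefined buffer configurations above:
//   BufferParams.default.latency   // 1: two-entry Queue with registered output
//   BufferParams.flow.latency      // 0: single entry, combinationally bypassed when empty
//   BufferParams.none.isDefined    // false: no queue is inserted, the channel passes through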
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Configs.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package freechips.rocketchip.subsystem
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tile._
import freechips.rocketchip.rocket._
import freechips.rocketchip.tilelink._
import sifive.blocks.inclusivecache._
import freechips.rocketchip.devices.tilelink._
import freechips.rocketchip.util._
import sifive.blocks.inclusivecache.InclusiveCacheParameters
case class InclusiveCacheParams(
ways: Int,
sets: Int,
writeBytes: Int, // backing store update granularity
portFactor: Int, // numSubBanks = (widest TL port * portFactor) / writeBytes
memCycles: Int, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
physicalFilter: Option[PhysicalFilterParams] = None,
hintsSkipProbe: Boolean = false, // do hints probe the same client
bankedControl: Boolean = false, // bank the cache ctrl with the cache banks
ctrlAddr: Option[Int] = Some(InclusiveCacheParameters.L2ControlAddress),
// Interior/Exterior refer to placement either inside the Scheduler or outside it
// Inner/Outer refer to buffers on the front (towards cores) or back (towards DDR) of the L2
bufInnerInterior: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC,
bufInnerExterior: InclusiveCachePortParameters = InclusiveCachePortParameters.flowAD,
bufOuterInterior: InclusiveCachePortParameters = InclusiveCachePortParameters.full,
bufOuterExterior: InclusiveCachePortParameters = InclusiveCachePortParameters.none)
case object InclusiveCacheKey extends Field[InclusiveCacheParams]
class WithInclusiveCache(
nWays: Int = 8,
capacityKB: Int = 512,
outerLatencyCycles: Int = 40,
subBankingFactor: Int = 4,
hintsSkipProbe: Boolean = false,
bankedControl: Boolean = false,
ctrlAddr: Option[Int] = Some(InclusiveCacheParameters.L2ControlAddress),
writeBytes: Int = 8
) extends Config((site, here, up) => {
case InclusiveCacheKey => InclusiveCacheParams(
sets = (capacityKB * 1024)/(site(CacheBlockBytes) * nWays * up(SubsystemBankedCoherenceKey, site).nBanks),
ways = nWays,
memCycles = outerLatencyCycles,
writeBytes = writeBytes,
portFactor = subBankingFactor,
hintsSkipProbe = hintsSkipProbe,
bankedControl = bankedControl,
ctrlAddr = ctrlAddr)
case SubsystemBankedCoherenceKey => up(SubsystemBankedCoherenceKey, site).copy(coherenceManager = { context =>
implicit val p = context.p
val sbus = context.tlBusWrapperLocationMap(SBUS)
val cbus = context.tlBusWrapperLocationMap.lift(CBUS).getOrElse(sbus)
val InclusiveCacheParams(
ways,
sets,
writeBytes,
portFactor,
memCycles,
physicalFilter,
hintsSkipProbe,
bankedControl,
ctrlAddr,
bufInnerInterior,
bufInnerExterior,
bufOuterInterior,
bufOuterExterior) = p(InclusiveCacheKey)
val l2Ctrl = ctrlAddr.map { addr =>
InclusiveCacheControlParameters(
address = addr,
beatBytes = cbus.beatBytes,
bankedControl = bankedControl)
}
val l2 = LazyModule(new InclusiveCache(
CacheParameters(
level = 2,
ways = ways,
sets = sets,
blockBytes = sbus.blockBytes,
beatBytes = sbus.beatBytes,
hintsSkipProbe = hintsSkipProbe),
InclusiveCacheMicroParameters(
writeBytes = writeBytes,
portFactor = portFactor,
memCycles = memCycles,
innerBuf = bufInnerInterior,
outerBuf = bufOuterInterior),
l2Ctrl))
def skipMMIO(x: TLClientParameters) = {
val dcacheMMIO =
x.requestFifo &&
x.sourceId.start % 2 == 1 && // 1 => dcache issues acquires from another master
x.nodePath.last.name == "dcache.node"
if (dcacheMMIO) None else Some(x)
}
val filter = LazyModule(new TLFilter(cfilter = skipMMIO))
val l2_inner_buffer = bufInnerExterior()
val l2_outer_buffer = bufOuterExterior()
val cork = LazyModule(new TLCacheCork)
val lastLevelNode = cork.node
l2_inner_buffer.suggestName("InclusiveCache_inner_TLBuffer")
l2_outer_buffer.suggestName("InclusiveCache_outer_TLBuffer")
l2_inner_buffer.node :*= filter.node
l2.node :*= l2_inner_buffer.node
l2_outer_buffer.node :*= l2.node
/* PhysicalFilters need to be on the TL-C side of a CacheCork to prevent Acquire.NtoB -> Grant.toT */
physicalFilter match {
case None => lastLevelNode :*= l2_outer_buffer.node
case Some(fp) => {
val physicalFilter = LazyModule(new PhysicalFilter(fp.copy(controlBeatBytes = cbus.beatBytes)))
lastLevelNode :*= physicalFilter.node :*= l2_outer_buffer.node
physicalFilter.controlNode := cbus.coupleTo("physical_filter") {
TLBuffer(1) := TLFragmenter(cbus, Some("LLCPhysicalFilter")) := _
}
}
}
l2.ctrls.foreach {
_.ctrlnode := cbus.coupleTo("l2_ctrl") { TLBuffer(1) := TLFragmenter(cbus, Some("LLCCtrl")) := _ }
}
ElaborationArtefacts.add("l2.json", l2.module.json)
(filter.node, lastLevelNode, None)
})
})
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc. operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections, and we need to determine how many actual edges each of them corresponds to. We also need to build up
* the ranges of edges which correspond to each binding operator, so that we can apply the correct edge parameters
* and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the number of edges this node `N` resolves for any `foo :=* N` or
* `bar :=* foo :*=* N`. [[iStar]]: `Int` the number of edges this node `N` resolves for any `N :*= foo` or
* `N :*=* foo :*= bar`.
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// resolveStar relies on the node subclass to implement the resolution algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Sequence of this node's outward ports, resolved directly from the bindings (before any [[EphemeralNode]]
* forwarding is applied).
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding within the [[iPortMapping]] of the [[InwardNode]] on the other
* side of the binding. `n` Instance of the inward node. `p` View of [[Parameters]] where this connection was made.
* `s` Source info where this connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of this node's inward ports, resolved directly from the bindings (before any [[EphemeralNode]]
* forwarding is applied).
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding within the [[oPortMapping]] of the [[OutwardNode]] on the other
* side of the binding. `n` Instance of the outward node. `p` View of [[Parameters]] where this connection was made.
* `s` [[SourceInfo]] where this connection was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node's outputs to other nodes' inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections into this node's inputs from other nodes' outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
/** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph. */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
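// A minimal, self-contained sketch (the object name and code below are illustrative
// assumptions, not part of the diplomacy API) of how the `scanLeft` used for oSum/iSum
// above turns per-binding edge counts into the half-open (start, end) port ranges that
// become oPortMapping/iPortMapping.
object PortMappingSketch {
  /** Given the resolved number of edges contributed by each binding operator, return the
    * (start, end) range each binding covers: `scanLeft(0)(_ + _)` then `init zip tail`,
    * exactly as in the lazy val above.
    */
  def portRanges(edgesPerBinding: Seq[Int]): Seq[(Int, Int)] = {
    val sums = edgesPerBinding.scanLeft(0)(_ + _)
    sums.init.zip(sums.tail)
  }

  def main(args: Array[String]): Unit = {
    // e.g. a BIND_ONCE (1 edge), a star resolved to 3 edges, another BIND_ONCE:
    // prints Vector((0,1), (1,4), (4,5)) -- five edges in total.
    println(portRanges(Seq(1, 3, 1)))
  }
}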
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
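// Illustrative software model (the object name and code are assumptions for exposition,
// not Chisel hardware and not part of the TileLink API) of the beat bookkeeping performed
// by firstlastHelper above: `beats1` is the number of beats minus one, the counter starts
// at zero, loads `beats1` on the first fired beat, and decrements until `last` on the
// final beat.
object FirstLastSketch {
  /** Returns (first, last) flags for each of the `beats1 + 1` beats of one message,
    * assuming every beat fires.
    */
  def beatFlags(beats1: Int): Seq[(Boolean, Boolean)] = {
    var counter = 0
    (0 to beats1).map { _ =>
      val first = counter == 0
      val last = counter == 1 || beats1 == 0
      counter = if (first) beats1 else counter - 1
      (first, last)
    }
  }

  def main(args: Array[String]): Unit = {
    // A 32-byte burst on an 8-byte bus has 4 beats, so beats1 = 3:
    // prints Vector((true,false), (false,false), (false,false), (false,true)).
    println(beatFlags(3))
  }
}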
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
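// Illustrative software model (the object name and code are assumptions, not part of the
// TileLink API) of the alignment check in TLEdge.isAligned above, ignoring the maxLgSize
// clamp: a transfer of 2^lgSize bytes is aligned when its low lgSize address bits are zero,
// i.e. (address & ((1 << lgSize) - 1)) == 0.
object AlignmentSketch {
  def isAligned(address: BigInt, lgSize: Int): Boolean =
    (address & ((BigInt(1) << lgSize) - 1)) == 0

  def main(args: Array[String]): Unit = {
    println(isAligned(0x1000, 6)) // 64-byte transfer at 0x1000: true
    println(isAligned(0x1010, 6)) // 64-byte transfer at 0x1010: false (low bits 0x10)
  }
}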
File Xbar.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue}
import freechips.rocketchip.util.BundleField
// Trades off slave port proximity against routing resource cost
object ForceFanout
{
def apply[T](
a: TriStateValue = TriStateValue.unset,
b: TriStateValue = TriStateValue.unset,
c: TriStateValue = TriStateValue.unset,
d: TriStateValue = TriStateValue.unset,
e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) =
{
body(p.alterPartial {
case ForceFanoutKey => p(ForceFanoutKey) match {
case ForceFanoutParams(pa, pb, pc, pd, pe) =>
ForceFanoutParams(a.update(pa), b.update(pb), c.update(pc), d.update(pd), e.update(pe))
}
})
}
}
private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean)
private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false))
class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
val node = new TLNexusNode(
clientFn = { seq =>
seq(0).v1copy(
echoFields = BundleField.union(seq.flatMap(_.echoFields)),
requestFields = BundleField.union(seq.flatMap(_.requestFields)),
responseKeys = seq.flatMap(_.responseKeys).distinct,
minLatency = seq.map(_.minLatency).min,
clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) =>
port.clients map { client => client.v1copy(
sourceId = client.sourceId.shift(range.start)
)}
}
)
},
managerFn = { seq =>
val fifoIdFactory = TLXbar.relabeler()
seq(0).v1copy(
responseFields = BundleField.union(seq.flatMap(_.responseFields)),
requestKeys = seq.flatMap(_.requestKeys).distinct,
minLatency = seq.map(_.minLatency).min,
endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max,
managers = seq.flatMap { port =>
require (port.beatBytes == seq(0).beatBytes,
s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B")
val fifoIdMapper = fifoIdFactory()
port.managers map { manager => manager.v1copy(
fifoId = manager.fifoId.map(fifoIdMapper(_))
)}
}
)
}
){
override def circuitIdentity = outputs.size == 1 && inputs.size == 1
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
if ((node.in.size * node.out.size) > (8*32)) {
println (s"!!! WARNING !!!")
println (s" Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.")
println (s"!!! WARNING !!!")
}
val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle))
override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_")
TLXbar.circuit(policy, node.in, node.out)
}
}
object TLXbar
{
def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId))
def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId))
def assignRanges(sizes: Seq[Int]) = {
val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) }
val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size
val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions
val ranges = (tuples zip starts) map { case ((sz, i), st) =>
(if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i)
}
ranges.sortBy(_._2).map(_._1) // Restore original order
}
def relabeler() = {
var idFactory = 0
() => {
val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int]
(x: Int) => {
if (fifoMap.contains(x)) fifoMap(x) else {
val out = idFactory
idFactory = idFactory + 1
fifoMap += (x -> out)
out
}
}
}
}
def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]): Unit = {
val (io_in, edgesIn) = seqIn.unzip
val (io_out, edgesOut) = seqOut.unzip
// Not every master need connect to every slave on every channel; determine which connections are necessary
val reachableIO = edgesIn.map { cp => edgesOut.map { mp =>
cp.client.clients.exists { c => mp.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)}}}}
}.toVector}.toVector
val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED)
}.toVector}.toVector
val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) =>
(edgesOut zip reachableO).map { case (mp, reachable) =>
reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB
}.toVector}.toVector
val connectAIO = reachableIO
val connectBIO = probeIO
val connectCIO = releaseIO
val connectDIO = reachableIO
val connectEIO = releaseIO
def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } }
val connectAOI = transpose(connectAIO)
val connectBOI = transpose(connectBIO)
val connectCOI = transpose(connectCIO)
val connectDOI = transpose(connectDIO)
val connectEOI = transpose(connectEIO)
// Grab the port ID mapping
val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
// We need an intermediate size of bundle with the widest possible identifiers
val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params))
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
// Transform input bundle sources (sinks use global namespace on both sides)
val in = Wire(Vec(io_in.size, TLBundle(wide_bundle)))
for (i <- 0 until in.size) {
val r = inputIdRanges(i)
if (connectAIO(i).exists(x=>x)) {
in(i).a.bits.user := DontCare
in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll
in(i).a.bits.source := io_in(i).a.bits.source | r.start.U
} else {
in(i).a := DontCare
io_in(i).a := DontCare
in(i).a.valid := false.B
io_in(i).a.ready := true.B
}
if (connectBIO(i).exists(x=>x)) {
io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll
io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size)
} else {
in(i).b := DontCare
io_in(i).b := DontCare
in(i).b.ready := true.B
io_in(i).b.valid := false.B
}
if (connectCIO(i).exists(x=>x)) {
in(i).c.bits.user := DontCare
in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll
in(i).c.bits.source := io_in(i).c.bits.source | r.start.U
} else {
in(i).c := DontCare
io_in(i).c := DontCare
in(i).c.valid := false.B
io_in(i).c.ready := true.B
}
if (connectDIO(i).exists(x=>x)) {
io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll
io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size)
} else {
in(i).d := DontCare
io_in(i).d := DontCare
in(i).d.ready := true.B
io_in(i).d.valid := false.B
}
if (connectEIO(i).exists(x=>x)) {
in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll
} else {
in(i).e := DontCare
io_in(i).e := DontCare
in(i).e.valid := false.B
io_in(i).e.ready := true.B
}
}
// Transform output bundle sinks (sources use global namespace on both sides)
val out = Wire(Vec(io_out.size, TLBundle(wide_bundle)))
for (o <- 0 until out.size) {
val r = outputIdRanges(o)
if (connectAOI(o).exists(x=>x)) {
out(o).a.bits.user := DontCare
io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll
} else {
out(o).a := DontCare
io_out(o).a := DontCare
out(o).a.ready := true.B
io_out(o).a.valid := false.B
}
if (connectBOI(o).exists(x=>x)) {
out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll
} else {
out(o).b := DontCare
io_out(o).b := DontCare
out(o).b.valid := false.B
io_out(o).b.ready := true.B
}
if (connectCOI(o).exists(x=>x)) {
out(o).c.bits.user := DontCare
io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll
} else {
out(o).c := DontCare
io_out(o).c := DontCare
out(o).c.ready := true.B
io_out(o).c.valid := false.B
}
if (connectDOI(o).exists(x=>x)) {
out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll
out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U
} else {
out(o).d := DontCare
io_out(o).d := DontCare
out(o).d.valid := false.B
io_out(o).d.ready := true.B
}
if (connectEOI(o).exists(x=>x)) {
io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll
io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size)
} else {
out(o).e := DontCare
io_out(o).e := DontCare
out(o).e.ready := true.B
io_out(o).e.valid := false.B
}
}
// Filter a list to only those elements selected
def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1)
// Based on input=>output connectivity, create per-input minimal address decode circuits
val requiredAC = (connectAIO ++ connectCIO).distinct
val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO =>
val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address))
val routingMask = AddressDecoder(filter(port_addrs, connectO))
val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct))
// Print the address mapping
if (false) {
println("Xbar mapping:")
route_addrs.foreach { p =>
print(" ")
p.foreach { a => print(s" ${a}") }
println("")
}
println("--")
}
(connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _)))
}.toMap
// Print the ID mapping
if (false) {
println(s"XBar mapping:")
(edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) =>
println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}")
}
println("")
}
val addressA = (in zip edgesIn) map { case (i, e) => e.address(i.a.bits) }
val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) }
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } }
val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } }
val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } }
val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } }
val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) }
val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) }
val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) }
val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) }
val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) }
// Fanout the input sources to the output sinks
val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) })
val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) })
val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) })
val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) })
val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) })
// Arbitrate amongst the sources
for (o <- 0 until out.size) {
TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*)
TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*)
TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*)
filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B }
filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B }
}
for (i <- 0 until in.size) {
TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*)
TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*)
filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B }
filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B }
}
}
def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
val xbar = LazyModule(new TLXbar(policy, nameSuffix))
xbar.node
}
// Replicate an input port to each output port
def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = {
val filtered = Wire(Vec(select.size, chiselTypeOf(input)))
for (i <- 0 until select.size) {
filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits)
filtered(i).valid := input.valid && (select(i) || (select.size == 1).B)
}
input.ready := Mux1H(select, filtered.map(_.ready))
filtered
}
}
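// Worked example (the object name and code are illustrative assumptions, not library code)
// of the source-ID packing performed by TLXbar.assignRanges above: each input's ID space is
// rounded up to a power of two, the largest spaces are packed lowest in the global ID space,
// and the resulting ranges are reported back in the original port order.
object XbarIdSketch {
  private def pow2Ceil(z: Int): Int = if (z <= 1) z else Integer.highestOneBit(z - 1) << 1

  /** Same packing as TLXbar.assignRanges, but returning plain (start, end) pairs. */
  def assignRanges(sizes: Seq[Int]): Seq[(Int, Int)] = {
    val pow2Sizes = sizes.map(pow2Ceil)
    val tuples = pow2Sizes.zipWithIndex.sortBy(_._1)            // sort by increasing size
    val starts = tuples.scanRight(0)(_._1 + _).tail             // suffix-sum = start positions
    val ranges = (tuples zip starts).map { case ((sz, i), st) => ((st, st + sz), i) }
    ranges.sortBy(_._2).map(_._1)                               // restore original port order
  }

  def main(args: Array[String]): Unit = {
    // Three masters with 1, 5 and 2 source IDs: 5 rounds up to 8 and is packed lowest,
    // so this prints Vector((10,11), (0,8), (8,10)) in the original port order.
    println(assignRanges(Seq(1, 5, 2)))
  }
}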
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Xbar"))
val xbar = LazyModule(new TLXbar)
xbar.node := TLDelayer(0.1) := model.node := fuzz.node
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val xbar = LazyModule(new TLXbar)
val fuzzers = (0 until nClients) map { n =>
val fuzz = LazyModule(new TLFuzzer(txns))
xbar.node := TLDelayer(0.1) := fuzz.node
fuzz
}
(0 until nManagers) foreach { n =>
val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff)))
ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzzers.last.module.io.finished
}
}
class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module)
dut.io.start := io.start
io.finished := dut.io.finished
}
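// Illustrative software model (the object name and code are assumptions, not Chisel and not
// part of the TLXbar API) of the ready side of TLXbar.fanout above: the input beat is
// replicated to every selected output, and the input's ready is taken from whichever output
// the one-hot select picks (Mux1H); the valid replication in fanout additionally bypasses
// the select when there is only a single output.
object FanoutSketch {
  /** One-hot mux over Booleans: returns the value whose select bit is set, or false if none. */
  def mux1H(select: Seq[Boolean], values: Seq[Boolean]): Boolean =
    select.zip(values).collect { case (true, v) => v }.exists(identity)

  def main(args: Array[String]): Unit = {
    val select  = Seq(false, true, false) // this beat routes to output 1 only
    val readies = Seq(true, false, true)  // output 1 is currently not ready
    println(mux1H(select, readies))       // the input would see ready = false this cycle
  }
}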
| module CoherenceManagerWrapper( // @[ClockDomain.scala:14:9]
input auto_coupler_to_bus_named_mbus_bus_xing_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_bus_named_mbus_bus_xing_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_coupler_to_bus_named_mbus_bus_xing_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_mbus_bus_xing_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_coherent_jbar_anon_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_coherent_jbar_anon_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coherent_jbar_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coherent_jbar_anon_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coherent_jbar_anon_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_coherent_jbar_anon_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_coherent_jbar_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [15:0] auto_coherent_jbar_anon_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [127:0] auto_coherent_jbar_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_coherent_jbar_anon_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_coherent_jbar_anon_in_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_coherent_jbar_anon_in_b_valid, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_coherent_jbar_anon_in_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_coherent_jbar_anon_in_b_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_coherent_jbar_anon_in_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_coherent_jbar_anon_in_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coherent_jbar_anon_in_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coherent_jbar_anon_in_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_coherent_jbar_anon_in_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_coherent_jbar_anon_in_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_coherent_jbar_anon_in_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [127:0] auto_coherent_jbar_anon_in_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_coherent_jbar_anon_in_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_coherent_jbar_anon_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_coherent_jbar_anon_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coherent_jbar_anon_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_coherent_jbar_anon_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_coherent_jbar_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_coherent_jbar_anon_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_coherent_jbar_anon_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_coherent_jbar_anon_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [127:0] auto_coherent_jbar_anon_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_coherent_jbar_anon_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_coherent_jbar_anon_in_e_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_coherent_jbar_anon_in_e_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_l2_ctrls_ctrl_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_l2_ctrls_ctrl_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_l2_ctrls_ctrl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_l2_ctrls_ctrl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_l2_ctrls_ctrl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_l2_ctrls_ctrl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [25:0] auto_l2_ctrls_ctrl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_l2_ctrls_ctrl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_l2_ctrls_ctrl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_l2_ctrls_ctrl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_l2_ctrls_ctrl_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_l2_ctrls_ctrl_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_l2_ctrls_ctrl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_l2_ctrls_ctrl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_l2_ctrls_ctrl_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_l2_ctrls_ctrl_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_coh_clock_groups_in_member_coh_0_clock, // @[LazyModuleImp.scala:107:25]
input auto_coh_clock_groups_in_member_coh_0_reset // @[LazyModuleImp.scala:107:25]
);
wire coupler_to_bus_named_mbus_widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [1:0] coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire [7:0] coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_auto_widget_anon_in_d_ready; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_mbus_auto_widget_anon_in_a_valid; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire [63:0] coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_data; // @[LazyModuleImp.scala:138:7]
wire [7:0] coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_mask; // @[LazyModuleImp.scala:138:7]
wire [31:0] coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_address; // @[LazyModuleImp.scala:138:7]
wire [4:0] coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_source; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_size; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_param; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [3:0] coherent_jbar_out_0_e_bits_sink; // @[Xbar.scala:216:19]
wire [3:0] coherent_jbar_out_0_d_bits_sink; // @[Xbar.scala:216:19]
wire [5:0] coherent_jbar_in_0_d_bits_source; // @[Xbar.scala:159:18]
wire [5:0] coherent_jbar_in_0_c_bits_source; // @[Xbar.scala:159:18]
wire [5:0] coherent_jbar_in_0_a_bits_source; // @[Xbar.scala:159:18]
wire InclusiveCache_outer_TLBuffer_auto_out_d_valid; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_d_bits_corrupt; // @[Buffer.scala:40:9]
wire [63:0] InclusiveCache_outer_TLBuffer_auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_d_bits_denied; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_out_d_bits_sink; // @[Buffer.scala:40:9]
wire [3:0] InclusiveCache_outer_TLBuffer_auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [1:0] InclusiveCache_outer_TLBuffer_auto_out_d_bits_param; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_c_ready; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_a_ready; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_in_e_valid; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_in_e_bits_sink; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_in_d_ready; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_in_c_valid; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_in_c_bits_corrupt; // @[Buffer.scala:40:9]
wire [63:0] InclusiveCache_outer_TLBuffer_auto_in_c_bits_data; // @[Buffer.scala:40:9]
wire [31:0] InclusiveCache_outer_TLBuffer_auto_in_c_bits_address; // @[Buffer.scala:40:9]
wire [3:0] InclusiveCache_outer_TLBuffer_auto_in_c_bits_source; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_in_c_bits_size; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_in_c_bits_param; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_in_c_bits_opcode; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_in_a_valid; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire [63:0] InclusiveCache_outer_TLBuffer_auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire [7:0] InclusiveCache_outer_TLBuffer_auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [31:0] InclusiveCache_outer_TLBuffer_auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [3:0] InclusiveCache_outer_TLBuffer_auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire filter_auto_anon_out_d_valid; // @[Filter.scala:60:9]
wire filter_auto_anon_out_d_bits_corrupt; // @[Filter.scala:60:9]
wire [127:0] filter_auto_anon_out_d_bits_data; // @[Filter.scala:60:9]
wire filter_auto_anon_out_d_bits_denied; // @[Filter.scala:60:9]
wire [3:0] filter_auto_anon_out_d_bits_sink; // @[Filter.scala:60:9]
wire [5:0] filter_auto_anon_out_d_bits_source; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_out_d_bits_size; // @[Filter.scala:60:9]
wire [1:0] filter_auto_anon_out_d_bits_param; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_out_d_bits_opcode; // @[Filter.scala:60:9]
wire filter_auto_anon_out_c_ready; // @[Filter.scala:60:9]
wire filter_auto_anon_out_b_valid; // @[Filter.scala:60:9]
wire [31:0] filter_auto_anon_out_b_bits_address; // @[Filter.scala:60:9]
wire [1:0] filter_auto_anon_out_b_bits_param; // @[Filter.scala:60:9]
wire filter_auto_anon_out_a_ready; // @[Filter.scala:60:9]
wire filter_auto_anon_in_e_valid; // @[Filter.scala:60:9]
wire [3:0] filter_auto_anon_in_e_bits_sink; // @[Filter.scala:60:9]
wire filter_auto_anon_in_d_valid; // @[Filter.scala:60:9]
wire filter_auto_anon_in_d_ready; // @[Filter.scala:60:9]
wire filter_auto_anon_in_d_bits_corrupt; // @[Filter.scala:60:9]
wire [127:0] filter_auto_anon_in_d_bits_data; // @[Filter.scala:60:9]
wire filter_auto_anon_in_d_bits_denied; // @[Filter.scala:60:9]
wire [3:0] filter_auto_anon_in_d_bits_sink; // @[Filter.scala:60:9]
wire [5:0] filter_auto_anon_in_d_bits_source; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_in_d_bits_size; // @[Filter.scala:60:9]
wire [1:0] filter_auto_anon_in_d_bits_param; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_in_d_bits_opcode; // @[Filter.scala:60:9]
wire filter_auto_anon_in_c_valid; // @[Filter.scala:60:9]
wire filter_auto_anon_in_c_ready; // @[Filter.scala:60:9]
wire filter_auto_anon_in_c_bits_corrupt; // @[Filter.scala:60:9]
wire [127:0] filter_auto_anon_in_c_bits_data; // @[Filter.scala:60:9]
wire [31:0] filter_auto_anon_in_c_bits_address; // @[Filter.scala:60:9]
wire [5:0] filter_auto_anon_in_c_bits_source; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_in_c_bits_size; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_in_c_bits_param; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_in_c_bits_opcode; // @[Filter.scala:60:9]
wire filter_auto_anon_in_b_valid; // @[Filter.scala:60:9]
wire filter_auto_anon_in_b_ready; // @[Filter.scala:60:9]
wire [31:0] filter_auto_anon_in_b_bits_address; // @[Filter.scala:60:9]
wire [1:0] filter_auto_anon_in_b_bits_param; // @[Filter.scala:60:9]
wire filter_auto_anon_in_a_valid; // @[Filter.scala:60:9]
wire filter_auto_anon_in_a_ready; // @[Filter.scala:60:9]
wire filter_auto_anon_in_a_bits_corrupt; // @[Filter.scala:60:9]
wire [127:0] filter_auto_anon_in_a_bits_data; // @[Filter.scala:60:9]
wire [15:0] filter_auto_anon_in_a_bits_mask; // @[Filter.scala:60:9]
wire [31:0] filter_auto_anon_in_a_bits_address; // @[Filter.scala:60:9]
wire [5:0] filter_auto_anon_in_a_bits_source; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_in_a_bits_size; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_in_a_bits_param; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_in_a_bits_opcode; // @[Filter.scala:60:9]
wire fixedClockNode_auto_anon_out_reset; // @[ClockGroup.scala:104:9]
wire fixedClockNode_auto_anon_out_clock; // @[ClockGroup.scala:104:9]
wire clockGroup_auto_out_reset; // @[ClockGroup.scala:24:9]
wire clockGroup_auto_out_clock; // @[ClockGroup.scala:24:9]
wire coh_clock_groups_auto_out_member_coh_0_reset; // @[ClockGroup.scala:53:9]
wire coh_clock_groups_auto_out_member_coh_0_clock; // @[ClockGroup.scala:53:9]
wire _binder_auto_in_a_ready; // @[BankBinder.scala:71:28]
wire _binder_auto_in_d_valid; // @[BankBinder.scala:71:28]
wire [2:0] _binder_auto_in_d_bits_opcode; // @[BankBinder.scala:71:28]
wire [1:0] _binder_auto_in_d_bits_param; // @[BankBinder.scala:71:28]
wire [2:0] _binder_auto_in_d_bits_size; // @[BankBinder.scala:71:28]
wire [4:0] _binder_auto_in_d_bits_source; // @[BankBinder.scala:71:28]
wire _binder_auto_in_d_bits_sink; // @[BankBinder.scala:71:28]
wire _binder_auto_in_d_bits_denied; // @[BankBinder.scala:71:28]
wire [63:0] _binder_auto_in_d_bits_data; // @[BankBinder.scala:71:28]
wire _binder_auto_in_d_bits_corrupt; // @[BankBinder.scala:71:28]
wire _cork_auto_out_a_valid; // @[Configs.scala:120:26]
wire [2:0] _cork_auto_out_a_bits_opcode; // @[Configs.scala:120:26]
wire [2:0] _cork_auto_out_a_bits_param; // @[Configs.scala:120:26]
wire [2:0] _cork_auto_out_a_bits_size; // @[Configs.scala:120:26]
wire [4:0] _cork_auto_out_a_bits_source; // @[Configs.scala:120:26]
wire [31:0] _cork_auto_out_a_bits_address; // @[Configs.scala:120:26]
wire [7:0] _cork_auto_out_a_bits_mask; // @[Configs.scala:120:26]
wire [63:0] _cork_auto_out_a_bits_data; // @[Configs.scala:120:26]
wire _cork_auto_out_a_bits_corrupt; // @[Configs.scala:120:26]
wire _cork_auto_out_d_ready; // @[Configs.scala:120:26]
wire _InclusiveCache_inner_TLBuffer_auto_out_a_valid; // @[Parameters.scala:56:69]
wire [2:0] _InclusiveCache_inner_TLBuffer_auto_out_a_bits_opcode; // @[Parameters.scala:56:69]
wire [2:0] _InclusiveCache_inner_TLBuffer_auto_out_a_bits_param; // @[Parameters.scala:56:69]
wire [2:0] _InclusiveCache_inner_TLBuffer_auto_out_a_bits_size; // @[Parameters.scala:56:69]
wire [5:0] _InclusiveCache_inner_TLBuffer_auto_out_a_bits_source; // @[Parameters.scala:56:69]
wire [31:0] _InclusiveCache_inner_TLBuffer_auto_out_a_bits_address; // @[Parameters.scala:56:69]
wire [15:0] _InclusiveCache_inner_TLBuffer_auto_out_a_bits_mask; // @[Parameters.scala:56:69]
wire [127:0] _InclusiveCache_inner_TLBuffer_auto_out_a_bits_data; // @[Parameters.scala:56:69]
wire _InclusiveCache_inner_TLBuffer_auto_out_a_bits_corrupt; // @[Parameters.scala:56:69]
wire _InclusiveCache_inner_TLBuffer_auto_out_b_ready; // @[Parameters.scala:56:69]
wire _InclusiveCache_inner_TLBuffer_auto_out_c_valid; // @[Parameters.scala:56:69]
wire [2:0] _InclusiveCache_inner_TLBuffer_auto_out_c_bits_opcode; // @[Parameters.scala:56:69]
wire [2:0] _InclusiveCache_inner_TLBuffer_auto_out_c_bits_param; // @[Parameters.scala:56:69]
wire [2:0] _InclusiveCache_inner_TLBuffer_auto_out_c_bits_size; // @[Parameters.scala:56:69]
wire [5:0] _InclusiveCache_inner_TLBuffer_auto_out_c_bits_source; // @[Parameters.scala:56:69]
wire [31:0] _InclusiveCache_inner_TLBuffer_auto_out_c_bits_address; // @[Parameters.scala:56:69]
wire [127:0] _InclusiveCache_inner_TLBuffer_auto_out_c_bits_data; // @[Parameters.scala:56:69]
wire _InclusiveCache_inner_TLBuffer_auto_out_c_bits_corrupt; // @[Parameters.scala:56:69]
wire _InclusiveCache_inner_TLBuffer_auto_out_d_ready; // @[Parameters.scala:56:69]
wire _InclusiveCache_inner_TLBuffer_auto_out_e_valid; // @[Parameters.scala:56:69]
wire [3:0] _InclusiveCache_inner_TLBuffer_auto_out_e_bits_sink; // @[Parameters.scala:56:69]
wire _l2_auto_in_a_ready; // @[Configs.scala:93:24]
wire _l2_auto_in_b_valid; // @[Configs.scala:93:24]
wire [1:0] _l2_auto_in_b_bits_param; // @[Configs.scala:93:24]
wire [31:0] _l2_auto_in_b_bits_address; // @[Configs.scala:93:24]
wire _l2_auto_in_c_ready; // @[Configs.scala:93:24]
wire _l2_auto_in_d_valid; // @[Configs.scala:93:24]
wire [2:0] _l2_auto_in_d_bits_opcode; // @[Configs.scala:93:24]
wire [1:0] _l2_auto_in_d_bits_param; // @[Configs.scala:93:24]
wire [2:0] _l2_auto_in_d_bits_size; // @[Configs.scala:93:24]
wire [5:0] _l2_auto_in_d_bits_source; // @[Configs.scala:93:24]
wire [3:0] _l2_auto_in_d_bits_sink; // @[Configs.scala:93:24]
wire _l2_auto_in_d_bits_denied; // @[Configs.scala:93:24]
wire [127:0] _l2_auto_in_d_bits_data; // @[Configs.scala:93:24]
wire _l2_auto_in_d_bits_corrupt; // @[Configs.scala:93:24]
wire auto_coupler_to_bus_named_mbus_bus_xing_out_a_ready_0 = auto_coupler_to_bus_named_mbus_bus_xing_out_a_ready; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_mbus_bus_xing_out_d_valid_0 = auto_coupler_to_bus_named_mbus_bus_xing_out_d_valid; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_opcode_0 = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_opcode; // @[ClockDomain.scala:14:9]
wire [1:0] auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_param_0 = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_param; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_size_0 = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_size; // @[ClockDomain.scala:14:9]
wire [4:0] auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_source_0 = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_source; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_sink_0 = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_sink; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_denied_0 = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_denied; // @[ClockDomain.scala:14:9]
wire [63:0] auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_data_0 = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_data; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_corrupt_0 = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_corrupt; // @[ClockDomain.scala:14:9]
wire auto_coherent_jbar_anon_in_a_valid_0 = auto_coherent_jbar_anon_in_a_valid; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coherent_jbar_anon_in_a_bits_opcode_0 = auto_coherent_jbar_anon_in_a_bits_opcode; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coherent_jbar_anon_in_a_bits_param_0 = auto_coherent_jbar_anon_in_a_bits_param; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coherent_jbar_anon_in_a_bits_size_0 = auto_coherent_jbar_anon_in_a_bits_size; // @[ClockDomain.scala:14:9]
wire [5:0] auto_coherent_jbar_anon_in_a_bits_source_0 = auto_coherent_jbar_anon_in_a_bits_source; // @[ClockDomain.scala:14:9]
wire [31:0] auto_coherent_jbar_anon_in_a_bits_address_0 = auto_coherent_jbar_anon_in_a_bits_address; // @[ClockDomain.scala:14:9]
wire [15:0] auto_coherent_jbar_anon_in_a_bits_mask_0 = auto_coherent_jbar_anon_in_a_bits_mask; // @[ClockDomain.scala:14:9]
wire [127:0] auto_coherent_jbar_anon_in_a_bits_data_0 = auto_coherent_jbar_anon_in_a_bits_data; // @[ClockDomain.scala:14:9]
wire auto_coherent_jbar_anon_in_a_bits_corrupt_0 = auto_coherent_jbar_anon_in_a_bits_corrupt; // @[ClockDomain.scala:14:9]
wire auto_coherent_jbar_anon_in_b_ready_0 = auto_coherent_jbar_anon_in_b_ready; // @[ClockDomain.scala:14:9]
wire auto_coherent_jbar_anon_in_c_valid_0 = auto_coherent_jbar_anon_in_c_valid; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coherent_jbar_anon_in_c_bits_opcode_0 = auto_coherent_jbar_anon_in_c_bits_opcode; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coherent_jbar_anon_in_c_bits_param_0 = auto_coherent_jbar_anon_in_c_bits_param; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coherent_jbar_anon_in_c_bits_size_0 = auto_coherent_jbar_anon_in_c_bits_size; // @[ClockDomain.scala:14:9]
wire [5:0] auto_coherent_jbar_anon_in_c_bits_source_0 = auto_coherent_jbar_anon_in_c_bits_source; // @[ClockDomain.scala:14:9]
wire [31:0] auto_coherent_jbar_anon_in_c_bits_address_0 = auto_coherent_jbar_anon_in_c_bits_address; // @[ClockDomain.scala:14:9]
wire [127:0] auto_coherent_jbar_anon_in_c_bits_data_0 = auto_coherent_jbar_anon_in_c_bits_data; // @[ClockDomain.scala:14:9]
wire auto_coherent_jbar_anon_in_c_bits_corrupt_0 = auto_coherent_jbar_anon_in_c_bits_corrupt; // @[ClockDomain.scala:14:9]
wire auto_coherent_jbar_anon_in_d_ready_0 = auto_coherent_jbar_anon_in_d_ready; // @[ClockDomain.scala:14:9]
wire auto_coherent_jbar_anon_in_e_valid_0 = auto_coherent_jbar_anon_in_e_valid; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coherent_jbar_anon_in_e_bits_sink_0 = auto_coherent_jbar_anon_in_e_bits_sink; // @[ClockDomain.scala:14:9]
wire auto_l2_ctrls_ctrl_in_a_valid_0 = auto_l2_ctrls_ctrl_in_a_valid; // @[ClockDomain.scala:14:9]
wire [2:0] auto_l2_ctrls_ctrl_in_a_bits_opcode_0 = auto_l2_ctrls_ctrl_in_a_bits_opcode; // @[ClockDomain.scala:14:9]
wire [2:0] auto_l2_ctrls_ctrl_in_a_bits_param_0 = auto_l2_ctrls_ctrl_in_a_bits_param; // @[ClockDomain.scala:14:9]
wire [1:0] auto_l2_ctrls_ctrl_in_a_bits_size_0 = auto_l2_ctrls_ctrl_in_a_bits_size; // @[ClockDomain.scala:14:9]
wire [10:0] auto_l2_ctrls_ctrl_in_a_bits_source_0 = auto_l2_ctrls_ctrl_in_a_bits_source; // @[ClockDomain.scala:14:9]
wire [25:0] auto_l2_ctrls_ctrl_in_a_bits_address_0 = auto_l2_ctrls_ctrl_in_a_bits_address; // @[ClockDomain.scala:14:9]
wire [7:0] auto_l2_ctrls_ctrl_in_a_bits_mask_0 = auto_l2_ctrls_ctrl_in_a_bits_mask; // @[ClockDomain.scala:14:9]
wire [63:0] auto_l2_ctrls_ctrl_in_a_bits_data_0 = auto_l2_ctrls_ctrl_in_a_bits_data; // @[ClockDomain.scala:14:9]
wire auto_l2_ctrls_ctrl_in_a_bits_corrupt_0 = auto_l2_ctrls_ctrl_in_a_bits_corrupt; // @[ClockDomain.scala:14:9]
wire auto_l2_ctrls_ctrl_in_d_ready_0 = auto_l2_ctrls_ctrl_in_d_ready; // @[ClockDomain.scala:14:9]
wire auto_coh_clock_groups_in_member_coh_0_clock_0 = auto_coh_clock_groups_in_member_coh_0_clock; // @[ClockDomain.scala:14:9]
wire auto_coh_clock_groups_in_member_coh_0_reset_0 = auto_coh_clock_groups_in_member_coh_0_reset; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coherent_jbar_anon_in_b_bits_opcode = 3'h6; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coherent_jbar_anon_in_b_bits_size = 3'h6; // @[ClockDomain.scala:14:9]
wire [2:0] filter_auto_anon_in_b_bits_opcode = 3'h6; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_in_b_bits_size = 3'h6; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_out_b_bits_opcode = 3'h6; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_out_b_bits_size = 3'h6; // @[Filter.scala:60:9]
wire [2:0] filter_anonOut_b_bits_opcode = 3'h6; // @[MixedNode.scala:542:17]
wire [2:0] filter_anonOut_b_bits_size = 3'h6; // @[MixedNode.scala:542:17]
wire [2:0] filter_anonIn_b_bits_opcode = 3'h6; // @[MixedNode.scala:551:17]
wire [2:0] filter_anonIn_b_bits_size = 3'h6; // @[MixedNode.scala:551:17]
wire [2:0] coherent_jbar_auto_anon_in_b_bits_opcode = 3'h6; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_auto_anon_in_b_bits_size = 3'h6; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_auto_anon_out_b_bits_opcode = 3'h6; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_auto_anon_out_b_bits_size = 3'h6; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonOut_b_bits_opcode = 3'h6; // @[MixedNode.scala:542:17]
wire [2:0] coherent_jbar_anonOut_b_bits_size = 3'h6; // @[MixedNode.scala:542:17]
wire [2:0] coherent_jbar_anonIn_b_bits_opcode = 3'h6; // @[MixedNode.scala:551:17]
wire [2:0] coherent_jbar_anonIn_b_bits_size = 3'h6; // @[MixedNode.scala:551:17]
wire [2:0] coherent_jbar_in_0_b_bits_opcode = 3'h6; // @[Xbar.scala:159:18]
wire [2:0] coherent_jbar_in_0_b_bits_size = 3'h6; // @[Xbar.scala:159:18]
wire [2:0] coherent_jbar_out_0_b_bits_opcode = 3'h6; // @[Xbar.scala:216:19]
wire [2:0] coherent_jbar_out_0_b_bits_size = 3'h6; // @[Xbar.scala:216:19]
wire [2:0] coherent_jbar_portsBIO_filtered_0_bits_opcode = 3'h6; // @[Xbar.scala:352:24]
wire [2:0] coherent_jbar_portsBIO_filtered_0_bits_size = 3'h6; // @[Xbar.scala:352:24]
wire [5:0] auto_coherent_jbar_anon_in_b_bits_source = 6'h20; // @[ClockDomain.scala:14:9]
wire [5:0] filter_auto_anon_in_b_bits_source = 6'h20; // @[Filter.scala:60:9]
wire [5:0] filter_auto_anon_out_b_bits_source = 6'h20; // @[Filter.scala:60:9]
wire [5:0] filter_anonOut_b_bits_source = 6'h20; // @[MixedNode.scala:542:17]
wire [5:0] filter_anonIn_b_bits_source = 6'h20; // @[MixedNode.scala:551:17]
wire [5:0] coherent_jbar_auto_anon_in_b_bits_source = 6'h20; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar_auto_anon_out_b_bits_source = 6'h20; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar_anonOut_b_bits_source = 6'h20; // @[MixedNode.scala:542:17]
wire [5:0] coherent_jbar_anonIn_b_bits_source = 6'h20; // @[MixedNode.scala:551:17]
wire [5:0] coherent_jbar_in_0_b_bits_source = 6'h20; // @[Xbar.scala:159:18]
wire [5:0] coherent_jbar__anonIn_b_bits_source_T = 6'h20; // @[Xbar.scala:156:69]
wire [5:0] coherent_jbar_out_0_b_bits_source = 6'h20; // @[Xbar.scala:216:19]
wire [5:0] coherent_jbar__requestBOI_uncommonBits_T = 6'h20; // @[Parameters.scala:52:29]
wire [5:0] coherent_jbar_requestBOI_uncommonBits = 6'h20; // @[Parameters.scala:52:56]
wire [5:0] coherent_jbar_portsBIO_filtered_0_bits_source = 6'h20; // @[Xbar.scala:352:24]
wire [15:0] auto_coherent_jbar_anon_in_b_bits_mask = 16'hFFFF; // @[ClockDomain.scala:14:9]
wire [15:0] filter_auto_anon_in_b_bits_mask = 16'hFFFF; // @[Filter.scala:60:9]
wire [15:0] filter_auto_anon_out_b_bits_mask = 16'hFFFF; // @[Filter.scala:60:9]
wire [15:0] filter_anonOut_b_bits_mask = 16'hFFFF; // @[MixedNode.scala:542:17]
wire [15:0] filter_anonIn_b_bits_mask = 16'hFFFF; // @[MixedNode.scala:551:17]
wire [15:0] coherent_jbar_auto_anon_in_b_bits_mask = 16'hFFFF; // @[Jbar.scala:44:9]
wire [15:0] coherent_jbar_auto_anon_out_b_bits_mask = 16'hFFFF; // @[Jbar.scala:44:9]
wire [15:0] coherent_jbar_anonOut_b_bits_mask = 16'hFFFF; // @[MixedNode.scala:542:17]
wire [15:0] coherent_jbar_anonIn_b_bits_mask = 16'hFFFF; // @[MixedNode.scala:551:17]
wire [15:0] coherent_jbar_in_0_b_bits_mask = 16'hFFFF; // @[Xbar.scala:159:18]
wire [15:0] coherent_jbar_out_0_b_bits_mask = 16'hFFFF; // @[Xbar.scala:216:19]
wire [15:0] coherent_jbar_portsBIO_filtered_0_bits_mask = 16'hFFFF; // @[Xbar.scala:352:24]
wire [127:0] auto_coherent_jbar_anon_in_b_bits_data = 128'h0; // @[ClockDomain.scala:14:9]
wire [127:0] filter_auto_anon_in_b_bits_data = 128'h0; // @[Filter.scala:60:9]
wire [127:0] filter_auto_anon_out_b_bits_data = 128'h0; // @[Filter.scala:60:9]
wire [127:0] filter_anonOut_b_bits_data = 128'h0; // @[MixedNode.scala:542:17]
wire [127:0] filter_anonIn_b_bits_data = 128'h0; // @[MixedNode.scala:551:17]
wire [127:0] coherent_jbar_auto_anon_in_b_bits_data = 128'h0; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_auto_anon_out_b_bits_data = 128'h0; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_anonOut_b_bits_data = 128'h0; // @[MixedNode.scala:542:17]
wire [127:0] coherent_jbar_anonIn_b_bits_data = 128'h0; // @[MixedNode.scala:551:17]
wire [127:0] coherent_jbar_in_0_b_bits_data = 128'h0; // @[Xbar.scala:159:18]
wire [127:0] coherent_jbar_out_0_b_bits_data = 128'h0; // @[Xbar.scala:216:19]
wire [127:0] coherent_jbar_portsBIO_filtered_0_bits_data = 128'h0; // @[Xbar.scala:352:24]
wire auto_coherent_jbar_anon_in_b_bits_corrupt = 1'h0; // @[ClockDomain.scala:14:9]
wire auto_l2_ctrls_ctrl_in_d_bits_sink = 1'h0; // @[ClockDomain.scala:14:9]
wire auto_l2_ctrls_ctrl_in_d_bits_denied = 1'h0; // @[ClockDomain.scala:14:9]
wire auto_l2_ctrls_ctrl_in_d_bits_corrupt = 1'h0; // @[ClockDomain.scala:14:9]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire coh_clock_groups_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire coh_clock_groups_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire coh_clock_groups__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire clockGroup_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire clockGroup_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire clockGroup__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire fixedClockNode_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire fixedClockNode_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire fixedClockNode__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire broadcast_childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire broadcast_childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire broadcast__childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire filter_auto_anon_in_b_bits_corrupt = 1'h0; // @[Filter.scala:60:9]
wire filter_auto_anon_out_b_bits_corrupt = 1'h0; // @[Filter.scala:60:9]
wire filter_anonOut_b_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire filter_anonIn_b_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire InclusiveCache_outer_TLBuffer_auto_in_b_valid = 1'h0; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_in_b_bits_corrupt = 1'h0; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_b_valid = 1'h0; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_b_bits_corrupt = 1'h0; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_nodeOut_b_valid = 1'h0; // @[MixedNode.scala:542:17]
wire InclusiveCache_outer_TLBuffer_nodeOut_b_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire InclusiveCache_outer_TLBuffer_nodeIn_b_valid = 1'h0; // @[MixedNode.scala:551:17]
wire InclusiveCache_outer_TLBuffer_nodeIn_b_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire coherent_jbar_auto_anon_in_b_bits_corrupt = 1'h0; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_out_b_bits_corrupt = 1'h0; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_b_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire coherent_jbar_anonIn_b_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire coherent_jbar_in_0_b_bits_corrupt = 1'h0; // @[Xbar.scala:159:18]
wire coherent_jbar_out_0_b_bits_corrupt = 1'h0; // @[Xbar.scala:216:19]
wire coherent_jbar__requestBOI_T = 1'h0; // @[Parameters.scala:54:10]
wire coherent_jbar__requestDOI_T = 1'h0; // @[Parameters.scala:54:10]
wire coherent_jbar__requestEIO_T = 1'h0; // @[Parameters.scala:54:10]
wire coherent_jbar_beatsBO_opdata = 1'h0; // @[Edges.scala:97:28]
wire coherent_jbar_portsBIO_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24]
wire auto_coherent_jbar_anon_in_e_ready = 1'h1; // @[ClockDomain.scala:14:9]
wire filter_auto_anon_in_e_ready = 1'h1; // @[Filter.scala:60:9]
wire filter_auto_anon_out_e_ready = 1'h1; // @[Filter.scala:60:9]
wire filter_anonOut_e_ready = 1'h1; // @[MixedNode.scala:542:17]
wire filter_anonIn_e_ready = 1'h1; // @[MixedNode.scala:551:17]
wire InclusiveCache_outer_TLBuffer_auto_in_b_ready = 1'h1; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_in_e_ready = 1'h1; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_b_ready = 1'h1; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_e_ready = 1'h1; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_nodeOut_b_ready = 1'h1; // @[MixedNode.scala:542:17]
wire InclusiveCache_outer_TLBuffer_nodeOut_e_ready = 1'h1; // @[MixedNode.scala:542:17]
wire InclusiveCache_outer_TLBuffer_nodeIn_b_ready = 1'h1; // @[MixedNode.scala:551:17]
wire InclusiveCache_outer_TLBuffer_nodeIn_e_ready = 1'h1; // @[MixedNode.scala:551:17]
wire coherent_jbar_auto_anon_in_e_ready = 1'h1; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_out_e_ready = 1'h1; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_e_ready = 1'h1; // @[MixedNode.scala:542:17]
wire coherent_jbar_anonIn_e_ready = 1'h1; // @[MixedNode.scala:551:17]
wire coherent_jbar_in_0_e_ready = 1'h1; // @[Xbar.scala:159:18]
wire coherent_jbar_out_0_e_ready = 1'h1; // @[Xbar.scala:216:19]
wire coherent_jbar__requestAIO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire coherent_jbar_requestAIO_0_0 = 1'h1; // @[Xbar.scala:307:107]
wire coherent_jbar__requestCIO_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire coherent_jbar_requestCIO_0_0 = 1'h1; // @[Xbar.scala:308:107]
wire coherent_jbar__requestBOI_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire coherent_jbar__requestBOI_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire coherent_jbar__requestBOI_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire coherent_jbar__requestBOI_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire coherent_jbar_requestBOI_0_0 = 1'h1; // @[Parameters.scala:56:48]
wire coherent_jbar__requestDOI_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire coherent_jbar__requestDOI_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire coherent_jbar__requestDOI_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire coherent_jbar__requestDOI_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire coherent_jbar_requestDOI_0_0 = 1'h1; // @[Parameters.scala:56:48]
wire coherent_jbar__requestEIO_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire coherent_jbar__requestEIO_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire coherent_jbar__requestEIO_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire coherent_jbar__requestEIO_T_4 = 1'h1; // @[Parameters.scala:57:20]
wire coherent_jbar_requestEIO_0_0 = 1'h1; // @[Parameters.scala:56:48]
wire coherent_jbar__beatsBO_opdata_T = 1'h1; // @[Edges.scala:97:37]
wire coherent_jbar__portsAOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire coherent_jbar__portsBIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire coherent_jbar__portsCOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire coherent_jbar__portsDIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire coherent_jbar_portsEOI_filtered_0_ready = 1'h1; // @[Xbar.scala:352:24]
wire coherent_jbar__portsEOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54]
wire [1:0] auto_l2_ctrls_ctrl_in_d_bits_param = 2'h0; // @[ClockDomain.scala:14:9]
wire [1:0] InclusiveCache_outer_TLBuffer_auto_in_b_bits_param = 2'h0; // @[Buffer.scala:40:9]
wire [1:0] InclusiveCache_outer_TLBuffer_auto_out_b_bits_param = 2'h0; // @[Buffer.scala:40:9]
wire [1:0] InclusiveCache_outer_TLBuffer_nodeOut_b_bits_param = 2'h0; // @[MixedNode.scala:542:17]
wire [1:0] InclusiveCache_outer_TLBuffer_nodeIn_b_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] coherent_jbar_beatsBO_0 = 2'h0; // @[Edges.scala:221:14]
wire [1:0] coherent_jbar_beatsBO_decode = 2'h3; // @[Edges.scala:220:59]
wire [5:0] coherent_jbar__beatsBO_decode_T_2 = 6'h3F; // @[package.scala:243:46]
wire [5:0] coherent_jbar__beatsBO_decode_T_1 = 6'h0; // @[package.scala:243:76]
wire [12:0] coherent_jbar__beatsBO_decode_T = 13'hFC0; // @[package.scala:243:71]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_in_b_bits_opcode = 3'h0; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_in_b_bits_size = 3'h0; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_out_b_bits_opcode = 3'h0; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_out_b_bits_size = 3'h0; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeOut_b_bits_opcode = 3'h0; // @[MixedNode.scala:542:17]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeOut_b_bits_size = 3'h0; // @[MixedNode.scala:542:17]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeIn_b_bits_opcode = 3'h0; // @[MixedNode.scala:551:17]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeIn_b_bits_size = 3'h0; // @[MixedNode.scala:551:17]
wire [3:0] InclusiveCache_outer_TLBuffer_auto_in_b_bits_source = 4'h0; // @[Buffer.scala:40:9]
wire [3:0] InclusiveCache_outer_TLBuffer_auto_out_b_bits_source = 4'h0; // @[Buffer.scala:40:9]
wire [3:0] InclusiveCache_outer_TLBuffer_nodeOut_b_bits_source = 4'h0; // @[MixedNode.scala:542:17]
wire [3:0] InclusiveCache_outer_TLBuffer_nodeIn_b_bits_source = 4'h0; // @[MixedNode.scala:551:17]
wire [31:0] InclusiveCache_outer_TLBuffer_auto_in_b_bits_address = 32'h0; // @[Buffer.scala:40:9]
wire [31:0] InclusiveCache_outer_TLBuffer_auto_out_b_bits_address = 32'h0; // @[Buffer.scala:40:9]
wire [31:0] InclusiveCache_outer_TLBuffer_nodeOut_b_bits_address = 32'h0; // @[MixedNode.scala:542:17]
wire [31:0] InclusiveCache_outer_TLBuffer_nodeIn_b_bits_address = 32'h0; // @[MixedNode.scala:551:17]
wire [7:0] InclusiveCache_outer_TLBuffer_auto_in_b_bits_mask = 8'h0; // @[Buffer.scala:40:9]
wire [7:0] InclusiveCache_outer_TLBuffer_auto_out_b_bits_mask = 8'h0; // @[Buffer.scala:40:9]
wire [7:0] InclusiveCache_outer_TLBuffer_nodeOut_b_bits_mask = 8'h0; // @[MixedNode.scala:542:17]
wire [7:0] InclusiveCache_outer_TLBuffer_nodeIn_b_bits_mask = 8'h0; // @[MixedNode.scala:551:17]
wire [63:0] InclusiveCache_outer_TLBuffer_auto_in_b_bits_data = 64'h0; // @[Buffer.scala:40:9]
wire [63:0] InclusiveCache_outer_TLBuffer_auto_out_b_bits_data = 64'h0; // @[Buffer.scala:40:9]
wire [63:0] InclusiveCache_outer_TLBuffer_nodeOut_b_bits_data = 64'h0; // @[MixedNode.scala:542:17]
wire [63:0] InclusiveCache_outer_TLBuffer_nodeIn_b_bits_data = 64'h0; // @[MixedNode.scala:551:17]
wire [32:0] coherent_jbar__requestAIO_T_2 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] coherent_jbar__requestAIO_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] coherent_jbar__requestCIO_T_2 = 33'h0; // @[Parameters.scala:137:46]
wire [32:0] coherent_jbar__requestCIO_T_3 = 33'h0; // @[Parameters.scala:137:46]
wire coupler_to_bus_named_mbus_auto_bus_xing_out_a_ready = auto_coupler_to_bus_named_mbus_bus_xing_out_a_ready_0; // @[ClockDomain.scala:14:9]
wire coupler_to_bus_named_mbus_auto_bus_xing_out_a_valid; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_param; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_size; // @[LazyModuleImp.scala:138:7]
wire [4:0] coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_source; // @[LazyModuleImp.scala:138:7]
wire [31:0] coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_address; // @[LazyModuleImp.scala:138:7]
wire [7:0] coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_mask; // @[LazyModuleImp.scala:138:7]
wire [63:0] coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_data; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_mbus_auto_bus_xing_out_d_ready; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_mbus_auto_bus_xing_out_d_valid = auto_coupler_to_bus_named_mbus_bus_xing_out_d_valid_0; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_opcode = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [1:0] coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_param = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_param_0; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_size = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_size_0; // @[ClockDomain.scala:14:9]
wire [4:0] coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_source = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_source_0; // @[ClockDomain.scala:14:9]
wire coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_sink = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_sink_0; // @[ClockDomain.scala:14:9]
wire coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_denied = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_denied_0; // @[ClockDomain.scala:14:9]
wire [63:0] coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_data = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_data_0; // @[ClockDomain.scala:14:9]
wire coherent_jbar_auto_anon_in_a_ready; // @[Jbar.scala:44:9]
wire coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_corrupt = auto_coupler_to_bus_named_mbus_bus_xing_out_d_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire coherent_jbar_auto_anon_in_a_valid = auto_coherent_jbar_anon_in_a_valid_0; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_auto_anon_in_a_bits_opcode = auto_coherent_jbar_anon_in_a_bits_opcode_0; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_auto_anon_in_a_bits_param = auto_coherent_jbar_anon_in_a_bits_param_0; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_auto_anon_in_a_bits_size = auto_coherent_jbar_anon_in_a_bits_size_0; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar_auto_anon_in_a_bits_source = auto_coherent_jbar_anon_in_a_bits_source_0; // @[Jbar.scala:44:9]
wire [31:0] coherent_jbar_auto_anon_in_a_bits_address = auto_coherent_jbar_anon_in_a_bits_address_0; // @[Jbar.scala:44:9]
wire [15:0] coherent_jbar_auto_anon_in_a_bits_mask = auto_coherent_jbar_anon_in_a_bits_mask_0; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_auto_anon_in_a_bits_data = auto_coherent_jbar_anon_in_a_bits_data_0; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_in_a_bits_corrupt = auto_coherent_jbar_anon_in_a_bits_corrupt_0; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_in_b_ready = auto_coherent_jbar_anon_in_b_ready_0; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_in_b_valid; // @[Jbar.scala:44:9]
wire [1:0] coherent_jbar_auto_anon_in_b_bits_param; // @[Jbar.scala:44:9]
wire [31:0] coherent_jbar_auto_anon_in_b_bits_address; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_in_c_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_in_c_valid = auto_coherent_jbar_anon_in_c_valid_0; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_auto_anon_in_c_bits_opcode = auto_coherent_jbar_anon_in_c_bits_opcode_0; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_auto_anon_in_c_bits_param = auto_coherent_jbar_anon_in_c_bits_param_0; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_auto_anon_in_c_bits_size = auto_coherent_jbar_anon_in_c_bits_size_0; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar_auto_anon_in_c_bits_source = auto_coherent_jbar_anon_in_c_bits_source_0; // @[Jbar.scala:44:9]
wire [31:0] coherent_jbar_auto_anon_in_c_bits_address = auto_coherent_jbar_anon_in_c_bits_address_0; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_auto_anon_in_c_bits_data = auto_coherent_jbar_anon_in_c_bits_data_0; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_in_c_bits_corrupt = auto_coherent_jbar_anon_in_c_bits_corrupt_0; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_in_d_ready = auto_coherent_jbar_anon_in_d_ready_0; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_in_d_valid; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_auto_anon_in_d_bits_opcode; // @[Jbar.scala:44:9]
wire [1:0] coherent_jbar_auto_anon_in_d_bits_param; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_auto_anon_in_d_bits_size; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar_auto_anon_in_d_bits_source; // @[Jbar.scala:44:9]
wire [3:0] coherent_jbar_auto_anon_in_d_bits_sink; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_in_d_bits_denied; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_auto_anon_in_d_bits_data; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_in_d_bits_corrupt; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_in_e_valid = auto_coherent_jbar_anon_in_e_valid_0; // @[Jbar.scala:44:9]
wire [3:0] coherent_jbar_auto_anon_in_e_bits_sink = auto_coherent_jbar_anon_in_e_bits_sink_0; // @[Jbar.scala:44:9]
wire coh_clock_groups_auto_in_member_coh_0_clock = auto_coh_clock_groups_in_member_coh_0_clock_0; // @[ClockGroup.scala:53:9]
wire coh_clock_groups_auto_in_member_coh_0_reset = auto_coh_clock_groups_in_member_coh_0_reset_0; // @[ClockGroup.scala:53:9]
wire [2:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_param_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_size_0; // @[ClockDomain.scala:14:9]
wire [4:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_source_0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_address_0; // @[ClockDomain.scala:14:9]
wire [7:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_mask_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_data_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_mbus_bus_xing_out_a_valid_0; // @[ClockDomain.scala:14:9]
wire auto_coupler_to_bus_named_mbus_bus_xing_out_d_ready_0; // @[ClockDomain.scala:14:9]
wire auto_coherent_jbar_anon_in_a_ready_0; // @[ClockDomain.scala:14:9]
wire [1:0] auto_coherent_jbar_anon_in_b_bits_param_0; // @[ClockDomain.scala:14:9]
wire [31:0] auto_coherent_jbar_anon_in_b_bits_address_0; // @[ClockDomain.scala:14:9]
wire auto_coherent_jbar_anon_in_b_valid_0; // @[ClockDomain.scala:14:9]
wire auto_coherent_jbar_anon_in_c_ready_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coherent_jbar_anon_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [1:0] auto_coherent_jbar_anon_in_d_bits_param_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_coherent_jbar_anon_in_d_bits_size_0; // @[ClockDomain.scala:14:9]
wire [5:0] auto_coherent_jbar_anon_in_d_bits_source_0; // @[ClockDomain.scala:14:9]
wire [3:0] auto_coherent_jbar_anon_in_d_bits_sink_0; // @[ClockDomain.scala:14:9]
wire auto_coherent_jbar_anon_in_d_bits_denied_0; // @[ClockDomain.scala:14:9]
wire [127:0] auto_coherent_jbar_anon_in_d_bits_data_0; // @[ClockDomain.scala:14:9]
wire auto_coherent_jbar_anon_in_d_bits_corrupt_0; // @[ClockDomain.scala:14:9]
wire auto_coherent_jbar_anon_in_d_valid_0; // @[ClockDomain.scala:14:9]
wire auto_l2_ctrls_ctrl_in_a_ready_0; // @[ClockDomain.scala:14:9]
wire [2:0] auto_l2_ctrls_ctrl_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
wire [1:0] auto_l2_ctrls_ctrl_in_d_bits_size_0; // @[ClockDomain.scala:14:9]
wire [10:0] auto_l2_ctrls_ctrl_in_d_bits_source_0; // @[ClockDomain.scala:14:9]
wire [63:0] auto_l2_ctrls_ctrl_in_d_bits_data_0; // @[ClockDomain.scala:14:9]
wire auto_l2_ctrls_ctrl_in_d_valid_0; // @[ClockDomain.scala:14:9]
wire clockSinkNodeIn_clock; // @[MixedNode.scala:551:17]
wire clockSinkNodeIn_reset; // @[MixedNode.scala:551:17]
wire childClock; // @[LazyModuleImp.scala:155:31]
wire childReset; // @[LazyModuleImp.scala:158:31]
wire coh_clock_groups_nodeIn_member_coh_0_clock = coh_clock_groups_auto_in_member_coh_0_clock; // @[ClockGroup.scala:53:9]
wire coh_clock_groups_nodeOut_member_coh_0_clock; // @[MixedNode.scala:542:17]
wire coh_clock_groups_nodeIn_member_coh_0_reset = coh_clock_groups_auto_in_member_coh_0_reset; // @[ClockGroup.scala:53:9]
wire coh_clock_groups_nodeOut_member_coh_0_reset; // @[MixedNode.scala:542:17]
wire clockGroup_auto_in_member_coh_0_clock = coh_clock_groups_auto_out_member_coh_0_clock; // @[ClockGroup.scala:24:9, :53:9]
wire clockGroup_auto_in_member_coh_0_reset = coh_clock_groups_auto_out_member_coh_0_reset; // @[ClockGroup.scala:24:9, :53:9]
assign coh_clock_groups_auto_out_member_coh_0_clock = coh_clock_groups_nodeOut_member_coh_0_clock; // @[ClockGroup.scala:53:9]
assign coh_clock_groups_auto_out_member_coh_0_reset = coh_clock_groups_nodeOut_member_coh_0_reset; // @[ClockGroup.scala:53:9]
assign coh_clock_groups_nodeOut_member_coh_0_clock = coh_clock_groups_nodeIn_member_coh_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign coh_clock_groups_nodeOut_member_coh_0_reset = coh_clock_groups_nodeIn_member_coh_0_reset; // @[MixedNode.scala:542:17, :551:17]
wire clockGroup_nodeIn_member_coh_0_clock = clockGroup_auto_in_member_coh_0_clock; // @[ClockGroup.scala:24:9]
wire clockGroup_nodeOut_clock; // @[MixedNode.scala:542:17]
wire clockGroup_nodeIn_member_coh_0_reset = clockGroup_auto_in_member_coh_0_reset; // @[ClockGroup.scala:24:9]
wire clockGroup_nodeOut_reset; // @[MixedNode.scala:542:17]
wire fixedClockNode_auto_anon_in_clock = clockGroup_auto_out_clock; // @[ClockGroup.scala:24:9, :104:9]
wire fixedClockNode_auto_anon_in_reset = clockGroup_auto_out_reset; // @[ClockGroup.scala:24:9, :104:9]
assign clockGroup_auto_out_clock = clockGroup_nodeOut_clock; // @[ClockGroup.scala:24:9]
assign clockGroup_auto_out_reset = clockGroup_nodeOut_reset; // @[ClockGroup.scala:24:9]
assign clockGroup_nodeOut_clock = clockGroup_nodeIn_member_coh_0_clock; // @[MixedNode.scala:542:17, :551:17]
assign clockGroup_nodeOut_reset = clockGroup_nodeIn_member_coh_0_reset; // @[MixedNode.scala:542:17, :551:17]
wire fixedClockNode_anonIn_clock = fixedClockNode_auto_anon_in_clock; // @[ClockGroup.scala:104:9]
wire fixedClockNode_anonOut_clock; // @[MixedNode.scala:542:17]
wire fixedClockNode_anonIn_reset = fixedClockNode_auto_anon_in_reset; // @[ClockGroup.scala:104:9]
wire fixedClockNode_anonOut_reset; // @[MixedNode.scala:542:17]
assign clockSinkNodeIn_clock = fixedClockNode_auto_anon_out_clock; // @[ClockGroup.scala:104:9]
assign clockSinkNodeIn_reset = fixedClockNode_auto_anon_out_reset; // @[ClockGroup.scala:104:9]
assign fixedClockNode_auto_anon_out_clock = fixedClockNode_anonOut_clock; // @[ClockGroup.scala:104:9]
assign fixedClockNode_auto_anon_out_reset = fixedClockNode_anonOut_reset; // @[ClockGroup.scala:104:9]
assign fixedClockNode_anonOut_clock = fixedClockNode_anonIn_clock; // @[MixedNode.scala:542:17, :551:17]
assign fixedClockNode_anonOut_reset = fixedClockNode_anonIn_reset; // @[MixedNode.scala:542:17, :551:17]
wire filter_anonIn_a_ready; // @[MixedNode.scala:551:17]
wire coherent_jbar_auto_anon_out_a_ready = filter_auto_anon_in_a_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_out_a_valid; // @[Jbar.scala:44:9]
wire filter_anonIn_a_valid = filter_auto_anon_in_a_valid; // @[Filter.scala:60:9]
wire [2:0] coherent_jbar_auto_anon_out_a_bits_opcode; // @[Jbar.scala:44:9]
wire [2:0] filter_anonIn_a_bits_opcode = filter_auto_anon_in_a_bits_opcode; // @[Filter.scala:60:9]
wire [2:0] coherent_jbar_auto_anon_out_a_bits_param; // @[Jbar.scala:44:9]
wire [2:0] filter_anonIn_a_bits_param = filter_auto_anon_in_a_bits_param; // @[Filter.scala:60:9]
wire [2:0] coherent_jbar_auto_anon_out_a_bits_size; // @[Jbar.scala:44:9]
wire [2:0] filter_anonIn_a_bits_size = filter_auto_anon_in_a_bits_size; // @[Filter.scala:60:9]
wire [5:0] coherent_jbar_auto_anon_out_a_bits_source; // @[Jbar.scala:44:9]
wire [5:0] filter_anonIn_a_bits_source = filter_auto_anon_in_a_bits_source; // @[Filter.scala:60:9]
wire [31:0] coherent_jbar_auto_anon_out_a_bits_address; // @[Jbar.scala:44:9]
wire [31:0] filter_anonIn_a_bits_address = filter_auto_anon_in_a_bits_address; // @[Filter.scala:60:9]
wire [15:0] coherent_jbar_auto_anon_out_a_bits_mask; // @[Jbar.scala:44:9]
wire [15:0] filter_anonIn_a_bits_mask = filter_auto_anon_in_a_bits_mask; // @[Filter.scala:60:9]
wire [127:0] coherent_jbar_auto_anon_out_a_bits_data; // @[Jbar.scala:44:9]
wire [127:0] filter_anonIn_a_bits_data = filter_auto_anon_in_a_bits_data; // @[Filter.scala:60:9]
wire coherent_jbar_auto_anon_out_a_bits_corrupt; // @[Jbar.scala:44:9]
wire filter_anonIn_a_bits_corrupt = filter_auto_anon_in_a_bits_corrupt; // @[Filter.scala:60:9]
wire coherent_jbar_auto_anon_out_b_ready; // @[Jbar.scala:44:9]
wire filter_anonIn_b_ready = filter_auto_anon_in_b_ready; // @[Filter.scala:60:9]
wire filter_anonIn_b_valid; // @[MixedNode.scala:551:17]
wire coherent_jbar_auto_anon_out_b_valid = filter_auto_anon_in_b_valid; // @[Jbar.scala:44:9]
wire [1:0] filter_anonIn_b_bits_param; // @[MixedNode.scala:551:17]
wire [1:0] coherent_jbar_auto_anon_out_b_bits_param = filter_auto_anon_in_b_bits_param; // @[Jbar.scala:44:9]
wire [31:0] filter_anonIn_b_bits_address; // @[MixedNode.scala:551:17]
wire [31:0] coherent_jbar_auto_anon_out_b_bits_address = filter_auto_anon_in_b_bits_address; // @[Jbar.scala:44:9]
wire filter_anonIn_c_ready; // @[MixedNode.scala:551:17]
wire coherent_jbar_auto_anon_out_c_ready = filter_auto_anon_in_c_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_out_c_valid; // @[Jbar.scala:44:9]
wire filter_anonIn_c_valid = filter_auto_anon_in_c_valid; // @[Filter.scala:60:9]
wire [2:0] coherent_jbar_auto_anon_out_c_bits_opcode; // @[Jbar.scala:44:9]
wire [2:0] filter_anonIn_c_bits_opcode = filter_auto_anon_in_c_bits_opcode; // @[Filter.scala:60:9]
wire [2:0] coherent_jbar_auto_anon_out_c_bits_param; // @[Jbar.scala:44:9]
wire [2:0] filter_anonIn_c_bits_param = filter_auto_anon_in_c_bits_param; // @[Filter.scala:60:9]
wire [2:0] coherent_jbar_auto_anon_out_c_bits_size; // @[Jbar.scala:44:9]
wire [2:0] filter_anonIn_c_bits_size = filter_auto_anon_in_c_bits_size; // @[Filter.scala:60:9]
wire [5:0] coherent_jbar_auto_anon_out_c_bits_source; // @[Jbar.scala:44:9]
wire [5:0] filter_anonIn_c_bits_source = filter_auto_anon_in_c_bits_source; // @[Filter.scala:60:9]
wire [31:0] coherent_jbar_auto_anon_out_c_bits_address; // @[Jbar.scala:44:9]
wire [31:0] filter_anonIn_c_bits_address = filter_auto_anon_in_c_bits_address; // @[Filter.scala:60:9]
wire [127:0] coherent_jbar_auto_anon_out_c_bits_data; // @[Jbar.scala:44:9]
wire [127:0] filter_anonIn_c_bits_data = filter_auto_anon_in_c_bits_data; // @[Filter.scala:60:9]
wire coherent_jbar_auto_anon_out_c_bits_corrupt; // @[Jbar.scala:44:9]
wire filter_anonIn_c_bits_corrupt = filter_auto_anon_in_c_bits_corrupt; // @[Filter.scala:60:9]
wire coherent_jbar_auto_anon_out_d_ready; // @[Jbar.scala:44:9]
wire filter_anonIn_d_ready = filter_auto_anon_in_d_ready; // @[Filter.scala:60:9]
wire filter_anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] filter_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire coherent_jbar_auto_anon_out_d_valid = filter_auto_anon_in_d_valid; // @[Jbar.scala:44:9]
wire [1:0] filter_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] coherent_jbar_auto_anon_out_d_bits_opcode = filter_auto_anon_in_d_bits_opcode; // @[Jbar.scala:44:9]
wire [2:0] filter_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [1:0] coherent_jbar_auto_anon_out_d_bits_param = filter_auto_anon_in_d_bits_param; // @[Jbar.scala:44:9]
wire [5:0] filter_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [2:0] coherent_jbar_auto_anon_out_d_bits_size = filter_auto_anon_in_d_bits_size; // @[Jbar.scala:44:9]
wire [3:0] filter_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire [5:0] coherent_jbar_auto_anon_out_d_bits_source = filter_auto_anon_in_d_bits_source; // @[Jbar.scala:44:9]
wire filter_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [3:0] coherent_jbar_auto_anon_out_d_bits_sink = filter_auto_anon_in_d_bits_sink; // @[Jbar.scala:44:9]
wire [127:0] filter_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire coherent_jbar_auto_anon_out_d_bits_denied = filter_auto_anon_in_d_bits_denied; // @[Jbar.scala:44:9]
wire filter_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire [127:0] coherent_jbar_auto_anon_out_d_bits_data = filter_auto_anon_in_d_bits_data; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_out_d_bits_corrupt = filter_auto_anon_in_d_bits_corrupt; // @[Jbar.scala:44:9]
wire coherent_jbar_auto_anon_out_e_valid; // @[Jbar.scala:44:9]
wire filter_anonIn_e_valid = filter_auto_anon_in_e_valid; // @[Filter.scala:60:9]
wire [3:0] coherent_jbar_auto_anon_out_e_bits_sink; // @[Jbar.scala:44:9]
wire [3:0] filter_anonIn_e_bits_sink = filter_auto_anon_in_e_bits_sink; // @[Filter.scala:60:9]
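  // Filter output-side (anonOut) port wires. As the assigns below show, this
  // filter instance passes every TileLink field through unchanged.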
wire filter_anonOut_a_ready = filter_auto_anon_out_a_ready; // @[Filter.scala:60:9]
wire filter_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] filter_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] filter_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] filter_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [5:0] filter_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] filter_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [15:0] filter_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [127:0] filter_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire filter_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire filter_anonOut_b_ready; // @[MixedNode.scala:542:17]
wire filter_anonOut_b_valid = filter_auto_anon_out_b_valid; // @[Filter.scala:60:9]
wire [1:0] filter_anonOut_b_bits_param = filter_auto_anon_out_b_bits_param; // @[Filter.scala:60:9]
wire [31:0] filter_anonOut_b_bits_address = filter_auto_anon_out_b_bits_address; // @[Filter.scala:60:9]
wire filter_anonOut_c_ready = filter_auto_anon_out_c_ready; // @[Filter.scala:60:9]
wire filter_anonOut_c_valid; // @[MixedNode.scala:542:17]
wire [2:0] filter_anonOut_c_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] filter_anonOut_c_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] filter_anonOut_c_bits_size; // @[MixedNode.scala:542:17]
wire [5:0] filter_anonOut_c_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] filter_anonOut_c_bits_address; // @[MixedNode.scala:542:17]
wire [127:0] filter_anonOut_c_bits_data; // @[MixedNode.scala:542:17]
wire filter_anonOut_c_bits_corrupt; // @[MixedNode.scala:542:17]
wire filter_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire filter_anonOut_d_valid = filter_auto_anon_out_d_valid; // @[Filter.scala:60:9]
wire [2:0] filter_anonOut_d_bits_opcode = filter_auto_anon_out_d_bits_opcode; // @[Filter.scala:60:9]
wire [1:0] filter_anonOut_d_bits_param = filter_auto_anon_out_d_bits_param; // @[Filter.scala:60:9]
wire [2:0] filter_anonOut_d_bits_size = filter_auto_anon_out_d_bits_size; // @[Filter.scala:60:9]
wire [5:0] filter_anonOut_d_bits_source = filter_auto_anon_out_d_bits_source; // @[Filter.scala:60:9]
wire [3:0] filter_anonOut_d_bits_sink = filter_auto_anon_out_d_bits_sink; // @[Filter.scala:60:9]
wire filter_anonOut_d_bits_denied = filter_auto_anon_out_d_bits_denied; // @[Filter.scala:60:9]
wire [127:0] filter_anonOut_d_bits_data = filter_auto_anon_out_d_bits_data; // @[Filter.scala:60:9]
wire filter_anonOut_d_bits_corrupt = filter_auto_anon_out_d_bits_corrupt; // @[Filter.scala:60:9]
wire filter_anonOut_e_valid; // @[MixedNode.scala:542:17]
wire [3:0] filter_anonOut_e_bits_sink; // @[MixedNode.scala:542:17]
wire [2:0] filter_auto_anon_out_a_bits_opcode; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_out_a_bits_param; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_out_a_bits_size; // @[Filter.scala:60:9]
wire [5:0] filter_auto_anon_out_a_bits_source; // @[Filter.scala:60:9]
wire [31:0] filter_auto_anon_out_a_bits_address; // @[Filter.scala:60:9]
wire [15:0] filter_auto_anon_out_a_bits_mask; // @[Filter.scala:60:9]
wire [127:0] filter_auto_anon_out_a_bits_data; // @[Filter.scala:60:9]
wire filter_auto_anon_out_a_bits_corrupt; // @[Filter.scala:60:9]
wire filter_auto_anon_out_a_valid; // @[Filter.scala:60:9]
wire filter_auto_anon_out_b_ready; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_out_c_bits_opcode; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_out_c_bits_param; // @[Filter.scala:60:9]
wire [2:0] filter_auto_anon_out_c_bits_size; // @[Filter.scala:60:9]
wire [5:0] filter_auto_anon_out_c_bits_source; // @[Filter.scala:60:9]
wire [31:0] filter_auto_anon_out_c_bits_address; // @[Filter.scala:60:9]
wire [127:0] filter_auto_anon_out_c_bits_data; // @[Filter.scala:60:9]
wire filter_auto_anon_out_c_bits_corrupt; // @[Filter.scala:60:9]
wire filter_auto_anon_out_c_valid; // @[Filter.scala:60:9]
wire filter_auto_anon_out_d_ready; // @[Filter.scala:60:9]
wire [3:0] filter_auto_anon_out_e_bits_sink; // @[Filter.scala:60:9]
wire filter_auto_anon_out_e_valid; // @[Filter.scala:60:9]
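  // Filter, downstream side: drive the auto_anon_out port from anonOut and
  // reflect the downstream A/C readys and B/D responses back onto anonIn.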
assign filter_anonIn_a_ready = filter_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign filter_auto_anon_out_a_valid = filter_anonOut_a_valid; // @[Filter.scala:60:9]
assign filter_auto_anon_out_a_bits_opcode = filter_anonOut_a_bits_opcode; // @[Filter.scala:60:9]
assign filter_auto_anon_out_a_bits_param = filter_anonOut_a_bits_param; // @[Filter.scala:60:9]
assign filter_auto_anon_out_a_bits_size = filter_anonOut_a_bits_size; // @[Filter.scala:60:9]
assign filter_auto_anon_out_a_bits_source = filter_anonOut_a_bits_source; // @[Filter.scala:60:9]
assign filter_auto_anon_out_a_bits_address = filter_anonOut_a_bits_address; // @[Filter.scala:60:9]
assign filter_auto_anon_out_a_bits_mask = filter_anonOut_a_bits_mask; // @[Filter.scala:60:9]
assign filter_auto_anon_out_a_bits_data = filter_anonOut_a_bits_data; // @[Filter.scala:60:9]
assign filter_auto_anon_out_a_bits_corrupt = filter_anonOut_a_bits_corrupt; // @[Filter.scala:60:9]
assign filter_auto_anon_out_b_ready = filter_anonOut_b_ready; // @[Filter.scala:60:9]
assign filter_anonIn_b_valid = filter_anonOut_b_valid; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonIn_b_bits_param = filter_anonOut_b_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonIn_b_bits_address = filter_anonOut_b_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonIn_c_ready = filter_anonOut_c_ready; // @[MixedNode.scala:542:17, :551:17]
assign filter_auto_anon_out_c_valid = filter_anonOut_c_valid; // @[Filter.scala:60:9]
assign filter_auto_anon_out_c_bits_opcode = filter_anonOut_c_bits_opcode; // @[Filter.scala:60:9]
assign filter_auto_anon_out_c_bits_param = filter_anonOut_c_bits_param; // @[Filter.scala:60:9]
assign filter_auto_anon_out_c_bits_size = filter_anonOut_c_bits_size; // @[Filter.scala:60:9]
assign filter_auto_anon_out_c_bits_source = filter_anonOut_c_bits_source; // @[Filter.scala:60:9]
assign filter_auto_anon_out_c_bits_address = filter_anonOut_c_bits_address; // @[Filter.scala:60:9]
assign filter_auto_anon_out_c_bits_data = filter_anonOut_c_bits_data; // @[Filter.scala:60:9]
assign filter_auto_anon_out_c_bits_corrupt = filter_anonOut_c_bits_corrupt; // @[Filter.scala:60:9]
assign filter_auto_anon_out_d_ready = filter_anonOut_d_ready; // @[Filter.scala:60:9]
assign filter_anonIn_d_valid = filter_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonIn_d_bits_opcode = filter_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonIn_d_bits_param = filter_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonIn_d_bits_size = filter_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonIn_d_bits_source = filter_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonIn_d_bits_sink = filter_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonIn_d_bits_denied = filter_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonIn_d_bits_data = filter_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonIn_d_bits_corrupt = filter_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign filter_auto_anon_out_e_valid = filter_anonOut_e_valid; // @[Filter.scala:60:9]
assign filter_auto_anon_out_e_bits_sink = filter_anonOut_e_bits_sink; // @[Filter.scala:60:9]
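  // Filter, upstream side: forward the inbound A/C/E channels from auto_anon_in
  // to anonOut untouched, and return the B/D channels to the upstream port.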
assign filter_auto_anon_in_a_ready = filter_anonIn_a_ready; // @[Filter.scala:60:9]
assign filter_anonOut_a_valid = filter_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_a_bits_opcode = filter_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_a_bits_param = filter_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_a_bits_size = filter_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_a_bits_source = filter_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_a_bits_address = filter_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_a_bits_mask = filter_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_a_bits_data = filter_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_a_bits_corrupt = filter_anonIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_b_ready = filter_anonIn_b_ready; // @[MixedNode.scala:542:17, :551:17]
assign filter_auto_anon_in_b_valid = filter_anonIn_b_valid; // @[Filter.scala:60:9]
assign filter_auto_anon_in_b_bits_param = filter_anonIn_b_bits_param; // @[Filter.scala:60:9]
assign filter_auto_anon_in_b_bits_address = filter_anonIn_b_bits_address; // @[Filter.scala:60:9]
assign filter_auto_anon_in_c_ready = filter_anonIn_c_ready; // @[Filter.scala:60:9]
assign filter_anonOut_c_valid = filter_anonIn_c_valid; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_c_bits_opcode = filter_anonIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_c_bits_param = filter_anonIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_c_bits_size = filter_anonIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_c_bits_source = filter_anonIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_c_bits_address = filter_anonIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_c_bits_data = filter_anonIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_c_bits_corrupt = filter_anonIn_c_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_d_ready = filter_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign filter_auto_anon_in_d_valid = filter_anonIn_d_valid; // @[Filter.scala:60:9]
assign filter_auto_anon_in_d_bits_opcode = filter_anonIn_d_bits_opcode; // @[Filter.scala:60:9]
assign filter_auto_anon_in_d_bits_param = filter_anonIn_d_bits_param; // @[Filter.scala:60:9]
assign filter_auto_anon_in_d_bits_size = filter_anonIn_d_bits_size; // @[Filter.scala:60:9]
assign filter_auto_anon_in_d_bits_source = filter_anonIn_d_bits_source; // @[Filter.scala:60:9]
assign filter_auto_anon_in_d_bits_sink = filter_anonIn_d_bits_sink; // @[Filter.scala:60:9]
assign filter_auto_anon_in_d_bits_denied = filter_anonIn_d_bits_denied; // @[Filter.scala:60:9]
assign filter_auto_anon_in_d_bits_data = filter_anonIn_d_bits_data; // @[Filter.scala:60:9]
assign filter_auto_anon_in_d_bits_corrupt = filter_anonIn_d_bits_corrupt; // @[Filter.scala:60:9]
assign filter_anonOut_e_valid = filter_anonIn_e_valid; // @[MixedNode.scala:542:17, :551:17]
assign filter_anonOut_e_bits_sink = filter_anonIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17]
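  // InclusiveCache outer TLBuffer: node wires for the narrower outer TileLink
  // edge (64-bit data, 8-bit mask, 4-bit source, 3-bit sink).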
wire InclusiveCache_outer_TLBuffer_nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire InclusiveCache_outer_TLBuffer_nodeIn_a_valid = InclusiveCache_outer_TLBuffer_auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeIn_a_bits_opcode = InclusiveCache_outer_TLBuffer_auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeIn_a_bits_param = InclusiveCache_outer_TLBuffer_auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeIn_a_bits_size = InclusiveCache_outer_TLBuffer_auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [3:0] InclusiveCache_outer_TLBuffer_nodeIn_a_bits_source = InclusiveCache_outer_TLBuffer_auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [31:0] InclusiveCache_outer_TLBuffer_nodeIn_a_bits_address = InclusiveCache_outer_TLBuffer_auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] InclusiveCache_outer_TLBuffer_nodeIn_a_bits_mask = InclusiveCache_outer_TLBuffer_auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] InclusiveCache_outer_TLBuffer_nodeIn_a_bits_data = InclusiveCache_outer_TLBuffer_auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_nodeIn_a_bits_corrupt = InclusiveCache_outer_TLBuffer_auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_nodeIn_c_ready; // @[MixedNode.scala:551:17]
wire InclusiveCache_outer_TLBuffer_nodeIn_c_valid = InclusiveCache_outer_TLBuffer_auto_in_c_valid; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeIn_c_bits_opcode = InclusiveCache_outer_TLBuffer_auto_in_c_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeIn_c_bits_param = InclusiveCache_outer_TLBuffer_auto_in_c_bits_param; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeIn_c_bits_size = InclusiveCache_outer_TLBuffer_auto_in_c_bits_size; // @[Buffer.scala:40:9]
wire [3:0] InclusiveCache_outer_TLBuffer_nodeIn_c_bits_source = InclusiveCache_outer_TLBuffer_auto_in_c_bits_source; // @[Buffer.scala:40:9]
wire [31:0] InclusiveCache_outer_TLBuffer_nodeIn_c_bits_address = InclusiveCache_outer_TLBuffer_auto_in_c_bits_address; // @[Buffer.scala:40:9]
wire [63:0] InclusiveCache_outer_TLBuffer_nodeIn_c_bits_data = InclusiveCache_outer_TLBuffer_auto_in_c_bits_data; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_nodeIn_c_bits_corrupt = InclusiveCache_outer_TLBuffer_auto_in_c_bits_corrupt; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_nodeIn_d_ready = InclusiveCache_outer_TLBuffer_auto_in_d_ready; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] InclusiveCache_outer_TLBuffer_nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [3:0] InclusiveCache_outer_TLBuffer_nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire InclusiveCache_outer_TLBuffer_nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] InclusiveCache_outer_TLBuffer_nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire InclusiveCache_outer_TLBuffer_nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire InclusiveCache_outer_TLBuffer_nodeIn_e_valid = InclusiveCache_outer_TLBuffer_auto_in_e_valid; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeIn_e_bits_sink = InclusiveCache_outer_TLBuffer_auto_in_e_bits_sink; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_nodeOut_a_ready = InclusiveCache_outer_TLBuffer_auto_out_a_ready; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] InclusiveCache_outer_TLBuffer_nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] InclusiveCache_outer_TLBuffer_nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] InclusiveCache_outer_TLBuffer_nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] InclusiveCache_outer_TLBuffer_nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire InclusiveCache_outer_TLBuffer_nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire InclusiveCache_outer_TLBuffer_nodeOut_c_ready = InclusiveCache_outer_TLBuffer_auto_out_c_ready; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_nodeOut_c_valid; // @[MixedNode.scala:542:17]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeOut_c_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeOut_c_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeOut_c_bits_size; // @[MixedNode.scala:542:17]
wire [3:0] InclusiveCache_outer_TLBuffer_nodeOut_c_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] InclusiveCache_outer_TLBuffer_nodeOut_c_bits_address; // @[MixedNode.scala:542:17]
wire [63:0] InclusiveCache_outer_TLBuffer_nodeOut_c_bits_data; // @[MixedNode.scala:542:17]
wire InclusiveCache_outer_TLBuffer_nodeOut_c_bits_corrupt; // @[MixedNode.scala:542:17]
wire InclusiveCache_outer_TLBuffer_nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire InclusiveCache_outer_TLBuffer_nodeOut_d_valid = InclusiveCache_outer_TLBuffer_auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeOut_d_bits_opcode = InclusiveCache_outer_TLBuffer_auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [1:0] InclusiveCache_outer_TLBuffer_nodeOut_d_bits_param = InclusiveCache_outer_TLBuffer_auto_out_d_bits_param; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeOut_d_bits_size = InclusiveCache_outer_TLBuffer_auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [3:0] InclusiveCache_outer_TLBuffer_nodeOut_d_bits_source = InclusiveCache_outer_TLBuffer_auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeOut_d_bits_sink = InclusiveCache_outer_TLBuffer_auto_out_d_bits_sink; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_nodeOut_d_bits_denied = InclusiveCache_outer_TLBuffer_auto_out_d_bits_denied; // @[Buffer.scala:40:9]
wire [63:0] InclusiveCache_outer_TLBuffer_nodeOut_d_bits_data = InclusiveCache_outer_TLBuffer_auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_nodeOut_d_bits_corrupt = InclusiveCache_outer_TLBuffer_auto_out_d_bits_corrupt; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_nodeOut_e_valid; // @[MixedNode.scala:542:17]
wire [2:0] InclusiveCache_outer_TLBuffer_nodeOut_e_bits_sink; // @[MixedNode.scala:542:17]
wire InclusiveCache_outer_TLBuffer_auto_in_a_ready; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_in_c_ready; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_in_d_bits_opcode; // @[Buffer.scala:40:9]
wire [1:0] InclusiveCache_outer_TLBuffer_auto_in_d_bits_param; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_in_d_bits_size; // @[Buffer.scala:40:9]
wire [3:0] InclusiveCache_outer_TLBuffer_auto_in_d_bits_source; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_in_d_bits_sink; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_in_d_bits_denied; // @[Buffer.scala:40:9]
wire [63:0] InclusiveCache_outer_TLBuffer_auto_in_d_bits_data; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_in_d_bits_corrupt; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_in_d_valid; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_out_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_out_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_out_a_bits_size; // @[Buffer.scala:40:9]
wire [3:0] InclusiveCache_outer_TLBuffer_auto_out_a_bits_source; // @[Buffer.scala:40:9]
wire [31:0] InclusiveCache_outer_TLBuffer_auto_out_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] InclusiveCache_outer_TLBuffer_auto_out_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] InclusiveCache_outer_TLBuffer_auto_out_a_bits_data; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_a_bits_corrupt; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_a_valid; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_out_c_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_out_c_bits_param; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_out_c_bits_size; // @[Buffer.scala:40:9]
wire [3:0] InclusiveCache_outer_TLBuffer_auto_out_c_bits_source; // @[Buffer.scala:40:9]
wire [31:0] InclusiveCache_outer_TLBuffer_auto_out_c_bits_address; // @[Buffer.scala:40:9]
wire [63:0] InclusiveCache_outer_TLBuffer_auto_out_c_bits_data; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_c_bits_corrupt; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_c_valid; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_d_ready; // @[Buffer.scala:40:9]
wire [2:0] InclusiveCache_outer_TLBuffer_auto_out_e_bits_sink; // @[Buffer.scala:40:9]
wire InclusiveCache_outer_TLBuffer_auto_out_e_valid; // @[Buffer.scala:40:9]
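  // TLBuffer channel wiring: in this instance nodeIn and nodeOut are connected
  // combinationally (no queue stages appear in this span).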
assign InclusiveCache_outer_TLBuffer_nodeIn_a_ready = InclusiveCache_outer_TLBuffer_nodeOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_auto_out_a_valid = InclusiveCache_outer_TLBuffer_nodeOut_a_valid; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_a_bits_opcode = InclusiveCache_outer_TLBuffer_nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_a_bits_param = InclusiveCache_outer_TLBuffer_nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_a_bits_size = InclusiveCache_outer_TLBuffer_nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_a_bits_source = InclusiveCache_outer_TLBuffer_nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_a_bits_address = InclusiveCache_outer_TLBuffer_nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_a_bits_mask = InclusiveCache_outer_TLBuffer_nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_a_bits_data = InclusiveCache_outer_TLBuffer_nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_a_bits_corrupt = InclusiveCache_outer_TLBuffer_nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_nodeIn_c_ready = InclusiveCache_outer_TLBuffer_nodeOut_c_ready; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_auto_out_c_valid = InclusiveCache_outer_TLBuffer_nodeOut_c_valid; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_c_bits_opcode = InclusiveCache_outer_TLBuffer_nodeOut_c_bits_opcode; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_c_bits_param = InclusiveCache_outer_TLBuffer_nodeOut_c_bits_param; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_c_bits_size = InclusiveCache_outer_TLBuffer_nodeOut_c_bits_size; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_c_bits_source = InclusiveCache_outer_TLBuffer_nodeOut_c_bits_source; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_c_bits_address = InclusiveCache_outer_TLBuffer_nodeOut_c_bits_address; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_c_bits_data = InclusiveCache_outer_TLBuffer_nodeOut_c_bits_data; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_c_bits_corrupt = InclusiveCache_outer_TLBuffer_nodeOut_c_bits_corrupt; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_d_ready = InclusiveCache_outer_TLBuffer_nodeOut_d_ready; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_nodeIn_d_valid = InclusiveCache_outer_TLBuffer_nodeOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeIn_d_bits_opcode = InclusiveCache_outer_TLBuffer_nodeOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeIn_d_bits_param = InclusiveCache_outer_TLBuffer_nodeOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeIn_d_bits_size = InclusiveCache_outer_TLBuffer_nodeOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeIn_d_bits_source = InclusiveCache_outer_TLBuffer_nodeOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeIn_d_bits_sink = InclusiveCache_outer_TLBuffer_nodeOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeIn_d_bits_denied = InclusiveCache_outer_TLBuffer_nodeOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeIn_d_bits_data = InclusiveCache_outer_TLBuffer_nodeOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeIn_d_bits_corrupt = InclusiveCache_outer_TLBuffer_nodeOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_auto_out_e_valid = InclusiveCache_outer_TLBuffer_nodeOut_e_valid; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_out_e_bits_sink = InclusiveCache_outer_TLBuffer_nodeOut_e_bits_sink; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_in_a_ready = InclusiveCache_outer_TLBuffer_nodeIn_a_ready; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_nodeOut_a_valid = InclusiveCache_outer_TLBuffer_nodeIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_a_bits_opcode = InclusiveCache_outer_TLBuffer_nodeIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_a_bits_param = InclusiveCache_outer_TLBuffer_nodeIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_a_bits_size = InclusiveCache_outer_TLBuffer_nodeIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_a_bits_source = InclusiveCache_outer_TLBuffer_nodeIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_a_bits_address = InclusiveCache_outer_TLBuffer_nodeIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_a_bits_mask = InclusiveCache_outer_TLBuffer_nodeIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_a_bits_data = InclusiveCache_outer_TLBuffer_nodeIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_a_bits_corrupt = InclusiveCache_outer_TLBuffer_nodeIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_auto_in_c_ready = InclusiveCache_outer_TLBuffer_nodeIn_c_ready; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_nodeOut_c_valid = InclusiveCache_outer_TLBuffer_nodeIn_c_valid; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_c_bits_opcode = InclusiveCache_outer_TLBuffer_nodeIn_c_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_c_bits_param = InclusiveCache_outer_TLBuffer_nodeIn_c_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_c_bits_size = InclusiveCache_outer_TLBuffer_nodeIn_c_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_c_bits_source = InclusiveCache_outer_TLBuffer_nodeIn_c_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_c_bits_address = InclusiveCache_outer_TLBuffer_nodeIn_c_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_c_bits_data = InclusiveCache_outer_TLBuffer_nodeIn_c_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_c_bits_corrupt = InclusiveCache_outer_TLBuffer_nodeIn_c_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_d_ready = InclusiveCache_outer_TLBuffer_nodeIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_auto_in_d_valid = InclusiveCache_outer_TLBuffer_nodeIn_d_valid; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_in_d_bits_opcode = InclusiveCache_outer_TLBuffer_nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_in_d_bits_param = InclusiveCache_outer_TLBuffer_nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_in_d_bits_size = InclusiveCache_outer_TLBuffer_nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_in_d_bits_source = InclusiveCache_outer_TLBuffer_nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_in_d_bits_sink = InclusiveCache_outer_TLBuffer_nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_in_d_bits_denied = InclusiveCache_outer_TLBuffer_nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_in_d_bits_data = InclusiveCache_outer_TLBuffer_nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_auto_in_d_bits_corrupt = InclusiveCache_outer_TLBuffer_nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
assign InclusiveCache_outer_TLBuffer_nodeOut_e_valid = InclusiveCache_outer_TLBuffer_nodeIn_e_valid; // @[MixedNode.scala:542:17, :551:17]
assign InclusiveCache_outer_TLBuffer_nodeOut_e_bits_sink = InclusiveCache_outer_TLBuffer_nodeIn_e_bits_sink; // @[MixedNode.scala:542:17, :551:17]
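  // coherent_jbar: input-side (anonIn) port wires, exported through the module's
  // auto_coherent_jbar_anon_in_* ports (128-bit data, 16-bit mask).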
wire coherent_jbar_anonIn_a_ready; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_a_ready_0 = coherent_jbar_auto_anon_in_a_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_anonIn_a_valid = coherent_jbar_auto_anon_in_a_valid; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonIn_a_bits_opcode = coherent_jbar_auto_anon_in_a_bits_opcode; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonIn_a_bits_param = coherent_jbar_auto_anon_in_a_bits_param; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonIn_a_bits_size = coherent_jbar_auto_anon_in_a_bits_size; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar_anonIn_a_bits_source = coherent_jbar_auto_anon_in_a_bits_source; // @[Jbar.scala:44:9]
wire [31:0] coherent_jbar_anonIn_a_bits_address = coherent_jbar_auto_anon_in_a_bits_address; // @[Jbar.scala:44:9]
wire [15:0] coherent_jbar_anonIn_a_bits_mask = coherent_jbar_auto_anon_in_a_bits_mask; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_anonIn_a_bits_data = coherent_jbar_auto_anon_in_a_bits_data; // @[Jbar.scala:44:9]
wire coherent_jbar_anonIn_a_bits_corrupt = coherent_jbar_auto_anon_in_a_bits_corrupt; // @[Jbar.scala:44:9]
wire coherent_jbar_anonIn_b_ready = coherent_jbar_auto_anon_in_b_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_anonIn_b_valid; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_b_valid_0 = coherent_jbar_auto_anon_in_b_valid; // @[Jbar.scala:44:9]
wire [1:0] coherent_jbar_anonIn_b_bits_param; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_b_bits_param_0 = coherent_jbar_auto_anon_in_b_bits_param; // @[Jbar.scala:44:9]
wire [31:0] coherent_jbar_anonIn_b_bits_address; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_b_bits_address_0 = coherent_jbar_auto_anon_in_b_bits_address; // @[Jbar.scala:44:9]
wire coherent_jbar_anonIn_c_ready; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_c_ready_0 = coherent_jbar_auto_anon_in_c_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_anonIn_c_valid = coherent_jbar_auto_anon_in_c_valid; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonIn_c_bits_opcode = coherent_jbar_auto_anon_in_c_bits_opcode; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonIn_c_bits_param = coherent_jbar_auto_anon_in_c_bits_param; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonIn_c_bits_size = coherent_jbar_auto_anon_in_c_bits_size; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar_anonIn_c_bits_source = coherent_jbar_auto_anon_in_c_bits_source; // @[Jbar.scala:44:9]
wire [31:0] coherent_jbar_anonIn_c_bits_address = coherent_jbar_auto_anon_in_c_bits_address; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_anonIn_c_bits_data = coherent_jbar_auto_anon_in_c_bits_data; // @[Jbar.scala:44:9]
wire coherent_jbar_anonIn_c_bits_corrupt = coherent_jbar_auto_anon_in_c_bits_corrupt; // @[Jbar.scala:44:9]
wire coherent_jbar_anonIn_d_ready = coherent_jbar_auto_anon_in_d_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_anonIn_d_valid; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_d_valid_0 = coherent_jbar_auto_anon_in_d_valid; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_d_bits_opcode_0 = coherent_jbar_auto_anon_in_d_bits_opcode; // @[Jbar.scala:44:9]
wire [1:0] coherent_jbar_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_d_bits_param_0 = coherent_jbar_auto_anon_in_d_bits_param; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_d_bits_size_0 = coherent_jbar_auto_anon_in_d_bits_size; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_d_bits_source_0 = coherent_jbar_auto_anon_in_d_bits_source; // @[Jbar.scala:44:9]
wire [3:0] coherent_jbar_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_d_bits_sink_0 = coherent_jbar_auto_anon_in_d_bits_sink; // @[Jbar.scala:44:9]
wire coherent_jbar_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_d_bits_denied_0 = coherent_jbar_auto_anon_in_d_bits_denied; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_d_bits_data_0 = coherent_jbar_auto_anon_in_d_bits_data; // @[Jbar.scala:44:9]
wire coherent_jbar_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
assign auto_coherent_jbar_anon_in_d_bits_corrupt_0 = coherent_jbar_auto_anon_in_d_bits_corrupt; // @[Jbar.scala:44:9]
wire coherent_jbar_anonIn_e_valid = coherent_jbar_auto_anon_in_e_valid; // @[Jbar.scala:44:9]
wire [3:0] coherent_jbar_anonIn_e_bits_sink = coherent_jbar_auto_anon_in_e_bits_sink; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_a_ready = coherent_jbar_auto_anon_out_a_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_a_valid; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_a_valid = coherent_jbar_auto_anon_out_a_valid; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_a_bits_opcode = coherent_jbar_auto_anon_out_a_bits_opcode; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_a_bits_param = coherent_jbar_auto_anon_out_a_bits_param; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_a_bits_size = coherent_jbar_auto_anon_out_a_bits_size; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_a_bits_source = coherent_jbar_auto_anon_out_a_bits_source; // @[Jbar.scala:44:9]
wire [31:0] coherent_jbar_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_a_bits_address = coherent_jbar_auto_anon_out_a_bits_address; // @[Jbar.scala:44:9]
wire [15:0] coherent_jbar_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_a_bits_mask = coherent_jbar_auto_anon_out_a_bits_mask; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_a_bits_data = coherent_jbar_auto_anon_out_a_bits_data; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_a_bits_corrupt = coherent_jbar_auto_anon_out_a_bits_corrupt; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_b_ready; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_b_ready = coherent_jbar_auto_anon_out_b_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_b_valid = coherent_jbar_auto_anon_out_b_valid; // @[Jbar.scala:44:9]
wire [1:0] coherent_jbar_anonOut_b_bits_param = coherent_jbar_auto_anon_out_b_bits_param; // @[Jbar.scala:44:9]
wire [31:0] coherent_jbar_anonOut_b_bits_address = coherent_jbar_auto_anon_out_b_bits_address; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_c_ready = coherent_jbar_auto_anon_out_c_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_c_valid; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_c_valid = coherent_jbar_auto_anon_out_c_valid; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonOut_c_bits_opcode; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_c_bits_opcode = coherent_jbar_auto_anon_out_c_bits_opcode; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonOut_c_bits_param; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_c_bits_param = coherent_jbar_auto_anon_out_c_bits_param; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonOut_c_bits_size; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_c_bits_size = coherent_jbar_auto_anon_out_c_bits_size; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar_anonOut_c_bits_source; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_c_bits_source = coherent_jbar_auto_anon_out_c_bits_source; // @[Jbar.scala:44:9]
wire [31:0] coherent_jbar_anonOut_c_bits_address; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_c_bits_address = coherent_jbar_auto_anon_out_c_bits_address; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_anonOut_c_bits_data; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_c_bits_data = coherent_jbar_auto_anon_out_c_bits_data; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_c_bits_corrupt; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_c_bits_corrupt = coherent_jbar_auto_anon_out_c_bits_corrupt; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_d_ready; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_d_ready = coherent_jbar_auto_anon_out_d_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_d_valid = coherent_jbar_auto_anon_out_d_valid; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonOut_d_bits_opcode = coherent_jbar_auto_anon_out_d_bits_opcode; // @[Jbar.scala:44:9]
wire [1:0] coherent_jbar_anonOut_d_bits_param = coherent_jbar_auto_anon_out_d_bits_param; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_anonOut_d_bits_size = coherent_jbar_auto_anon_out_d_bits_size; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar_anonOut_d_bits_source = coherent_jbar_auto_anon_out_d_bits_source; // @[Jbar.scala:44:9]
wire [3:0] coherent_jbar_anonOut_d_bits_sink = coherent_jbar_auto_anon_out_d_bits_sink; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_d_bits_denied = coherent_jbar_auto_anon_out_d_bits_denied; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_anonOut_d_bits_data = coherent_jbar_auto_anon_out_d_bits_data; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_d_bits_corrupt = coherent_jbar_auto_anon_out_d_bits_corrupt; // @[Jbar.scala:44:9]
wire coherent_jbar_anonOut_e_valid; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_e_valid = coherent_jbar_auto_anon_out_e_valid; // @[Jbar.scala:44:9]
wire [3:0] coherent_jbar_anonOut_e_bits_sink; // @[MixedNode.scala:542:17]
assign filter_auto_anon_in_e_bits_sink = coherent_jbar_auto_anon_out_e_bits_sink; // @[Jbar.scala:44:9]
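  // Xbar output port 0: with a single downstream edge, out_0 mirrors anonOut,
  // which in turn feeds the filter's auto_anon_in port above.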
wire coherent_jbar_out_0_a_ready = coherent_jbar_anonOut_a_ready; // @[Xbar.scala:216:19]
wire coherent_jbar_out_0_a_valid; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_a_valid = coherent_jbar_anonOut_a_valid; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_out_0_a_bits_opcode; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_a_bits_opcode = coherent_jbar_anonOut_a_bits_opcode; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_out_0_a_bits_param; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_a_bits_param = coherent_jbar_anonOut_a_bits_param; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_out_0_a_bits_size; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_a_bits_size = coherent_jbar_anonOut_a_bits_size; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar_out_0_a_bits_source; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_a_bits_source = coherent_jbar_anonOut_a_bits_source; // @[Jbar.scala:44:9]
wire [31:0] coherent_jbar_out_0_a_bits_address; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_a_bits_address = coherent_jbar_anonOut_a_bits_address; // @[Jbar.scala:44:9]
wire [15:0] coherent_jbar_out_0_a_bits_mask; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_a_bits_mask = coherent_jbar_anonOut_a_bits_mask; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_out_0_a_bits_data; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_a_bits_data = coherent_jbar_anonOut_a_bits_data; // @[Jbar.scala:44:9]
wire coherent_jbar_out_0_a_bits_corrupt; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_a_bits_corrupt = coherent_jbar_anonOut_a_bits_corrupt; // @[Jbar.scala:44:9]
wire coherent_jbar_out_0_b_ready; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_b_ready = coherent_jbar_anonOut_b_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_out_0_b_valid = coherent_jbar_anonOut_b_valid; // @[Xbar.scala:216:19]
wire [1:0] coherent_jbar_out_0_b_bits_param = coherent_jbar_anonOut_b_bits_param; // @[Xbar.scala:216:19]
wire [31:0] coherent_jbar_out_0_b_bits_address = coherent_jbar_anonOut_b_bits_address; // @[Xbar.scala:216:19]
wire coherent_jbar_out_0_c_ready = coherent_jbar_anonOut_c_ready; // @[Xbar.scala:216:19]
wire coherent_jbar_out_0_c_valid; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_c_valid = coherent_jbar_anonOut_c_valid; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_out_0_c_bits_opcode; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_c_bits_opcode = coherent_jbar_anonOut_c_bits_opcode; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_out_0_c_bits_param; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_c_bits_param = coherent_jbar_anonOut_c_bits_param; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_out_0_c_bits_size; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_c_bits_size = coherent_jbar_anonOut_c_bits_size; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar_out_0_c_bits_source; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_c_bits_source = coherent_jbar_anonOut_c_bits_source; // @[Jbar.scala:44:9]
wire [31:0] coherent_jbar_out_0_c_bits_address; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_c_bits_address = coherent_jbar_anonOut_c_bits_address; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_out_0_c_bits_data; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_c_bits_data = coherent_jbar_anonOut_c_bits_data; // @[Jbar.scala:44:9]
wire coherent_jbar_out_0_c_bits_corrupt; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_c_bits_corrupt = coherent_jbar_anonOut_c_bits_corrupt; // @[Jbar.scala:44:9]
wire coherent_jbar_out_0_d_ready; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_d_ready = coherent_jbar_anonOut_d_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_out_0_d_valid = coherent_jbar_anonOut_d_valid; // @[Xbar.scala:216:19]
wire [2:0] coherent_jbar_out_0_d_bits_opcode = coherent_jbar_anonOut_d_bits_opcode; // @[Xbar.scala:216:19]
wire [1:0] coherent_jbar_out_0_d_bits_param = coherent_jbar_anonOut_d_bits_param; // @[Xbar.scala:216:19]
wire [2:0] coherent_jbar_out_0_d_bits_size = coherent_jbar_anonOut_d_bits_size; // @[Xbar.scala:216:19]
wire [5:0] coherent_jbar_out_0_d_bits_source = coherent_jbar_anonOut_d_bits_source; // @[Xbar.scala:216:19]
wire [3:0] coherent_jbar__out_0_d_bits_sink_T = coherent_jbar_anonOut_d_bits_sink; // @[Xbar.scala:251:53]
wire coherent_jbar_out_0_d_bits_denied = coherent_jbar_anonOut_d_bits_denied; // @[Xbar.scala:216:19]
wire [127:0] coherent_jbar_out_0_d_bits_data = coherent_jbar_anonOut_d_bits_data; // @[Xbar.scala:216:19]
wire coherent_jbar_out_0_d_bits_corrupt = coherent_jbar_anonOut_d_bits_corrupt; // @[Xbar.scala:216:19]
wire coherent_jbar_out_0_e_valid; // @[Xbar.scala:216:19]
assign coherent_jbar_auto_anon_out_e_valid = coherent_jbar_anonOut_e_valid; // @[Jbar.scala:44:9]
wire [3:0] coherent_jbar__anonOut_e_bits_sink_T; // @[Xbar.scala:156:69]
assign coherent_jbar_auto_anon_out_e_bits_sink = coherent_jbar_anonOut_e_bits_sink; // @[Jbar.scala:44:9]
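  // Xbar input port 0: in_0 mirrors anonIn apart from the source-ID remap
  // (an identity mapping here; see the assigns further down).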
wire coherent_jbar_in_0_a_ready; // @[Xbar.scala:159:18]
assign coherent_jbar_auto_anon_in_a_ready = coherent_jbar_anonIn_a_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_in_0_a_valid = coherent_jbar_anonIn_a_valid; // @[Xbar.scala:159:18]
wire [2:0] coherent_jbar_in_0_a_bits_opcode = coherent_jbar_anonIn_a_bits_opcode; // @[Xbar.scala:159:18]
wire [2:0] coherent_jbar_in_0_a_bits_param = coherent_jbar_anonIn_a_bits_param; // @[Xbar.scala:159:18]
wire [2:0] coherent_jbar_in_0_a_bits_size = coherent_jbar_anonIn_a_bits_size; // @[Xbar.scala:159:18]
wire [5:0] coherent_jbar__in_0_a_bits_source_T = coherent_jbar_anonIn_a_bits_source; // @[Xbar.scala:166:55]
wire [31:0] coherent_jbar_in_0_a_bits_address = coherent_jbar_anonIn_a_bits_address; // @[Xbar.scala:159:18]
wire [15:0] coherent_jbar_in_0_a_bits_mask = coherent_jbar_anonIn_a_bits_mask; // @[Xbar.scala:159:18]
wire [127:0] coherent_jbar_in_0_a_bits_data = coherent_jbar_anonIn_a_bits_data; // @[Xbar.scala:159:18]
wire coherent_jbar_in_0_a_bits_corrupt = coherent_jbar_anonIn_a_bits_corrupt; // @[Xbar.scala:159:18]
wire coherent_jbar_in_0_b_ready = coherent_jbar_anonIn_b_ready; // @[Xbar.scala:159:18]
wire coherent_jbar_in_0_b_valid; // @[Xbar.scala:159:18]
assign coherent_jbar_auto_anon_in_b_valid = coherent_jbar_anonIn_b_valid; // @[Jbar.scala:44:9]
wire [1:0] coherent_jbar_in_0_b_bits_param; // @[Xbar.scala:159:18]
assign coherent_jbar_auto_anon_in_b_bits_param = coherent_jbar_anonIn_b_bits_param; // @[Jbar.scala:44:9]
wire [31:0] coherent_jbar_in_0_b_bits_address; // @[Xbar.scala:159:18]
assign coherent_jbar_auto_anon_in_b_bits_address = coherent_jbar_anonIn_b_bits_address; // @[Jbar.scala:44:9]
wire coherent_jbar_in_0_c_ready; // @[Xbar.scala:159:18]
assign coherent_jbar_auto_anon_in_c_ready = coherent_jbar_anonIn_c_ready; // @[Jbar.scala:44:9]
wire coherent_jbar_in_0_c_valid = coherent_jbar_anonIn_c_valid; // @[Xbar.scala:159:18]
wire [2:0] coherent_jbar_in_0_c_bits_opcode = coherent_jbar_anonIn_c_bits_opcode; // @[Xbar.scala:159:18]
wire [2:0] coherent_jbar_in_0_c_bits_param = coherent_jbar_anonIn_c_bits_param; // @[Xbar.scala:159:18]
wire [2:0] coherent_jbar_in_0_c_bits_size = coherent_jbar_anonIn_c_bits_size; // @[Xbar.scala:159:18]
wire [5:0] coherent_jbar__in_0_c_bits_source_T = coherent_jbar_anonIn_c_bits_source; // @[Xbar.scala:187:55]
wire [31:0] coherent_jbar_in_0_c_bits_address = coherent_jbar_anonIn_c_bits_address; // @[Xbar.scala:159:18]
wire [127:0] coherent_jbar_in_0_c_bits_data = coherent_jbar_anonIn_c_bits_data; // @[Xbar.scala:159:18]
wire coherent_jbar_in_0_c_bits_corrupt = coherent_jbar_anonIn_c_bits_corrupt; // @[Xbar.scala:159:18]
wire coherent_jbar_in_0_d_ready = coherent_jbar_anonIn_d_ready; // @[Xbar.scala:159:18]
wire coherent_jbar_in_0_d_valid; // @[Xbar.scala:159:18]
assign coherent_jbar_auto_anon_in_d_valid = coherent_jbar_anonIn_d_valid; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_in_0_d_bits_opcode; // @[Xbar.scala:159:18]
assign coherent_jbar_auto_anon_in_d_bits_opcode = coherent_jbar_anonIn_d_bits_opcode; // @[Jbar.scala:44:9]
wire [1:0] coherent_jbar_in_0_d_bits_param; // @[Xbar.scala:159:18]
assign coherent_jbar_auto_anon_in_d_bits_param = coherent_jbar_anonIn_d_bits_param; // @[Jbar.scala:44:9]
wire [2:0] coherent_jbar_in_0_d_bits_size; // @[Xbar.scala:159:18]
assign coherent_jbar_auto_anon_in_d_bits_size = coherent_jbar_anonIn_d_bits_size; // @[Jbar.scala:44:9]
wire [5:0] coherent_jbar__anonIn_d_bits_source_T; // @[Xbar.scala:156:69]
assign coherent_jbar_auto_anon_in_d_bits_source = coherent_jbar_anonIn_d_bits_source; // @[Jbar.scala:44:9]
wire [3:0] coherent_jbar_in_0_d_bits_sink; // @[Xbar.scala:159:18]
assign coherent_jbar_auto_anon_in_d_bits_sink = coherent_jbar_anonIn_d_bits_sink; // @[Jbar.scala:44:9]
wire coherent_jbar_in_0_d_bits_denied; // @[Xbar.scala:159:18]
assign coherent_jbar_auto_anon_in_d_bits_denied = coherent_jbar_anonIn_d_bits_denied; // @[Jbar.scala:44:9]
wire [127:0] coherent_jbar_in_0_d_bits_data; // @[Xbar.scala:159:18]
assign coherent_jbar_auto_anon_in_d_bits_data = coherent_jbar_anonIn_d_bits_data; // @[Jbar.scala:44:9]
wire coherent_jbar_in_0_d_bits_corrupt; // @[Xbar.scala:159:18]
assign coherent_jbar_auto_anon_in_d_bits_corrupt = coherent_jbar_anonIn_d_bits_corrupt; // @[Jbar.scala:44:9]
wire coherent_jbar_in_0_e_valid = coherent_jbar_anonIn_e_valid; // @[Xbar.scala:159:18]
wire [3:0] coherent_jbar_in_0_e_bits_sink = coherent_jbar_anonIn_e_bits_sink; // @[Xbar.scala:159:18]
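  // Per-channel "filtered" request wires from the Xbar arbitration logic; with
  // one input and one output port they reduce to direct connections.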
wire coherent_jbar_portsAOI_filtered_0_ready; // @[Xbar.scala:352:24]
assign coherent_jbar_anonIn_a_ready = coherent_jbar_in_0_a_ready; // @[Xbar.scala:159:18]
wire coherent_jbar__portsAOI_filtered_0_valid_T_1 = coherent_jbar_in_0_a_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] coherent_jbar_portsAOI_filtered_0_bits_opcode = coherent_jbar_in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [2:0] coherent_jbar_portsAOI_filtered_0_bits_param = coherent_jbar_in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24]
wire [2:0] coherent_jbar_portsAOI_filtered_0_bits_size = coherent_jbar_in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24]
wire [5:0] coherent_jbar_portsAOI_filtered_0_bits_source = coherent_jbar_in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24]
wire [31:0] coherent_jbar__requestAIO_T = coherent_jbar_in_0_a_bits_address; // @[Xbar.scala:159:18]
wire [31:0] coherent_jbar_portsAOI_filtered_0_bits_address = coherent_jbar_in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [15:0] coherent_jbar_portsAOI_filtered_0_bits_mask = coherent_jbar_in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24]
wire [127:0] coherent_jbar_portsAOI_filtered_0_bits_data = coherent_jbar_in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24]
wire coherent_jbar_portsAOI_filtered_0_bits_corrupt = coherent_jbar_in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
wire coherent_jbar_portsBIO_filtered_0_ready = coherent_jbar_in_0_b_ready; // @[Xbar.scala:159:18, :352:24]
wire coherent_jbar_portsBIO_filtered_0_valid; // @[Xbar.scala:352:24]
assign coherent_jbar_anonIn_b_valid = coherent_jbar_in_0_b_valid; // @[Xbar.scala:159:18]
wire [1:0] coherent_jbar_portsBIO_filtered_0_bits_param; // @[Xbar.scala:352:24]
assign coherent_jbar_anonIn_b_bits_param = coherent_jbar_in_0_b_bits_param; // @[Xbar.scala:159:18]
wire [31:0] coherent_jbar_portsBIO_filtered_0_bits_address; // @[Xbar.scala:352:24]
assign coherent_jbar_anonIn_b_bits_address = coherent_jbar_in_0_b_bits_address; // @[Xbar.scala:159:18]
wire coherent_jbar_portsCOI_filtered_0_ready; // @[Xbar.scala:352:24]
assign coherent_jbar_anonIn_c_ready = coherent_jbar_in_0_c_ready; // @[Xbar.scala:159:18]
wire coherent_jbar__portsCOI_filtered_0_valid_T_1 = coherent_jbar_in_0_c_valid; // @[Xbar.scala:159:18, :355:40]
wire [2:0] coherent_jbar_portsCOI_filtered_0_bits_opcode = coherent_jbar_in_0_c_bits_opcode; // @[Xbar.scala:159:18, :352:24]
wire [2:0] coherent_jbar_portsCOI_filtered_0_bits_param = coherent_jbar_in_0_c_bits_param; // @[Xbar.scala:159:18, :352:24]
wire [2:0] coherent_jbar_portsCOI_filtered_0_bits_size = coherent_jbar_in_0_c_bits_size; // @[Xbar.scala:159:18, :352:24]
wire [5:0] coherent_jbar_portsCOI_filtered_0_bits_source = coherent_jbar_in_0_c_bits_source; // @[Xbar.scala:159:18, :352:24]
wire [31:0] coherent_jbar__requestCIO_T = coherent_jbar_in_0_c_bits_address; // @[Xbar.scala:159:18]
wire [31:0] coherent_jbar_portsCOI_filtered_0_bits_address = coherent_jbar_in_0_c_bits_address; // @[Xbar.scala:159:18, :352:24]
wire [127:0] coherent_jbar_portsCOI_filtered_0_bits_data = coherent_jbar_in_0_c_bits_data; // @[Xbar.scala:159:18, :352:24]
wire coherent_jbar_portsCOI_filtered_0_bits_corrupt = coherent_jbar_in_0_c_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
wire coherent_jbar_portsDIO_filtered_0_ready = coherent_jbar_in_0_d_ready; // @[Xbar.scala:159:18, :352:24]
wire coherent_jbar_portsDIO_filtered_0_valid; // @[Xbar.scala:352:24]
assign coherent_jbar_anonIn_d_valid = coherent_jbar_in_0_d_valid; // @[Xbar.scala:159:18]
wire [2:0] coherent_jbar_portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:352:24]
assign coherent_jbar_anonIn_d_bits_opcode = coherent_jbar_in_0_d_bits_opcode; // @[Xbar.scala:159:18]
wire [1:0] coherent_jbar_portsDIO_filtered_0_bits_param; // @[Xbar.scala:352:24]
assign coherent_jbar_anonIn_d_bits_param = coherent_jbar_in_0_d_bits_param; // @[Xbar.scala:159:18]
wire [2:0] coherent_jbar_portsDIO_filtered_0_bits_size; // @[Xbar.scala:352:24]
assign coherent_jbar_anonIn_d_bits_size = coherent_jbar_in_0_d_bits_size; // @[Xbar.scala:159:18]
wire [5:0] coherent_jbar_portsDIO_filtered_0_bits_source; // @[Xbar.scala:352:24]
assign coherent_jbar__anonIn_d_bits_source_T = coherent_jbar_in_0_d_bits_source; // @[Xbar.scala:156:69, :159:18]
wire [3:0] coherent_jbar_portsDIO_filtered_0_bits_sink; // @[Xbar.scala:352:24]
assign coherent_jbar_anonIn_d_bits_sink = coherent_jbar_in_0_d_bits_sink; // @[Xbar.scala:159:18]
wire coherent_jbar_portsDIO_filtered_0_bits_denied; // @[Xbar.scala:352:24]
assign coherent_jbar_anonIn_d_bits_denied = coherent_jbar_in_0_d_bits_denied; // @[Xbar.scala:159:18]
wire [127:0] coherent_jbar_portsDIO_filtered_0_bits_data; // @[Xbar.scala:352:24]
assign coherent_jbar_anonIn_d_bits_data = coherent_jbar_in_0_d_bits_data; // @[Xbar.scala:159:18]
wire coherent_jbar_portsDIO_filtered_0_bits_corrupt; // @[Xbar.scala:352:24]
assign coherent_jbar_anonIn_d_bits_corrupt = coherent_jbar_in_0_d_bits_corrupt; // @[Xbar.scala:159:18]
wire coherent_jbar__portsEOI_filtered_0_valid_T_1 = coherent_jbar_in_0_e_valid; // @[Xbar.scala:159:18, :355:40]
wire [3:0] coherent_jbar__requestEIO_uncommonBits_T = coherent_jbar_in_0_e_bits_sink; // @[Xbar.scala:159:18]
wire [3:0] coherent_jbar_portsEOI_filtered_0_bits_sink = coherent_jbar_in_0_e_bits_sink; // @[Xbar.scala:159:18, :352:24]
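  // Source/sink remaps for the single port (identity mappings here), followed by
  // the connections tying the filtered request wires to out_0 and anonOut.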
assign coherent_jbar_in_0_a_bits_source = coherent_jbar__in_0_a_bits_source_T; // @[Xbar.scala:159:18, :166:55]
assign coherent_jbar_in_0_c_bits_source = coherent_jbar__in_0_c_bits_source_T; // @[Xbar.scala:159:18, :187:55]
assign coherent_jbar_anonIn_d_bits_source = coherent_jbar__anonIn_d_bits_source_T; // @[Xbar.scala:156:69]
assign coherent_jbar_portsAOI_filtered_0_ready = coherent_jbar_out_0_a_ready; // @[Xbar.scala:216:19, :352:24]
wire coherent_jbar_portsAOI_filtered_0_valid; // @[Xbar.scala:352:24]
assign coherent_jbar_anonOut_a_valid = coherent_jbar_out_0_a_valid; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_a_bits_opcode = coherent_jbar_out_0_a_bits_opcode; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_a_bits_param = coherent_jbar_out_0_a_bits_param; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_a_bits_size = coherent_jbar_out_0_a_bits_size; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_a_bits_source = coherent_jbar_out_0_a_bits_source; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_a_bits_address = coherent_jbar_out_0_a_bits_address; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_a_bits_mask = coherent_jbar_out_0_a_bits_mask; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_a_bits_data = coherent_jbar_out_0_a_bits_data; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_a_bits_corrupt = coherent_jbar_out_0_a_bits_corrupt; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_b_ready = coherent_jbar_out_0_b_ready; // @[Xbar.scala:216:19]
wire coherent_jbar__portsBIO_filtered_0_valid_T_1 = coherent_jbar_out_0_b_valid; // @[Xbar.scala:216:19, :355:40]
assign coherent_jbar_portsBIO_filtered_0_bits_param = coherent_jbar_out_0_b_bits_param; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_portsBIO_filtered_0_bits_address = coherent_jbar_out_0_b_bits_address; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_portsCOI_filtered_0_ready = coherent_jbar_out_0_c_ready; // @[Xbar.scala:216:19, :352:24]
wire coherent_jbar_portsCOI_filtered_0_valid; // @[Xbar.scala:352:24]
assign coherent_jbar_anonOut_c_valid = coherent_jbar_out_0_c_valid; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_c_bits_opcode = coherent_jbar_out_0_c_bits_opcode; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_c_bits_param = coherent_jbar_out_0_c_bits_param; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_c_bits_size = coherent_jbar_out_0_c_bits_size; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_c_bits_source = coherent_jbar_out_0_c_bits_source; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_c_bits_address = coherent_jbar_out_0_c_bits_address; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_c_bits_data = coherent_jbar_out_0_c_bits_data; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_c_bits_corrupt = coherent_jbar_out_0_c_bits_corrupt; // @[Xbar.scala:216:19]
assign coherent_jbar_anonOut_d_ready = coherent_jbar_out_0_d_ready; // @[Xbar.scala:216:19]
wire coherent_jbar__portsDIO_filtered_0_valid_T_1 = coherent_jbar_out_0_d_valid; // @[Xbar.scala:216:19, :355:40]
assign coherent_jbar_portsDIO_filtered_0_bits_opcode = coherent_jbar_out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_portsDIO_filtered_0_bits_param = coherent_jbar_out_0_d_bits_param; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_portsDIO_filtered_0_bits_size = coherent_jbar_out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24]
wire [5:0] coherent_jbar__requestDOI_uncommonBits_T = coherent_jbar_out_0_d_bits_source; // @[Xbar.scala:216:19]
assign coherent_jbar_portsDIO_filtered_0_bits_source = coherent_jbar_out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_portsDIO_filtered_0_bits_sink = coherent_jbar_out_0_d_bits_sink; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_portsDIO_filtered_0_bits_denied = coherent_jbar_out_0_d_bits_denied; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_portsDIO_filtered_0_bits_data = coherent_jbar_out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_portsDIO_filtered_0_bits_corrupt = coherent_jbar_out_0_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
wire coherent_jbar_portsEOI_filtered_0_valid; // @[Xbar.scala:352:24]
assign coherent_jbar_anonOut_e_valid = coherent_jbar_out_0_e_valid; // @[Xbar.scala:216:19]
assign coherent_jbar__anonOut_e_bits_sink_T = coherent_jbar_out_0_e_bits_sink; // @[Xbar.scala:156:69, :216:19]
assign coherent_jbar_out_0_d_bits_sink = coherent_jbar__out_0_d_bits_sink_T; // @[Xbar.scala:216:19, :251:53]
assign coherent_jbar_anonOut_e_bits_sink = coherent_jbar__anonOut_e_bits_sink_T; // @[Xbar.scala:156:69]
wire [32:0] coherent_jbar__requestAIO_T_1 = {1'h0, coherent_jbar__requestAIO_T}; // @[Parameters.scala:137:{31,41}]
wire [32:0] coherent_jbar__requestCIO_T_1 = {1'h0, coherent_jbar__requestCIO_T}; // @[Parameters.scala:137:{31,41}]
wire [5:0] coherent_jbar_requestDOI_uncommonBits = coherent_jbar__requestDOI_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [3:0] coherent_jbar_requestEIO_uncommonBits = coherent_jbar__requestEIO_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
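  // Beat-count decode for the coherent crossbar: derives (beats - 1) per A/C/D message from the transfer size on the 128-bit inner data path.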
wire [12:0] coherent_jbar__beatsAI_decode_T = 13'h3F << coherent_jbar_in_0_a_bits_size; // @[package.scala:243:71]
wire [5:0] coherent_jbar__beatsAI_decode_T_1 = coherent_jbar__beatsAI_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] coherent_jbar__beatsAI_decode_T_2 = ~coherent_jbar__beatsAI_decode_T_1; // @[package.scala:243:{46,76}]
wire [1:0] coherent_jbar_beatsAI_decode = coherent_jbar__beatsAI_decode_T_2[5:4]; // @[package.scala:243:46]
wire coherent_jbar__beatsAI_opdata_T = coherent_jbar_in_0_a_bits_opcode[2]; // @[Xbar.scala:159:18]
wire coherent_jbar_beatsAI_opdata = ~coherent_jbar__beatsAI_opdata_T; // @[Edges.scala:92:{28,37}]
wire [1:0] coherent_jbar_beatsAI_0 = coherent_jbar_beatsAI_opdata ? coherent_jbar_beatsAI_decode : 2'h0; // @[Edges.scala:92:28, :220:59, :221:14]
wire [12:0] coherent_jbar__beatsCI_decode_T = 13'h3F << coherent_jbar_in_0_c_bits_size; // @[package.scala:243:71]
wire [5:0] coherent_jbar__beatsCI_decode_T_1 = coherent_jbar__beatsCI_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] coherent_jbar__beatsCI_decode_T_2 = ~coherent_jbar__beatsCI_decode_T_1; // @[package.scala:243:{46,76}]
wire [1:0] coherent_jbar_beatsCI_decode = coherent_jbar__beatsCI_decode_T_2[5:4]; // @[package.scala:243:46]
wire coherent_jbar_beatsCI_opdata = coherent_jbar_in_0_c_bits_opcode[0]; // @[Xbar.scala:159:18]
wire [1:0] coherent_jbar_beatsCI_0 = coherent_jbar_beatsCI_opdata ? coherent_jbar_beatsCI_decode : 2'h0; // @[Edges.scala:102:36, :220:59, :221:14]
wire [12:0] coherent_jbar__beatsDO_decode_T = 13'h3F << coherent_jbar_out_0_d_bits_size; // @[package.scala:243:71]
wire [5:0] coherent_jbar__beatsDO_decode_T_1 = coherent_jbar__beatsDO_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] coherent_jbar__beatsDO_decode_T_2 = ~coherent_jbar__beatsDO_decode_T_1; // @[package.scala:243:{46,76}]
wire [1:0] coherent_jbar_beatsDO_decode = coherent_jbar__beatsDO_decode_T_2[5:4]; // @[package.scala:243:46]
wire coherent_jbar_beatsDO_opdata = coherent_jbar_out_0_d_bits_opcode[0]; // @[Xbar.scala:216:19]
wire [1:0] coherent_jbar_beatsDO_0 = coherent_jbar_beatsDO_opdata ? coherent_jbar_beatsDO_decode : 2'h0; // @[Edges.scala:106:36, :220:59, :221:14]
assign coherent_jbar_in_0_a_ready = coherent_jbar_portsAOI_filtered_0_ready; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_out_0_a_valid = coherent_jbar_portsAOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_a_bits_opcode = coherent_jbar_portsAOI_filtered_0_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_a_bits_param = coherent_jbar_portsAOI_filtered_0_bits_param; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_a_bits_size = coherent_jbar_portsAOI_filtered_0_bits_size; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_a_bits_source = coherent_jbar_portsAOI_filtered_0_bits_source; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_a_bits_address = coherent_jbar_portsAOI_filtered_0_bits_address; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_a_bits_mask = coherent_jbar_portsAOI_filtered_0_bits_mask; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_a_bits_data = coherent_jbar_portsAOI_filtered_0_bits_data; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_a_bits_corrupt = coherent_jbar_portsAOI_filtered_0_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_portsAOI_filtered_0_valid = coherent_jbar__portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
assign coherent_jbar_out_0_b_ready = coherent_jbar_portsBIO_filtered_0_ready; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_in_0_b_valid = coherent_jbar_portsBIO_filtered_0_valid; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_in_0_b_bits_param = coherent_jbar_portsBIO_filtered_0_bits_param; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_in_0_b_bits_address = coherent_jbar_portsBIO_filtered_0_bits_address; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_portsBIO_filtered_0_valid = coherent_jbar__portsBIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
assign coherent_jbar_in_0_c_ready = coherent_jbar_portsCOI_filtered_0_ready; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_out_0_c_valid = coherent_jbar_portsCOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_c_bits_opcode = coherent_jbar_portsCOI_filtered_0_bits_opcode; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_c_bits_param = coherent_jbar_portsCOI_filtered_0_bits_param; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_c_bits_size = coherent_jbar_portsCOI_filtered_0_bits_size; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_c_bits_source = coherent_jbar_portsCOI_filtered_0_bits_source; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_c_bits_address = coherent_jbar_portsCOI_filtered_0_bits_address; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_c_bits_data = coherent_jbar_portsCOI_filtered_0_bits_data; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_c_bits_corrupt = coherent_jbar_portsCOI_filtered_0_bits_corrupt; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_portsCOI_filtered_0_valid = coherent_jbar__portsCOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
assign coherent_jbar_out_0_d_ready = coherent_jbar_portsDIO_filtered_0_ready; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_in_0_d_valid = coherent_jbar_portsDIO_filtered_0_valid; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_in_0_d_bits_opcode = coherent_jbar_portsDIO_filtered_0_bits_opcode; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_in_0_d_bits_param = coherent_jbar_portsDIO_filtered_0_bits_param; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_in_0_d_bits_size = coherent_jbar_portsDIO_filtered_0_bits_size; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_in_0_d_bits_source = coherent_jbar_portsDIO_filtered_0_bits_source; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_in_0_d_bits_sink = coherent_jbar_portsDIO_filtered_0_bits_sink; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_in_0_d_bits_denied = coherent_jbar_portsDIO_filtered_0_bits_denied; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_in_0_d_bits_data = coherent_jbar_portsDIO_filtered_0_bits_data; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_in_0_d_bits_corrupt = coherent_jbar_portsDIO_filtered_0_bits_corrupt; // @[Xbar.scala:159:18, :352:24]
assign coherent_jbar_portsDIO_filtered_0_valid = coherent_jbar__portsDIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
assign coherent_jbar_out_0_e_valid = coherent_jbar_portsEOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_out_0_e_bits_sink = coherent_jbar_portsEOI_filtered_0_bits_sink; // @[Xbar.scala:216:19, :352:24]
assign coherent_jbar_portsEOI_filtered_0_valid = coherent_jbar__portsEOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40]
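  // coupler_to_bus_named_mbus: width-widget and bus-crossing wiring toward the memory bus (64-bit data path, 32-bit address, 5-bit source).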
wire coupler_to_bus_named_mbus_widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_in_a_valid = coupler_to_bus_named_mbus_auto_widget_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_opcode = coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_param = coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_size = coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_source = coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_address = coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_mask = coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_data = coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_corrupt = coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_in_d_ready = coupler_to_bus_named_mbus_auto_widget_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_bus_xingOut_a_ready = coupler_to_bus_named_mbus_auto_bus_xing_out_a_ready; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_mbus_bus_xingOut_a_valid; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_valid_0 = coupler_to_bus_named_mbus_auto_bus_xing_out_a_valid; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_to_bus_named_mbus_bus_xingOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_opcode_0 = coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_opcode; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_to_bus_named_mbus_bus_xingOut_a_bits_param; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_param_0 = coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_param; // @[ClockDomain.scala:14:9]
wire [2:0] coupler_to_bus_named_mbus_bus_xingOut_a_bits_size; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_size_0 = coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_size; // @[ClockDomain.scala:14:9]
wire [4:0] coupler_to_bus_named_mbus_bus_xingOut_a_bits_source; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_source_0 = coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_source; // @[ClockDomain.scala:14:9]
wire [31:0] coupler_to_bus_named_mbus_bus_xingOut_a_bits_address; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_address_0 = coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_address; // @[ClockDomain.scala:14:9]
wire [7:0] coupler_to_bus_named_mbus_bus_xingOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_mask_0 = coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_mask; // @[ClockDomain.scala:14:9]
wire [63:0] coupler_to_bus_named_mbus_bus_xingOut_a_bits_data; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_data_0 = coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_data; // @[ClockDomain.scala:14:9]
wire coupler_to_bus_named_mbus_bus_xingOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_corrupt_0 = coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_corrupt; // @[ClockDomain.scala:14:9]
wire coupler_to_bus_named_mbus_bus_xingOut_d_ready; // @[MixedNode.scala:542:17]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_d_ready_0 = coupler_to_bus_named_mbus_auto_bus_xing_out_d_ready; // @[ClockDomain.scala:14:9]
wire coupler_to_bus_named_mbus_bus_xingOut_d_valid = coupler_to_bus_named_mbus_auto_bus_xing_out_d_valid; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_bus_named_mbus_bus_xingOut_d_bits_opcode = coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_opcode; // @[MixedNode.scala:542:17]
wire [1:0] coupler_to_bus_named_mbus_bus_xingOut_d_bits_param = coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_bus_named_mbus_bus_xingOut_d_bits_size = coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_size; // @[MixedNode.scala:542:17]
wire [4:0] coupler_to_bus_named_mbus_bus_xingOut_d_bits_source = coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_source; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_mbus_bus_xingOut_d_bits_sink = coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_sink; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_mbus_bus_xingOut_d_bits_denied = coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_denied; // @[MixedNode.scala:542:17]
wire [63:0] coupler_to_bus_named_mbus_bus_xingOut_d_bits_data = coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_data; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_mbus_bus_xingOut_d_bits_corrupt = coupler_to_bus_named_mbus_auto_bus_xing_out_d_bits_corrupt; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_mbus_auto_widget_anon_in_a_ready; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [1:0] coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_param; // @[LazyModuleImp.scala:138:7]
wire [2:0] coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_size; // @[LazyModuleImp.scala:138:7]
wire [4:0] coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_source; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_sink; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_denied; // @[LazyModuleImp.scala:138:7]
wire [63:0] coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_data; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_mbus_auto_widget_anon_in_d_valid; // @[LazyModuleImp.scala:138:7]
wire coupler_to_bus_named_mbus_widget_anonIn_a_ready; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_mbus_auto_widget_anon_in_a_ready = coupler_to_bus_named_mbus_widget_auto_anon_in_a_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_anonIn_a_valid = coupler_to_bus_named_mbus_widget_auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_anonIn_a_bits_opcode = coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_anonIn_a_bits_param = coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_anonIn_a_bits_size = coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_to_bus_named_mbus_widget_anonIn_a_bits_source = coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [31:0] coupler_to_bus_named_mbus_widget_anonIn_a_bits_address = coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] coupler_to_bus_named_mbus_widget_anonIn_a_bits_mask = coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_mbus_widget_anonIn_a_bits_data = coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_anonIn_a_bits_corrupt = coupler_to_bus_named_mbus_widget_auto_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_anonIn_d_ready = coupler_to_bus_named_mbus_widget_auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_anonIn_d_valid; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_mbus_auto_widget_anon_in_d_valid = coupler_to_bus_named_mbus_widget_auto_anon_in_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_opcode = coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] coupler_to_bus_named_mbus_widget_anonIn_d_bits_param; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_param = coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_anonIn_d_bits_size; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_size = coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_to_bus_named_mbus_widget_anonIn_d_bits_source; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_source = coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_source; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_sink = coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_sink; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_denied = coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_mbus_widget_anonIn_d_bits_data; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_data = coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
assign coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_corrupt = coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_bus_xingIn_a_ready; // @[MixedNode.scala:551:17]
wire coupler_to_bus_named_mbus_widget_anonOut_a_ready = coupler_to_bus_named_mbus_widget_auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_bus_named_mbus_widget_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_mbus_bus_xingIn_a_valid = coupler_to_bus_named_mbus_widget_auto_anon_out_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_bus_named_mbus_bus_xingIn_a_bits_opcode = coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_widget_anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_bus_named_mbus_bus_xingIn_a_bits_param = coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_param; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_to_bus_named_mbus_widget_anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [2:0] coupler_to_bus_named_mbus_bus_xingIn_a_bits_size = coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_size; // @[WidthWidget.scala:27:9]
wire [31:0] coupler_to_bus_named_mbus_widget_anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [4:0] coupler_to_bus_named_mbus_bus_xingIn_a_bits_source = coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_source; // @[WidthWidget.scala:27:9]
wire [7:0] coupler_to_bus_named_mbus_widget_anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [31:0] coupler_to_bus_named_mbus_bus_xingIn_a_bits_address = coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_address; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_mbus_widget_anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire [7:0] coupler_to_bus_named_mbus_bus_xingIn_a_bits_mask = coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_mask; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire [63:0] coupler_to_bus_named_mbus_bus_xingIn_a_bits_data = coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_widget_anonOut_d_ready; // @[MixedNode.scala:542:17]
wire coupler_to_bus_named_mbus_bus_xingIn_a_bits_corrupt = coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_bus_xingIn_d_ready = coupler_to_bus_named_mbus_widget_auto_anon_out_d_ready; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_bus_xingIn_d_valid; // @[MixedNode.scala:551:17]
wire coupler_to_bus_named_mbus_widget_anonOut_d_valid = coupler_to_bus_named_mbus_widget_auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_bus_xingIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [2:0] coupler_to_bus_named_mbus_widget_anonOut_d_bits_opcode = coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] coupler_to_bus_named_mbus_bus_xingIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [1:0] coupler_to_bus_named_mbus_widget_anonOut_d_bits_param = coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [2:0] coupler_to_bus_named_mbus_bus_xingIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [2:0] coupler_to_bus_named_mbus_widget_anonOut_d_bits_size = coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [4:0] coupler_to_bus_named_mbus_bus_xingIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [4:0] coupler_to_bus_named_mbus_widget_anonOut_d_bits_source = coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_bus_xingIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire coupler_to_bus_named_mbus_widget_anonOut_d_bits_sink = coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_bus_xingIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire coupler_to_bus_named_mbus_widget_anonOut_d_bits_denied = coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] coupler_to_bus_named_mbus_bus_xingIn_d_bits_data; // @[MixedNode.scala:551:17]
wire [63:0] coupler_to_bus_named_mbus_widget_anonOut_d_bits_data = coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire coupler_to_bus_named_mbus_bus_xingIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire coupler_to_bus_named_mbus_widget_anonOut_d_bits_corrupt = coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_anonIn_a_ready = coupler_to_bus_named_mbus_widget_anonOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_a_valid = coupler_to_bus_named_mbus_widget_anonOut_a_valid; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_opcode = coupler_to_bus_named_mbus_widget_anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_param = coupler_to_bus_named_mbus_widget_anonOut_a_bits_param; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_size = coupler_to_bus_named_mbus_widget_anonOut_a_bits_size; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_source = coupler_to_bus_named_mbus_widget_anonOut_a_bits_source; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_address = coupler_to_bus_named_mbus_widget_anonOut_a_bits_address; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_mask = coupler_to_bus_named_mbus_widget_anonOut_a_bits_mask; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_data = coupler_to_bus_named_mbus_widget_anonOut_a_bits_data; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_a_bits_corrupt = coupler_to_bus_named_mbus_widget_anonOut_a_bits_corrupt; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_d_ready = coupler_to_bus_named_mbus_widget_anonOut_d_ready; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_anonIn_d_valid = coupler_to_bus_named_mbus_widget_anonOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonIn_d_bits_opcode = coupler_to_bus_named_mbus_widget_anonOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonIn_d_bits_param = coupler_to_bus_named_mbus_widget_anonOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonIn_d_bits_size = coupler_to_bus_named_mbus_widget_anonOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonIn_d_bits_source = coupler_to_bus_named_mbus_widget_anonOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonIn_d_bits_sink = coupler_to_bus_named_mbus_widget_anonOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonIn_d_bits_denied = coupler_to_bus_named_mbus_widget_anonOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonIn_d_bits_data = coupler_to_bus_named_mbus_widget_anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonIn_d_bits_corrupt = coupler_to_bus_named_mbus_widget_anonOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_auto_anon_in_a_ready = coupler_to_bus_named_mbus_widget_anonIn_a_ready; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_anonOut_a_valid = coupler_to_bus_named_mbus_widget_anonIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonOut_a_bits_opcode = coupler_to_bus_named_mbus_widget_anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonOut_a_bits_param = coupler_to_bus_named_mbus_widget_anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonOut_a_bits_size = coupler_to_bus_named_mbus_widget_anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonOut_a_bits_source = coupler_to_bus_named_mbus_widget_anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonOut_a_bits_address = coupler_to_bus_named_mbus_widget_anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonOut_a_bits_mask = coupler_to_bus_named_mbus_widget_anonIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonOut_a_bits_data = coupler_to_bus_named_mbus_widget_anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonOut_a_bits_corrupt = coupler_to_bus_named_mbus_widget_anonIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_anonOut_d_ready = coupler_to_bus_named_mbus_widget_anonIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_auto_anon_in_d_valid = coupler_to_bus_named_mbus_widget_anonIn_d_valid; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_opcode = coupler_to_bus_named_mbus_widget_anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_param = coupler_to_bus_named_mbus_widget_anonIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_size = coupler_to_bus_named_mbus_widget_anonIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_source = coupler_to_bus_named_mbus_widget_anonIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_sink = coupler_to_bus_named_mbus_widget_anonIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_denied = coupler_to_bus_named_mbus_widget_anonIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_data = coupler_to_bus_named_mbus_widget_anonIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_in_d_bits_corrupt = coupler_to_bus_named_mbus_widget_anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_bus_xingIn_a_ready = coupler_to_bus_named_mbus_bus_xingOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_auto_bus_xing_out_a_valid = coupler_to_bus_named_mbus_bus_xingOut_a_valid; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_opcode = coupler_to_bus_named_mbus_bus_xingOut_a_bits_opcode; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_param = coupler_to_bus_named_mbus_bus_xingOut_a_bits_param; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_size = coupler_to_bus_named_mbus_bus_xingOut_a_bits_size; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_source = coupler_to_bus_named_mbus_bus_xingOut_a_bits_source; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_address = coupler_to_bus_named_mbus_bus_xingOut_a_bits_address; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_mask = coupler_to_bus_named_mbus_bus_xingOut_a_bits_mask; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_data = coupler_to_bus_named_mbus_bus_xingOut_a_bits_data; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_mbus_auto_bus_xing_out_a_bits_corrupt = coupler_to_bus_named_mbus_bus_xingOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_mbus_auto_bus_xing_out_d_ready = coupler_to_bus_named_mbus_bus_xingOut_d_ready; // @[MixedNode.scala:542:17]
assign coupler_to_bus_named_mbus_bus_xingIn_d_valid = coupler_to_bus_named_mbus_bus_xingOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingIn_d_bits_opcode = coupler_to_bus_named_mbus_bus_xingOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingIn_d_bits_param = coupler_to_bus_named_mbus_bus_xingOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingIn_d_bits_size = coupler_to_bus_named_mbus_bus_xingOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingIn_d_bits_source = coupler_to_bus_named_mbus_bus_xingOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingIn_d_bits_sink = coupler_to_bus_named_mbus_bus_xingOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingIn_d_bits_denied = coupler_to_bus_named_mbus_bus_xingOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingIn_d_bits_data = coupler_to_bus_named_mbus_bus_xingOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingIn_d_bits_corrupt = coupler_to_bus_named_mbus_bus_xingOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_a_ready = coupler_to_bus_named_mbus_bus_xingIn_a_ready; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_bus_xingOut_a_valid = coupler_to_bus_named_mbus_bus_xingIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingOut_a_bits_opcode = coupler_to_bus_named_mbus_bus_xingIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingOut_a_bits_param = coupler_to_bus_named_mbus_bus_xingIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingOut_a_bits_size = coupler_to_bus_named_mbus_bus_xingIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingOut_a_bits_source = coupler_to_bus_named_mbus_bus_xingIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingOut_a_bits_address = coupler_to_bus_named_mbus_bus_xingIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingOut_a_bits_mask = coupler_to_bus_named_mbus_bus_xingIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingOut_a_bits_data = coupler_to_bus_named_mbus_bus_xingIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingOut_a_bits_corrupt = coupler_to_bus_named_mbus_bus_xingIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_bus_xingOut_d_ready = coupler_to_bus_named_mbus_bus_xingIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_d_valid = coupler_to_bus_named_mbus_bus_xingIn_d_valid; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_opcode = coupler_to_bus_named_mbus_bus_xingIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_param = coupler_to_bus_named_mbus_bus_xingIn_d_bits_param; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_size = coupler_to_bus_named_mbus_bus_xingIn_d_bits_size; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_source = coupler_to_bus_named_mbus_bus_xingIn_d_bits_source; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_sink = coupler_to_bus_named_mbus_bus_xingIn_d_bits_sink; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_denied = coupler_to_bus_named_mbus_bus_xingIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_data = coupler_to_bus_named_mbus_bus_xingIn_d_bits_data; // @[WidthWidget.scala:27:9]
assign coupler_to_bus_named_mbus_widget_auto_anon_out_d_bits_corrupt = coupler_to_bus_named_mbus_bus_xingIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
assign childClock = clockSinkNodeIn_clock; // @[MixedNode.scala:551:17]
assign childReset = clockSinkNodeIn_reset; // @[MixedNode.scala:551:17]
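  // L2 cache (InclusiveCache) instantiated in the child clock domain; its control port is exposed through the auto_l2_ctrls_ctrl_in signals.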
InclusiveCache l2 ( // @[Configs.scala:93:24]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_ctrls_ctrl_in_a_ready (auto_l2_ctrls_ctrl_in_a_ready_0),
.auto_ctrls_ctrl_in_a_valid (auto_l2_ctrls_ctrl_in_a_valid_0), // @[ClockDomain.scala:14:9]
.auto_ctrls_ctrl_in_a_bits_opcode (auto_l2_ctrls_ctrl_in_a_bits_opcode_0), // @[ClockDomain.scala:14:9]
.auto_ctrls_ctrl_in_a_bits_param (auto_l2_ctrls_ctrl_in_a_bits_param_0), // @[ClockDomain.scala:14:9]
.auto_ctrls_ctrl_in_a_bits_size (auto_l2_ctrls_ctrl_in_a_bits_size_0), // @[ClockDomain.scala:14:9]
.auto_ctrls_ctrl_in_a_bits_source (auto_l2_ctrls_ctrl_in_a_bits_source_0), // @[ClockDomain.scala:14:9]
.auto_ctrls_ctrl_in_a_bits_address (auto_l2_ctrls_ctrl_in_a_bits_address_0), // @[ClockDomain.scala:14:9]
.auto_ctrls_ctrl_in_a_bits_mask (auto_l2_ctrls_ctrl_in_a_bits_mask_0), // @[ClockDomain.scala:14:9]
.auto_ctrls_ctrl_in_a_bits_data (auto_l2_ctrls_ctrl_in_a_bits_data_0), // @[ClockDomain.scala:14:9]
.auto_ctrls_ctrl_in_a_bits_corrupt (auto_l2_ctrls_ctrl_in_a_bits_corrupt_0), // @[ClockDomain.scala:14:9]
.auto_ctrls_ctrl_in_d_ready (auto_l2_ctrls_ctrl_in_d_ready_0), // @[ClockDomain.scala:14:9]
.auto_ctrls_ctrl_in_d_valid (auto_l2_ctrls_ctrl_in_d_valid_0),
.auto_ctrls_ctrl_in_d_bits_opcode (auto_l2_ctrls_ctrl_in_d_bits_opcode_0),
.auto_ctrls_ctrl_in_d_bits_size (auto_l2_ctrls_ctrl_in_d_bits_size_0),
.auto_ctrls_ctrl_in_d_bits_source (auto_l2_ctrls_ctrl_in_d_bits_source_0),
.auto_ctrls_ctrl_in_d_bits_data (auto_l2_ctrls_ctrl_in_d_bits_data_0),
.auto_in_a_ready (_l2_auto_in_a_ready),
.auto_in_a_valid (_InclusiveCache_inner_TLBuffer_auto_out_a_valid), // @[Parameters.scala:56:69]
.auto_in_a_bits_opcode (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_opcode), // @[Parameters.scala:56:69]
.auto_in_a_bits_param (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_param), // @[Parameters.scala:56:69]
.auto_in_a_bits_size (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_size), // @[Parameters.scala:56:69]
.auto_in_a_bits_source (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_source), // @[Parameters.scala:56:69]
.auto_in_a_bits_address (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_address), // @[Parameters.scala:56:69]
.auto_in_a_bits_mask (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_mask), // @[Parameters.scala:56:69]
.auto_in_a_bits_data (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_data), // @[Parameters.scala:56:69]
.auto_in_a_bits_corrupt (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_corrupt), // @[Parameters.scala:56:69]
.auto_in_b_ready (_InclusiveCache_inner_TLBuffer_auto_out_b_ready), // @[Parameters.scala:56:69]
.auto_in_b_valid (_l2_auto_in_b_valid),
.auto_in_b_bits_param (_l2_auto_in_b_bits_param),
.auto_in_b_bits_address (_l2_auto_in_b_bits_address),
.auto_in_c_ready (_l2_auto_in_c_ready),
.auto_in_c_valid (_InclusiveCache_inner_TLBuffer_auto_out_c_valid), // @[Parameters.scala:56:69]
.auto_in_c_bits_opcode (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_opcode), // @[Parameters.scala:56:69]
.auto_in_c_bits_param (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_param), // @[Parameters.scala:56:69]
.auto_in_c_bits_size (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_size), // @[Parameters.scala:56:69]
.auto_in_c_bits_source (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_source), // @[Parameters.scala:56:69]
.auto_in_c_bits_address (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_address), // @[Parameters.scala:56:69]
.auto_in_c_bits_data (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_data), // @[Parameters.scala:56:69]
.auto_in_c_bits_corrupt (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_corrupt), // @[Parameters.scala:56:69]
.auto_in_d_ready (_InclusiveCache_inner_TLBuffer_auto_out_d_ready), // @[Parameters.scala:56:69]
.auto_in_d_valid (_l2_auto_in_d_valid),
.auto_in_d_bits_opcode (_l2_auto_in_d_bits_opcode),
.auto_in_d_bits_param (_l2_auto_in_d_bits_param),
.auto_in_d_bits_size (_l2_auto_in_d_bits_size),
.auto_in_d_bits_source (_l2_auto_in_d_bits_source),
.auto_in_d_bits_sink (_l2_auto_in_d_bits_sink),
.auto_in_d_bits_denied (_l2_auto_in_d_bits_denied),
.auto_in_d_bits_data (_l2_auto_in_d_bits_data),
.auto_in_d_bits_corrupt (_l2_auto_in_d_bits_corrupt),
.auto_in_e_valid (_InclusiveCache_inner_TLBuffer_auto_out_e_valid), // @[Parameters.scala:56:69]
.auto_in_e_bits_sink (_InclusiveCache_inner_TLBuffer_auto_out_e_bits_sink), // @[Parameters.scala:56:69]
.auto_out_a_ready (InclusiveCache_outer_TLBuffer_auto_in_a_ready), // @[Buffer.scala:40:9]
.auto_out_a_valid (InclusiveCache_outer_TLBuffer_auto_in_a_valid),
.auto_out_a_bits_opcode (InclusiveCache_outer_TLBuffer_auto_in_a_bits_opcode),
.auto_out_a_bits_param (InclusiveCache_outer_TLBuffer_auto_in_a_bits_param),
.auto_out_a_bits_size (InclusiveCache_outer_TLBuffer_auto_in_a_bits_size),
.auto_out_a_bits_source (InclusiveCache_outer_TLBuffer_auto_in_a_bits_source),
.auto_out_a_bits_address (InclusiveCache_outer_TLBuffer_auto_in_a_bits_address),
.auto_out_a_bits_mask (InclusiveCache_outer_TLBuffer_auto_in_a_bits_mask),
.auto_out_a_bits_data (InclusiveCache_outer_TLBuffer_auto_in_a_bits_data),
.auto_out_a_bits_corrupt (InclusiveCache_outer_TLBuffer_auto_in_a_bits_corrupt),
.auto_out_c_ready (InclusiveCache_outer_TLBuffer_auto_in_c_ready), // @[Buffer.scala:40:9]
.auto_out_c_valid (InclusiveCache_outer_TLBuffer_auto_in_c_valid),
.auto_out_c_bits_opcode (InclusiveCache_outer_TLBuffer_auto_in_c_bits_opcode),
.auto_out_c_bits_param (InclusiveCache_outer_TLBuffer_auto_in_c_bits_param),
.auto_out_c_bits_size (InclusiveCache_outer_TLBuffer_auto_in_c_bits_size),
.auto_out_c_bits_source (InclusiveCache_outer_TLBuffer_auto_in_c_bits_source),
.auto_out_c_bits_address (InclusiveCache_outer_TLBuffer_auto_in_c_bits_address),
.auto_out_c_bits_data (InclusiveCache_outer_TLBuffer_auto_in_c_bits_data),
.auto_out_c_bits_corrupt (InclusiveCache_outer_TLBuffer_auto_in_c_bits_corrupt),
.auto_out_d_ready (InclusiveCache_outer_TLBuffer_auto_in_d_ready),
.auto_out_d_valid (InclusiveCache_outer_TLBuffer_auto_in_d_valid), // @[Buffer.scala:40:9]
.auto_out_d_bits_opcode (InclusiveCache_outer_TLBuffer_auto_in_d_bits_opcode), // @[Buffer.scala:40:9]
.auto_out_d_bits_param (InclusiveCache_outer_TLBuffer_auto_in_d_bits_param), // @[Buffer.scala:40:9]
.auto_out_d_bits_size (InclusiveCache_outer_TLBuffer_auto_in_d_bits_size), // @[Buffer.scala:40:9]
.auto_out_d_bits_source (InclusiveCache_outer_TLBuffer_auto_in_d_bits_source), // @[Buffer.scala:40:9]
.auto_out_d_bits_sink (InclusiveCache_outer_TLBuffer_auto_in_d_bits_sink), // @[Buffer.scala:40:9]
.auto_out_d_bits_denied (InclusiveCache_outer_TLBuffer_auto_in_d_bits_denied), // @[Buffer.scala:40:9]
.auto_out_d_bits_data (InclusiveCache_outer_TLBuffer_auto_in_d_bits_data), // @[Buffer.scala:40:9]
.auto_out_d_bits_corrupt (InclusiveCache_outer_TLBuffer_auto_in_d_bits_corrupt), // @[Buffer.scala:40:9]
.auto_out_e_valid (InclusiveCache_outer_TLBuffer_auto_in_e_valid),
.auto_out_e_bits_sink (InclusiveCache_outer_TLBuffer_auto_in_e_bits_sink)
); // @[Configs.scala:93:24]
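  // Inner-side TLBuffer between the TL filter (Filter.scala) and the L2 slave port.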
TLBuffer_a32d128s6k4z3c InclusiveCache_inner_TLBuffer ( // @[Parameters.scala:56:69]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_in_a_ready (filter_auto_anon_out_a_ready),
.auto_in_a_valid (filter_auto_anon_out_a_valid), // @[Filter.scala:60:9]
.auto_in_a_bits_opcode (filter_auto_anon_out_a_bits_opcode), // @[Filter.scala:60:9]
.auto_in_a_bits_param (filter_auto_anon_out_a_bits_param), // @[Filter.scala:60:9]
.auto_in_a_bits_size (filter_auto_anon_out_a_bits_size), // @[Filter.scala:60:9]
.auto_in_a_bits_source (filter_auto_anon_out_a_bits_source), // @[Filter.scala:60:9]
.auto_in_a_bits_address (filter_auto_anon_out_a_bits_address), // @[Filter.scala:60:9]
.auto_in_a_bits_mask (filter_auto_anon_out_a_bits_mask), // @[Filter.scala:60:9]
.auto_in_a_bits_data (filter_auto_anon_out_a_bits_data), // @[Filter.scala:60:9]
.auto_in_a_bits_corrupt (filter_auto_anon_out_a_bits_corrupt), // @[Filter.scala:60:9]
.auto_in_b_ready (filter_auto_anon_out_b_ready), // @[Filter.scala:60:9]
.auto_in_b_valid (filter_auto_anon_out_b_valid),
.auto_in_b_bits_param (filter_auto_anon_out_b_bits_param),
.auto_in_b_bits_address (filter_auto_anon_out_b_bits_address),
.auto_in_c_ready (filter_auto_anon_out_c_ready),
.auto_in_c_valid (filter_auto_anon_out_c_valid), // @[Filter.scala:60:9]
.auto_in_c_bits_opcode (filter_auto_anon_out_c_bits_opcode), // @[Filter.scala:60:9]
.auto_in_c_bits_param (filter_auto_anon_out_c_bits_param), // @[Filter.scala:60:9]
.auto_in_c_bits_size (filter_auto_anon_out_c_bits_size), // @[Filter.scala:60:9]
.auto_in_c_bits_source (filter_auto_anon_out_c_bits_source), // @[Filter.scala:60:9]
.auto_in_c_bits_address (filter_auto_anon_out_c_bits_address), // @[Filter.scala:60:9]
.auto_in_c_bits_data (filter_auto_anon_out_c_bits_data), // @[Filter.scala:60:9]
.auto_in_c_bits_corrupt (filter_auto_anon_out_c_bits_corrupt), // @[Filter.scala:60:9]
.auto_in_d_ready (filter_auto_anon_out_d_ready), // @[Filter.scala:60:9]
.auto_in_d_valid (filter_auto_anon_out_d_valid),
.auto_in_d_bits_opcode (filter_auto_anon_out_d_bits_opcode),
.auto_in_d_bits_param (filter_auto_anon_out_d_bits_param),
.auto_in_d_bits_size (filter_auto_anon_out_d_bits_size),
.auto_in_d_bits_source (filter_auto_anon_out_d_bits_source),
.auto_in_d_bits_sink (filter_auto_anon_out_d_bits_sink),
.auto_in_d_bits_denied (filter_auto_anon_out_d_bits_denied),
.auto_in_d_bits_data (filter_auto_anon_out_d_bits_data),
.auto_in_d_bits_corrupt (filter_auto_anon_out_d_bits_corrupt),
.auto_in_e_valid (filter_auto_anon_out_e_valid), // @[Filter.scala:60:9]
.auto_in_e_bits_sink (filter_auto_anon_out_e_bits_sink), // @[Filter.scala:60:9]
.auto_out_a_ready (_l2_auto_in_a_ready), // @[Configs.scala:93:24]
.auto_out_a_valid (_InclusiveCache_inner_TLBuffer_auto_out_a_valid),
.auto_out_a_bits_opcode (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_opcode),
.auto_out_a_bits_param (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_param),
.auto_out_a_bits_size (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_size),
.auto_out_a_bits_source (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_source),
.auto_out_a_bits_address (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_address),
.auto_out_a_bits_mask (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_mask),
.auto_out_a_bits_data (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_data),
.auto_out_a_bits_corrupt (_InclusiveCache_inner_TLBuffer_auto_out_a_bits_corrupt),
.auto_out_b_ready (_InclusiveCache_inner_TLBuffer_auto_out_b_ready),
.auto_out_b_valid (_l2_auto_in_b_valid), // @[Configs.scala:93:24]
.auto_out_b_bits_param (_l2_auto_in_b_bits_param), // @[Configs.scala:93:24]
.auto_out_b_bits_address (_l2_auto_in_b_bits_address), // @[Configs.scala:93:24]
.auto_out_c_ready (_l2_auto_in_c_ready), // @[Configs.scala:93:24]
.auto_out_c_valid (_InclusiveCache_inner_TLBuffer_auto_out_c_valid),
.auto_out_c_bits_opcode (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_opcode),
.auto_out_c_bits_param (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_param),
.auto_out_c_bits_size (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_size),
.auto_out_c_bits_source (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_source),
.auto_out_c_bits_address (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_address),
.auto_out_c_bits_data (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_data),
.auto_out_c_bits_corrupt (_InclusiveCache_inner_TLBuffer_auto_out_c_bits_corrupt),
.auto_out_d_ready (_InclusiveCache_inner_TLBuffer_auto_out_d_ready),
.auto_out_d_valid (_l2_auto_in_d_valid), // @[Configs.scala:93:24]
.auto_out_d_bits_opcode (_l2_auto_in_d_bits_opcode), // @[Configs.scala:93:24]
.auto_out_d_bits_param (_l2_auto_in_d_bits_param), // @[Configs.scala:93:24]
.auto_out_d_bits_size (_l2_auto_in_d_bits_size), // @[Configs.scala:93:24]
.auto_out_d_bits_source (_l2_auto_in_d_bits_source), // @[Configs.scala:93:24]
.auto_out_d_bits_sink (_l2_auto_in_d_bits_sink), // @[Configs.scala:93:24]
.auto_out_d_bits_denied (_l2_auto_in_d_bits_denied), // @[Configs.scala:93:24]
.auto_out_d_bits_data (_l2_auto_in_d_bits_data), // @[Configs.scala:93:24]
.auto_out_d_bits_corrupt (_l2_auto_in_d_bits_corrupt), // @[Configs.scala:93:24]
.auto_out_e_valid (_InclusiveCache_inner_TLBuffer_auto_out_e_valid),
.auto_out_e_bits_sink (_InclusiveCache_inner_TLBuffer_auto_out_e_bits_sink)
); // @[Parameters.scala:56:69]
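  // TLCacheCork: adapts the L2's coherent (TL-C) master port to an uncached protocol for the memory bus; its downstream port carries only A and D channels.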
TLCacheCork cork ( // @[Configs.scala:120:26]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_in_a_ready (InclusiveCache_outer_TLBuffer_auto_out_a_ready),
.auto_in_a_valid (InclusiveCache_outer_TLBuffer_auto_out_a_valid), // @[Buffer.scala:40:9]
.auto_in_a_bits_opcode (InclusiveCache_outer_TLBuffer_auto_out_a_bits_opcode), // @[Buffer.scala:40:9]
.auto_in_a_bits_param (InclusiveCache_outer_TLBuffer_auto_out_a_bits_param), // @[Buffer.scala:40:9]
.auto_in_a_bits_size (InclusiveCache_outer_TLBuffer_auto_out_a_bits_size), // @[Buffer.scala:40:9]
.auto_in_a_bits_source (InclusiveCache_outer_TLBuffer_auto_out_a_bits_source), // @[Buffer.scala:40:9]
.auto_in_a_bits_address (InclusiveCache_outer_TLBuffer_auto_out_a_bits_address), // @[Buffer.scala:40:9]
.auto_in_a_bits_mask (InclusiveCache_outer_TLBuffer_auto_out_a_bits_mask), // @[Buffer.scala:40:9]
.auto_in_a_bits_data (InclusiveCache_outer_TLBuffer_auto_out_a_bits_data), // @[Buffer.scala:40:9]
.auto_in_a_bits_corrupt (InclusiveCache_outer_TLBuffer_auto_out_a_bits_corrupt), // @[Buffer.scala:40:9]
.auto_in_c_ready (InclusiveCache_outer_TLBuffer_auto_out_c_ready),
.auto_in_c_valid (InclusiveCache_outer_TLBuffer_auto_out_c_valid), // @[Buffer.scala:40:9]
.auto_in_c_bits_opcode (InclusiveCache_outer_TLBuffer_auto_out_c_bits_opcode), // @[Buffer.scala:40:9]
.auto_in_c_bits_param (InclusiveCache_outer_TLBuffer_auto_out_c_bits_param), // @[Buffer.scala:40:9]
.auto_in_c_bits_size (InclusiveCache_outer_TLBuffer_auto_out_c_bits_size), // @[Buffer.scala:40:9]
.auto_in_c_bits_source (InclusiveCache_outer_TLBuffer_auto_out_c_bits_source), // @[Buffer.scala:40:9]
.auto_in_c_bits_address (InclusiveCache_outer_TLBuffer_auto_out_c_bits_address), // @[Buffer.scala:40:9]
.auto_in_c_bits_data (InclusiveCache_outer_TLBuffer_auto_out_c_bits_data), // @[Buffer.scala:40:9]
.auto_in_c_bits_corrupt (InclusiveCache_outer_TLBuffer_auto_out_c_bits_corrupt), // @[Buffer.scala:40:9]
.auto_in_d_ready (InclusiveCache_outer_TLBuffer_auto_out_d_ready), // @[Buffer.scala:40:9]
.auto_in_d_valid (InclusiveCache_outer_TLBuffer_auto_out_d_valid),
.auto_in_d_bits_opcode (InclusiveCache_outer_TLBuffer_auto_out_d_bits_opcode),
.auto_in_d_bits_param (InclusiveCache_outer_TLBuffer_auto_out_d_bits_param),
.auto_in_d_bits_size (InclusiveCache_outer_TLBuffer_auto_out_d_bits_size),
.auto_in_d_bits_source (InclusiveCache_outer_TLBuffer_auto_out_d_bits_source),
.auto_in_d_bits_sink (InclusiveCache_outer_TLBuffer_auto_out_d_bits_sink),
.auto_in_d_bits_denied (InclusiveCache_outer_TLBuffer_auto_out_d_bits_denied),
.auto_in_d_bits_data (InclusiveCache_outer_TLBuffer_auto_out_d_bits_data),
.auto_in_d_bits_corrupt (InclusiveCache_outer_TLBuffer_auto_out_d_bits_corrupt),
.auto_in_e_valid (InclusiveCache_outer_TLBuffer_auto_out_e_valid), // @[Buffer.scala:40:9]
.auto_in_e_bits_sink (InclusiveCache_outer_TLBuffer_auto_out_e_bits_sink), // @[Buffer.scala:40:9]
.auto_out_a_ready (_binder_auto_in_a_ready), // @[BankBinder.scala:71:28]
.auto_out_a_valid (_cork_auto_out_a_valid),
.auto_out_a_bits_opcode (_cork_auto_out_a_bits_opcode),
.auto_out_a_bits_param (_cork_auto_out_a_bits_param),
.auto_out_a_bits_size (_cork_auto_out_a_bits_size),
.auto_out_a_bits_source (_cork_auto_out_a_bits_source),
.auto_out_a_bits_address (_cork_auto_out_a_bits_address),
.auto_out_a_bits_mask (_cork_auto_out_a_bits_mask),
.auto_out_a_bits_data (_cork_auto_out_a_bits_data),
.auto_out_a_bits_corrupt (_cork_auto_out_a_bits_corrupt),
.auto_out_d_ready (_cork_auto_out_d_ready),
.auto_out_d_valid (_binder_auto_in_d_valid), // @[BankBinder.scala:71:28]
.auto_out_d_bits_opcode (_binder_auto_in_d_bits_opcode), // @[BankBinder.scala:71:28]
.auto_out_d_bits_param (_binder_auto_in_d_bits_param), // @[BankBinder.scala:71:28]
.auto_out_d_bits_size (_binder_auto_in_d_bits_size), // @[BankBinder.scala:71:28]
.auto_out_d_bits_source (_binder_auto_in_d_bits_source), // @[BankBinder.scala:71:28]
.auto_out_d_bits_sink (_binder_auto_in_d_bits_sink), // @[BankBinder.scala:71:28]
.auto_out_d_bits_denied (_binder_auto_in_d_bits_denied), // @[BankBinder.scala:71:28]
.auto_out_d_bits_data (_binder_auto_in_d_bits_data), // @[BankBinder.scala:71:28]
.auto_out_d_bits_corrupt (_binder_auto_in_d_bits_corrupt) // @[BankBinder.scala:71:28]
); // @[Configs.scala:120:26]
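  // BankBinder: maps the corked traffic onto the memory-bus coupler's bank port.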
BankBinder binder ( // @[BankBinder.scala:71:28]
.clock (childClock), // @[LazyModuleImp.scala:155:31]
.reset (childReset), // @[LazyModuleImp.scala:158:31]
.auto_in_a_ready (_binder_auto_in_a_ready),
.auto_in_a_valid (_cork_auto_out_a_valid), // @[Configs.scala:120:26]
.auto_in_a_bits_opcode (_cork_auto_out_a_bits_opcode), // @[Configs.scala:120:26]
.auto_in_a_bits_param (_cork_auto_out_a_bits_param), // @[Configs.scala:120:26]
.auto_in_a_bits_size (_cork_auto_out_a_bits_size), // @[Configs.scala:120:26]
.auto_in_a_bits_source (_cork_auto_out_a_bits_source), // @[Configs.scala:120:26]
.auto_in_a_bits_address (_cork_auto_out_a_bits_address), // @[Configs.scala:120:26]
.auto_in_a_bits_mask (_cork_auto_out_a_bits_mask), // @[Configs.scala:120:26]
.auto_in_a_bits_data (_cork_auto_out_a_bits_data), // @[Configs.scala:120:26]
.auto_in_a_bits_corrupt (_cork_auto_out_a_bits_corrupt), // @[Configs.scala:120:26]
.auto_in_d_ready (_cork_auto_out_d_ready), // @[Configs.scala:120:26]
.auto_in_d_valid (_binder_auto_in_d_valid),
.auto_in_d_bits_opcode (_binder_auto_in_d_bits_opcode),
.auto_in_d_bits_param (_binder_auto_in_d_bits_param),
.auto_in_d_bits_size (_binder_auto_in_d_bits_size),
.auto_in_d_bits_source (_binder_auto_in_d_bits_source),
.auto_in_d_bits_sink (_binder_auto_in_d_bits_sink),
.auto_in_d_bits_denied (_binder_auto_in_d_bits_denied),
.auto_in_d_bits_data (_binder_auto_in_d_bits_data),
.auto_in_d_bits_corrupt (_binder_auto_in_d_bits_corrupt),
.auto_out_a_ready (coupler_to_bus_named_mbus_auto_widget_anon_in_a_ready), // @[LazyModuleImp.scala:138:7]
.auto_out_a_valid (coupler_to_bus_named_mbus_auto_widget_anon_in_a_valid),
.auto_out_a_bits_opcode (coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_opcode),
.auto_out_a_bits_param (coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_param),
.auto_out_a_bits_size (coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_size),
.auto_out_a_bits_source (coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_source),
.auto_out_a_bits_address (coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_address),
.auto_out_a_bits_mask (coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_mask),
.auto_out_a_bits_data (coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_data),
.auto_out_a_bits_corrupt (coupler_to_bus_named_mbus_auto_widget_anon_in_a_bits_corrupt),
.auto_out_d_ready (coupler_to_bus_named_mbus_auto_widget_anon_in_d_ready),
.auto_out_d_valid (coupler_to_bus_named_mbus_auto_widget_anon_in_d_valid), // @[LazyModuleImp.scala:138:7]
.auto_out_d_bits_opcode (coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_opcode), // @[LazyModuleImp.scala:138:7]
.auto_out_d_bits_param (coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_param), // @[LazyModuleImp.scala:138:7]
.auto_out_d_bits_size (coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_size), // @[LazyModuleImp.scala:138:7]
.auto_out_d_bits_source (coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_source), // @[LazyModuleImp.scala:138:7]
.auto_out_d_bits_sink (coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_sink), // @[LazyModuleImp.scala:138:7]
.auto_out_d_bits_denied (coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_denied), // @[LazyModuleImp.scala:138:7]
.auto_out_d_bits_data (coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_data), // @[LazyModuleImp.scala:138:7]
.auto_out_d_bits_corrupt (coupler_to_bus_named_mbus_auto_widget_anon_in_d_bits_corrupt) // @[LazyModuleImp.scala:138:7]
); // @[BankBinder.scala:71:28]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_valid = auto_coupler_to_bus_named_mbus_bus_xing_out_a_valid_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_opcode = auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_opcode_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_param = auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_param_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_size = auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_source = auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_source_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_address = auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_address_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_mask = auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_mask_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_data = auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_data_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_corrupt = auto_coupler_to_bus_named_mbus_bus_xing_out_a_bits_corrupt_0; // @[ClockDomain.scala:14:9]
assign auto_coupler_to_bus_named_mbus_bus_xing_out_d_ready = auto_coupler_to_bus_named_mbus_bus_xing_out_d_ready_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_a_ready = auto_coherent_jbar_anon_in_a_ready_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_b_valid = auto_coherent_jbar_anon_in_b_valid_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_b_bits_param = auto_coherent_jbar_anon_in_b_bits_param_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_b_bits_address = auto_coherent_jbar_anon_in_b_bits_address_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_c_ready = auto_coherent_jbar_anon_in_c_ready_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_d_valid = auto_coherent_jbar_anon_in_d_valid_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_d_bits_opcode = auto_coherent_jbar_anon_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_d_bits_param = auto_coherent_jbar_anon_in_d_bits_param_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_d_bits_size = auto_coherent_jbar_anon_in_d_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_d_bits_source = auto_coherent_jbar_anon_in_d_bits_source_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_d_bits_sink = auto_coherent_jbar_anon_in_d_bits_sink_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_d_bits_denied = auto_coherent_jbar_anon_in_d_bits_denied_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_d_bits_data = auto_coherent_jbar_anon_in_d_bits_data_0; // @[ClockDomain.scala:14:9]
assign auto_coherent_jbar_anon_in_d_bits_corrupt = auto_coherent_jbar_anon_in_d_bits_corrupt_0; // @[ClockDomain.scala:14:9]
assign auto_l2_ctrls_ctrl_in_a_ready = auto_l2_ctrls_ctrl_in_a_ready_0; // @[ClockDomain.scala:14:9]
assign auto_l2_ctrls_ctrl_in_d_valid = auto_l2_ctrls_ctrl_in_d_valid_0; // @[ClockDomain.scala:14:9]
assign auto_l2_ctrls_ctrl_in_d_bits_opcode = auto_l2_ctrls_ctrl_in_d_bits_opcode_0; // @[ClockDomain.scala:14:9]
assign auto_l2_ctrls_ctrl_in_d_bits_size = auto_l2_ctrls_ctrl_in_d_bits_size_0; // @[ClockDomain.scala:14:9]
assign auto_l2_ctrls_ctrl_in_d_bits_source = auto_l2_ctrls_ctrl_in_d_bits_source_0; // @[ClockDomain.scala:14:9]
assign auto_l2_ctrls_ctrl_in_d_bits_data = auto_l2_ctrls_ctrl_in_d_bits_data_0; // @[ClockDomain.scala:14:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
    monAssert (IfThen(my_resp_pend, !my_a_first_beat),
      "Request message should not be sent with a source ID, for which a response message " +
      "is already pending (not received until current cycle) for a prior request message " +
      "with the same source ID" + extra)
    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
      "Response message should be accepted with a source ID only if a request message with the " +
      "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
      "Response message should be sent with a source ID only if a request message with the " +
      "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
      "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
      "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
      "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
      "request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
  //This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
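    // Worked example of the encoding used below: a recorded size of 2 is stored as (2 << 1) | 1 = 5,
    // so an all-zero slot can only mean "nothing in flight"; lookups shift right by 1 to recover the
    // original value. Likewise size_to_numfullbits(3.U) evaluates to 0b111.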
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
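    // Both tables are indexed by the A-channel opcode (PutFullData=0 ... AcquirePerm=7); the second
    // table lists the alternative legal response, e.g. GrantData instead of Grant for AcquireBlock.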
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
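// A minimal usage sketch of DecoupledHelper (hypothetical signal names): gather all ready/valid
// preconditions once, then derive each side's fire condition while excluding that side's own signal.
object DecoupledHelperExample {
  def connect(in: DecoupledIO[UInt], out: DecoupledIO[UInt], enable: Bool): Unit = {
    val helper = DecoupledHelper(in.valid, out.ready, enable)
    in.ready  := helper.fire(in.valid)   // out.ready && enable
    out.valid := helper.fire(out.ready)  // in.valid && enable
    out.bits  := in.bits
  }
}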
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
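// A small sketch of MuxTLookup with hypothetical keys and payloads: select a (hit, data) pair by
// key, falling back to the default pair when no entry matches.
object MuxTLookupExample {
  def decode(key: UInt): (Bool, UInt) =
    MuxTLookup(key, (false.B, 0.U(8.W)), Seq(
      1.U -> (true.B, 0x10.U(8.W)),
      2.U -> (true.B, 0x20.U(8.W))))
}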
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
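// Sketch (hypothetical producers): ValidMux forwards the first valid input's bits, with the output
// valid asserted whenever any input is valid.
object ValidMuxExample {
  def pickFirst(a: ValidIO[UInt], b: ValidIO[UInt]): ValidIO[UInt] = ValidMux(a, b)
}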
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
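// Usage sketch (hypothetical `copy0..copy2` Bools): a 2-of-3 voter for triplicated state.
//   val voted = Majority(Seq(copy0, copy1, copy2))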
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
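// Usage sketch (hypothetical `hits` UInt): detect multiple simultaneous hits; for n <= 2 this is
// cheaper than a full PopCount.
//   val multiHit = PopCountAtLeast(hits, 2)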
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
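// Usage sketch (hypothetical names): read a runtime-tunable limit and arm a watchdog.
//   val maxLatency = PlusArg("max_latency", docstring = "Maximum allowed request latency")
//   PlusArg.timeout("watchdog_cycles", docstring = "Kill the sim when the counter exceeds this")(cycleCount)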
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
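// Example values for a 5-bit input: leftOR("b00100".U(5.W)) = "b11100".U, rightOR("b00100".U(5.W)) = "b00111".U.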
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
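// Usage sketch: unlike Seq.groupBy, the insertion order of keys is preserved, e.g.
//   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2)  // => Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4))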
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
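// Note: adResponse/bcResponse are indexed by a request opcode to get the expected response opcode,
// e.g. adResponse(TLMessages.Get) === TLMessages.AccessAckData.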
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
 * (B)ranch: the agent is on an outwards path from the point of serialization and holds a read-only copy.
 * (N)one: the agent holds no read or write permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
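// Example: IdRange(0, 4) covers source IDs 0..3 and is disjoint from IdRange(4, 8),
// so IdRange.overlaps(Seq(IdRange(0, 4), IdRange(4, 8))) returns None.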
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be non-negative, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
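// Example: TransferSizes(4, 64) accepts any power-of-two size from 4 to 64 bytes, so
// contains(16) is true while contains(2) and contains(48) are false (48 is not a power of two).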
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
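// Example: AddressSet.misaligned(0x1000, 0x3000) decomposes the region into aligned sets
//   Seq(AddressSet(0x1000, 0xfff), AddressSet(0x2000, 0x1fff)).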
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
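// Usage sketch (hypothetical `enq` DecoupledIO): BufferParams.default(enq) inserts a 2-entry Queue,
// while BufferParams.none(enq) passes the interface through untouched.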
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
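// Example: for a 4-beat burst (beats1 = 3), `first` is true on beat 0, `last` on beat 3,
// and `count` advances 0, 1, 2, 3 as the beats fire.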
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
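  // The AccessAck/HintAck helpers below build C-channel responses to forwarded B-channel
  // requests; the overloads taking a TLBundleB simply echo back its source, address and size.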
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
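// TLEdgeIn mirrors TLEdgeOut for the inward-facing side of a link: it constructs B-channel
// requests and D-channel responses as seen by a manager, so its legality checks consult the
// client parameters (client.supports*) rather than the manager parameters.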
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
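  // myTranspose is a local transpose over a possibly ragged Seq[Seq[T]]: exhausted inner
  // sequences are dropped each round, unlike the standard library transpose, which
  // requires equal-length rows.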
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
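  // Grant comes in two shapes: the overloads without `data` produce a plain Grant
  // (a permissions-only response), while the overloads taking `data` produce GrantData
  // and may additionally flag `corrupt`.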
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
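  // A sketch of how a manager might respond to a cached Acquire with these helpers (the
  // names `edge`, `in` and `grant_sink` are illustrative, and TLPermissions.toT is just
  // one possible capability):
  //   in.d.bits := edge.Grant(fromSink = grant_sink, toSource = in.a.bits.source,
  //                           lgSize = in.a.bits.size, capPermissions = TLPermissions.toT)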
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
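  // The D-channel AccessAck/HintAck helpers below are the direct responses to A-channel
  // requests: `denied` reports that the operation was not performed, and for AccessAckData
  // `corrupt` additionally marks the returned data beat as unusable.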
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_59( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [3:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [27:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [3:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10]
wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67]
wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32]
wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_first_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_first_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_wo_ready_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_wo_ready_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_opcodes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_opcodes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_sizes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_sizes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _c_probe_ack_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _c_probe_ack_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [27:0] _same_cycle_resp_WIRE_4_bits_address = 28'h0; // @[Bundles.scala:265:74]
wire [27:0] _same_cycle_resp_WIRE_5_bits_address = 28'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_first_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_first_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_set_wo_ready_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_wo_ready_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_interm_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [3:0] _c_opcodes_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_opcodes_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_sizes_set_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_sizes_set_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _c_probe_ack_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _c_probe_ack_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_1_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_2_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_3_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [3:0] _same_cycle_resp_WIRE_4_bits_source = 4'h0; // @[Bundles.scala:265:74]
wire [3:0] _same_cycle_resp_WIRE_5_bits_source = 4'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [130:0] _c_opcodes_set_T_1 = 131'h0; // @[Monitor.scala:767:54]
wire [130:0] _c_sizes_set_T_1 = 131'h0; // @[Monitor.scala:768:52]
wire [6:0] _c_opcodes_set_T = 7'h0; // @[Monitor.scala:767:79]
wire [6:0] _c_sizes_set_T = 7'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [15:0] _c_set_wo_ready_T = 16'h1; // @[OneHot.scala:58:35]
wire [15:0] _c_set_T = 16'h1; // @[OneHot.scala:58:35]
wire [39:0] c_opcodes_set = 40'h0; // @[Monitor.scala:740:34]
wire [39:0] c_sizes_set = 40'h0; // @[Monitor.scala:741:34]
wire [9:0] c_set = 10'h0; // @[Monitor.scala:738:34]
wire [9:0] c_set_wo_ready = 10'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [3:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [3:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_4 = source_ok_uncommonBits < 4'hA; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [27:0] _is_aligned_T = {22'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 28'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
wire [3:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}]
wire [3:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}]
wire [3:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_10 = source_ok_uncommonBits_1 < 4'hA; // @[Parameters.scala:52:56, :57:20]
wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20]
wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31]
wire _T_672 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_672; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_672; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [3:0] source; // @[Monitor.scala:390:22]
reg [27:0] address; // @[Monitor.scala:391:22]
wire _T_745 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_745; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_745; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_745; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [3:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [9:0] inflight; // @[Monitor.scala:614:27]
reg [39:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [39:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [9:0] a_set; // @[Monitor.scala:626:34]
wire [9:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [39:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [39:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [6:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [6:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [6:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [6:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [6:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [6:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [6:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [6:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [6:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [39:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [39:0] _a_opcode_lookup_T_6 = {36'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [39:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[39:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [39:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [39:0] _a_size_lookup_T_6 = {36'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [39:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[39:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [15:0] _GEN_2 = 16'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [15:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [15:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[9:0] : 10'h0; // @[OneHot.scala:58:35]
wire _T_598 = _T_672 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_598 ? _a_set_T[9:0] : 10'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [6:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [6:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [6:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [130:0] _a_opcodes_set_T_1 = {127'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_598 ? _a_opcodes_set_T_1[39:0] : 40'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [130:0] _a_sizes_set_T_1 = {127'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[39:0] : 40'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
wire [9:0] d_clr; // @[Monitor.scala:664:34]
wire [9:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [39:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [39:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [15:0] _GEN_5 = 16'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [15:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [15:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [15:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [15:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[9:0] : 10'h0; // @[OneHot.scala:58:35]
wire _T_613 = _T_745 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_613 ? _d_clr_T[9:0] : 10'h0; // @[OneHot.scala:58:35]
wire [142:0] _d_opcodes_clr_T_5 = 143'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[39:0] : 40'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [142:0] _d_sizes_clr_T_5 = 143'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_613 ? _d_sizes_clr_T_5[39:0] : 40'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [9:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [9:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [9:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [39:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [39:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [39:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [39:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [39:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [39:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [9:0] inflight_1; // @[Monitor.scala:726:35]
wire [9:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [39:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [39:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [39:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [39:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [39:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [39:0] _c_opcode_lookup_T_6 = {36'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [39:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[39:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [39:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [39:0] _c_size_lookup_T_6 = {36'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [39:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[39:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [9:0] d_clr_1; // @[Monitor.scala:774:34]
wire [9:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [39:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [39:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_716 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_716 & d_release_ack_1 ? _d_clr_wo_ready_T_1[9:0] : 10'h0; // @[OneHot.scala:58:35]
wire _T_698 = _T_745 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_698 ? _d_clr_T_1[9:0] : 10'h0; // @[OneHot.scala:58:35]
wire [142:0] _d_opcodes_clr_T_11 = 143'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_698 ? _d_opcodes_clr_T_11[39:0] : 40'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [142:0] _d_sizes_clr_T_11 = 143'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_698 ? _d_sizes_clr_T_11[39:0] : 40'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 4'h0; // @[Monitor.scala:36:7, :795:113]
wire [9:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [9:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [39:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [39:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [39:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [39:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File SinkE.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._
class SinkEResponse(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val sink = UInt(params.inner.bundle.sinkBits.W)
}
class SinkE(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val resp = Valid(new SinkEResponse(params))
val e = Flipped(Decoupled(new TLBundleE(params.inner.bundle)))
})
if (params.firstLevel) {
// Tie off unused ports
io.resp.valid := false.B
io.resp.bits := DontCare
io.e.ready := true.B
} else {
// No restrictions on buffer
val e = params.micro.innerBuf.e(io.e)
e.ready := true.B
io.resp.valid := e.valid
io.resp.bits.sink := e.bits.sink
}
}
| module SinkE_2( // @[SinkE.scala:29:7]
input clock, // @[SinkE.scala:29:7]
input reset, // @[SinkE.scala:29:7]
output io_resp_valid, // @[SinkE.scala:31:14]
output [3:0] io_resp_bits_sink, // @[SinkE.scala:31:14]
input io_e_valid, // @[SinkE.scala:31:14]
input [3:0] io_e_bits_sink // @[SinkE.scala:31:14]
);
wire io_e_valid_0 = io_e_valid; // @[SinkE.scala:29:7]
wire [3:0] io_e_bits_sink_0 = io_e_bits_sink; // @[SinkE.scala:29:7]
wire io_e_ready = 1'h1; // @[SinkE.scala:29:7]
wire io_resp_valid_0 = io_e_valid_0; // @[SinkE.scala:29:7]
wire [3:0] io_resp_bits_sink_0 = io_e_bits_sink_0; // @[SinkE.scala:29:7]
assign io_resp_valid = io_resp_valid_0; // @[SinkE.scala:29:7]
assign io_resp_bits_sink = io_resp_bits_sink_0; // @[SinkE.scala:29:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File fNFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
object fNFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits) =
{
val minNormExp = (BigInt(1)<<(expWidth - 1)) + 2
val rawIn = rawFloatFromRecFN(expWidth, sigWidth, in)
val isSubnormal = rawIn.sExp < minNormExp.S
val denormShiftDist = 1.U - rawIn.sExp(log2Up(sigWidth - 1) - 1, 0)
val denormFract = ((rawIn.sig>>1)>>denormShiftDist)(sigWidth - 2, 0)
val expOut =
Mux(isSubnormal,
0.U,
rawIn.sExp(expWidth - 1, 0) -
((BigInt(1)<<(expWidth - 1)) + 1).U
) | Fill(expWidth, rawIn.isNaN || rawIn.isInf)
val fractOut =
Mux(isSubnormal,
denormFract,
Mux(rawIn.isInf, 0.U, rawIn.sig(sigWidth - 2, 0))
)
Cat(rawIn.sign, expOut, fractOut)
}
}
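// --- Illustrative addition (not part of the original hardfloat sources) ---
// A minimal usage sketch: fNFromRecFN converts a recoded float back into the
// standard IEEE-754 bit pattern. For binary32 the recoded form is 33 bits wide
// (one extra exponent bit), so a thin wrapper looks like the module below.
// The module name and port names are illustrative only.
class RecFNToFN32 extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(33.W))   // recoded single-precision operand
    val out = Output(UInt(32.W))  // standard IEEE-754 binary32 bit pattern
  })
  io.out := fNFromRecFN(8, 24, io.in)  // expWidth = 8, sigWidth = 24 for binary32
}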
File execution-unit.scala:
//******************************************************************************
// Copyright (c) 2013 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Execution Units
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// The issue window schedules micro-ops onto a specific execution pipeline
// A given execution pipeline may contain multiple functional units; one or more
// read ports, and one or more writeports.
package boom.v3.exu
import scala.collection.mutable.{ArrayBuffer}
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.rocket.{BP}
import freechips.rocketchip.tile
import FUConstants._
import boom.v3.common._
import boom.v3.ifu.{GetPCFromFtqIO}
import boom.v3.util.{ImmGen, IsKilledByBranch, BranchKillableQueue, BoomCoreStringPrefix}
/**
* Response from Execution Unit. Bundles a MicroOp with data
*
* @param dataWidth width of the data coming from the execution unit
*/
class ExeUnitResp(val dataWidth: Int)(implicit p: Parameters) extends BoomBundle
with HasBoomUOP
{
val data = Bits(dataWidth.W)
val predicated = Bool() // Was this predicated off?
val fflags = new ValidIO(new FFlagsResp) // write fflags to ROB // TODO: Do this better
}
/**
* Floating Point flag response
*/
class FFlagsResp(implicit p: Parameters) extends BoomBundle
{
val uop = new MicroOp()
val flags = Bits(tile.FPConstants.FLAGS_SZ.W)
}
/**
 * Abstract top-level execution unit that wraps lower-level functional units to make a
 * multi-function execution unit.
 *
 * @param readsIrf does this exe unit need an integer regfile read port
 * @param writesIrf does this exe unit need an integer regfile write port
 * @param readsFrf does this exe unit need a floating-point regfile read port
 * @param writesFrf does this exe unit need a floating-point regfile write port
 * @param writesLlIrf does this exe unit need a long-latency integer regfile write port
 * @param writesLlFrf does this exe unit need a long-latency floating-point regfile write port
 * @param numBypassStages number of bypass ports for the exe unit
 * @param dataWidth width of the data coming out of the exe unit
 * @param bypassable is the exe unit able to be bypassed
 * @param hasMem does the exe unit have a MemAddrCalcUnit
 * @param hasCSR does the exe unit write to the CSRFile
 * @param hasJmpUnit does the exe unit have a jump unit
 * @param hasAlu does the exe unit have an ALU
 * @param hasFpu does the exe unit have an FPU
 * @param hasMul does the exe unit have a multiplier
 * @param hasDiv does the exe unit have a divider
 * @param hasFdiv does the exe unit have an FP divider
 * @param hasIfpu does the exe unit have an int-to-FP unit
 * @param hasFpiu does the exe unit have an FP-to-int unit
*/
abstract class ExecutionUnit(
val readsIrf : Boolean = false,
val writesIrf : Boolean = false,
val readsFrf : Boolean = false,
val writesFrf : Boolean = false,
val writesLlIrf : Boolean = false,
val writesLlFrf : Boolean = false,
val numBypassStages : Int,
val dataWidth : Int,
val bypassable : Boolean = false, // TODO make override def for code clarity
val alwaysBypassable : Boolean = false,
val hasMem : Boolean = false,
val hasCSR : Boolean = false,
val hasJmpUnit : Boolean = false,
val hasAlu : Boolean = false,
val hasFpu : Boolean = false,
val hasMul : Boolean = false,
val hasDiv : Boolean = false,
val hasFdiv : Boolean = false,
val hasIfpu : Boolean = false,
val hasFpiu : Boolean = false,
val hasRocc : Boolean = false
)(implicit p: Parameters) extends BoomModule
{
val io = IO(new Bundle {
val fu_types = Output(Bits(FUC_SZ.W))
val req = Flipped(new DecoupledIO(new FuncUnitReq(dataWidth)))
val iresp = if (writesIrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null
val fresp = if (writesFrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null
val ll_iresp = if (writesLlIrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null
val ll_fresp = if (writesLlFrf) new DecoupledIO(new ExeUnitResp(dataWidth)) else null
val bypass = Output(Vec(numBypassStages, Valid(new ExeUnitResp(dataWidth))))
val brupdate = Input(new BrUpdateInfo())
// only used by the rocc unit
val rocc = if (hasRocc) new RoCCShimCoreIO else null
// only used by the branch unit
val brinfo = if (hasAlu) Output(new BrResolutionInfo()) else null
val get_ftq_pc = if (hasJmpUnit) Flipped(new GetPCFromFtqIO()) else null
val status = Input(new freechips.rocketchip.rocket.MStatus())
// only used by the fpu unit
val fcsr_rm = if (hasFcsr) Input(Bits(tile.FPConstants.RM_SZ.W)) else null
// only used by the mem unit
val lsu_io = if (hasMem) Flipped(new boom.v3.lsu.LSUExeIO) else null
val bp = if (hasMem) Input(Vec(nBreakpoints, new BP)) else null
val mcontext = if (hasMem) Input(UInt(coreParams.mcontextWidth.W)) else null
val scontext = if (hasMem) Input(UInt(coreParams.scontextWidth.W)) else null
// TODO move this out of ExecutionUnit
val com_exception = if (hasMem || hasRocc) Input(Bool()) else null
})
io.req.ready := false.B
if (writesIrf) {
io.iresp.valid := false.B
io.iresp.bits := DontCare
io.iresp.bits.fflags.valid := false.B
io.iresp.bits.predicated := false.B
assert(io.iresp.ready)
}
if (writesLlIrf) {
io.ll_iresp.valid := false.B
io.ll_iresp.bits := DontCare
io.ll_iresp.bits.fflags.valid := false.B
io.ll_iresp.bits.predicated := false.B
}
if (writesFrf) {
io.fresp.valid := false.B
io.fresp.bits := DontCare
io.fresp.bits.fflags.valid := false.B
io.fresp.bits.predicated := false.B
assert(io.fresp.ready)
}
if (writesLlFrf) {
io.ll_fresp.valid := false.B
io.ll_fresp.bits := DontCare
io.ll_fresp.bits.fflags.valid := false.B
io.ll_fresp.bits.predicated := false.B
}
// TODO add "number of fflag ports", so we can properly account for FPU+Mem combinations
def hasFFlags : Boolean = hasFpu || hasFdiv
require ((hasFpu || hasFdiv) ^ (hasAlu || hasMul || hasMem || hasIfpu),
"[execute] we no longer support mixing FP and Integer functional units in the same exe unit.")
def hasFcsr = hasIfpu || hasFpu || hasFdiv
require (bypassable || !alwaysBypassable,
"[execute] an execution unit must be bypassable if it is always bypassable")
def supportedFuncUnits = {
new SupportedFuncUnits(
alu = hasAlu,
jmp = hasJmpUnit,
mem = hasMem,
muld = hasMul || hasDiv,
fpu = hasFpu,
csr = hasCSR,
fdiv = hasFdiv,
ifpu = hasIfpu)
}
}
/**
* ALU execution unit that can have a branch, alu, mul, div, int to FP,
* and memory unit.
*
 * @param hasJmpUnit does the exe unit have a jump unit
 * @param hasCSR does the exe unit write to the CSRFile
 * @param hasAlu does the exe unit have an ALU
 * @param hasMul does the exe unit have a multiplier
 * @param hasDiv does the exe unit have a divider
 * @param hasIfpu does the exe unit have an int-to-FP unit
* @param hasMem does the exe unit have a MemAddrCalcUnit
*/
class ALUExeUnit(
hasJmpUnit : Boolean = false,
hasCSR : Boolean = false,
hasAlu : Boolean = true,
hasMul : Boolean = false,
hasDiv : Boolean = false,
hasIfpu : Boolean = false,
hasMem : Boolean = false,
hasRocc : Boolean = false)
(implicit p: Parameters)
extends ExecutionUnit(
readsIrf = true,
writesIrf = hasAlu || hasMul || hasDiv,
writesLlIrf = hasMem || hasRocc,
writesLlFrf = (hasIfpu || hasMem) && p(tile.TileKey).core.fpu != None,
numBypassStages =
if (hasAlu && hasMul) 3 //TODO XXX p(tile.TileKey).core.imulLatency
else if (hasAlu) 1 else 0,
dataWidth = 64 + 1,
bypassable = hasAlu,
alwaysBypassable = hasAlu && !(hasMem || hasJmpUnit || hasMul || hasDiv || hasCSR || hasIfpu || hasRocc),
hasCSR = hasCSR,
hasJmpUnit = hasJmpUnit,
hasAlu = hasAlu,
hasMul = hasMul,
hasDiv = hasDiv,
hasIfpu = hasIfpu,
hasMem = hasMem,
hasRocc = hasRocc)
with freechips.rocketchip.rocket.constants.MemoryOpConstants
{
require(!(hasRocc && !hasCSR),
"RoCC needs to be shared with CSR unit")
require(!(hasMem && hasRocc),
"We do not support execution unit with both Mem and Rocc writebacks")
require(!(hasMem && hasIfpu),
"TODO. Currently do not support AluMemExeUnit with FP")
val out_str =
BoomCoreStringPrefix("==ExeUnit==") +
(if (hasAlu) BoomCoreStringPrefix(" - ALU") else "") +
(if (hasMul) BoomCoreStringPrefix(" - Mul") else "") +
(if (hasDiv) BoomCoreStringPrefix(" - Div") else "") +
(if (hasIfpu) BoomCoreStringPrefix(" - IFPU") else "") +
(if (hasMem) BoomCoreStringPrefix(" - Mem") else "") +
(if (hasRocc) BoomCoreStringPrefix(" - RoCC") else "")
override def toString: String = out_str.toString
val div_busy = WireInit(false.B)
val ifpu_busy = WireInit(false.B)
// The Functional Units --------------------
// Specifically the functional units with fast writeback to IRF
val iresp_fu_units = ArrayBuffer[FunctionalUnit]()
io.fu_types := Mux(hasAlu.B, FU_ALU, 0.U) |
Mux(hasMul.B, FU_MUL, 0.U) |
Mux(!div_busy && hasDiv.B, FU_DIV, 0.U) |
Mux(hasCSR.B, FU_CSR, 0.U) |
Mux(hasJmpUnit.B, FU_JMP, 0.U) |
Mux(!ifpu_busy && hasIfpu.B, FU_I2F, 0.U) |
Mux(hasMem.B, FU_MEM, 0.U)
// ALU Unit -------------------------------
var alu: ALUUnit = null
if (hasAlu) {
alu = Module(new ALUUnit(isJmpUnit = hasJmpUnit,
numStages = numBypassStages,
dataWidth = xLen))
alu.io.req.valid := (
io.req.valid &&
(io.req.bits.uop.fu_code === FU_ALU ||
io.req.bits.uop.fu_code === FU_JMP ||
(io.req.bits.uop.fu_code === FU_CSR && io.req.bits.uop.uopc =/= uopROCC)))
    // RoCC commands are taken by the RoCC unit, not the ALU
alu.io.req.bits.uop := io.req.bits.uop
alu.io.req.bits.kill := io.req.bits.kill
alu.io.req.bits.rs1_data := io.req.bits.rs1_data
alu.io.req.bits.rs2_data := io.req.bits.rs2_data
alu.io.req.bits.rs3_data := DontCare
alu.io.req.bits.pred_data := io.req.bits.pred_data
alu.io.resp.ready := DontCare
alu.io.brupdate := io.brupdate
iresp_fu_units += alu
// Bypassing only applies to ALU
io.bypass := alu.io.bypass
// branch unit is embedded inside the ALU
io.brinfo := alu.io.brinfo
if (hasJmpUnit) {
alu.io.get_ftq_pc <> io.get_ftq_pc
}
}
var rocc: RoCCShim = null
if (hasRocc) {
rocc = Module(new RoCCShim)
rocc.io.req.valid := io.req.valid && io.req.bits.uop.uopc === uopROCC
rocc.io.req.bits := DontCare
rocc.io.req.bits.uop := io.req.bits.uop
rocc.io.req.bits.kill := io.req.bits.kill
rocc.io.req.bits.rs1_data := io.req.bits.rs1_data
rocc.io.req.bits.rs2_data := io.req.bits.rs2_data
rocc.io.brupdate := io.brupdate // We should assert on this somewhere
rocc.io.status := io.status
rocc.io.exception := io.com_exception
io.rocc <> rocc.io.core
rocc.io.resp.ready := io.ll_iresp.ready
io.ll_iresp.valid := rocc.io.resp.valid
io.ll_iresp.bits.uop := rocc.io.resp.bits.uop
io.ll_iresp.bits.data := rocc.io.resp.bits.data
}
// Pipelined, IMul Unit ------------------
var imul: PipelinedMulUnit = null
if (hasMul) {
imul = Module(new PipelinedMulUnit(imulLatency, xLen))
imul.io <> DontCare
imul.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_MUL)
imul.io.req.bits.uop := io.req.bits.uop
imul.io.req.bits.rs1_data := io.req.bits.rs1_data
imul.io.req.bits.rs2_data := io.req.bits.rs2_data
imul.io.req.bits.kill := io.req.bits.kill
imul.io.brupdate := io.brupdate
iresp_fu_units += imul
}
var ifpu: IntToFPUnit = null
if (hasIfpu) {
ifpu = Module(new IntToFPUnit(latency=intToFpLatency))
ifpu.io.req <> io.req
ifpu.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_I2F)
ifpu.io.fcsr_rm := io.fcsr_rm
ifpu.io.brupdate <> io.brupdate
ifpu.io.resp.ready := DontCare
// buffer up results since we share write-port on integer regfile.
val queue = Module(new BranchKillableQueue(new ExeUnitResp(dataWidth),
entries = intToFpLatency + 3)) // TODO being overly conservative
queue.io.enq.valid := ifpu.io.resp.valid
queue.io.enq.bits.uop := ifpu.io.resp.bits.uop
queue.io.enq.bits.data := ifpu.io.resp.bits.data
queue.io.enq.bits.predicated := ifpu.io.resp.bits.predicated
queue.io.enq.bits.fflags := ifpu.io.resp.bits.fflags
queue.io.brupdate := io.brupdate
queue.io.flush := io.req.bits.kill
io.ll_fresp <> queue.io.deq
ifpu_busy := !(queue.io.empty)
assert (queue.io.enq.ready)
}
// Div/Rem Unit -----------------------
var div: DivUnit = null
val div_resp_val = WireInit(false.B)
if (hasDiv) {
div = Module(new DivUnit(xLen))
div.io <> DontCare
div.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_DIV) && hasDiv.B
div.io.req.bits.uop := io.req.bits.uop
div.io.req.bits.rs1_data := io.req.bits.rs1_data
div.io.req.bits.rs2_data := io.req.bits.rs2_data
div.io.brupdate := io.brupdate
div.io.req.bits.kill := io.req.bits.kill
// share write port with the pipelined units
div.io.resp.ready := !(iresp_fu_units.map(_.io.resp.valid).reduce(_|_))
div_resp_val := div.io.resp.valid
div_busy := !div.io.req.ready ||
(io.req.valid && io.req.bits.uop.fu_code_is(FU_DIV))
iresp_fu_units += div
}
// Mem Unit --------------------------
if (hasMem) {
require(!hasAlu)
val maddrcalc = Module(new MemAddrCalcUnit)
maddrcalc.io.req <> io.req
maddrcalc.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_MEM)
maddrcalc.io.brupdate <> io.brupdate
maddrcalc.io.status := io.status
maddrcalc.io.bp := io.bp
maddrcalc.io.mcontext := io.mcontext
maddrcalc.io.scontext := io.scontext
maddrcalc.io.resp.ready := DontCare
require(numBypassStages == 0)
io.lsu_io.req := maddrcalc.io.resp
io.ll_iresp <> io.lsu_io.iresp
if (usingFPU) {
io.ll_fresp <> io.lsu_io.fresp
}
}
// Outputs (Write Port #0) ---------------
if (writesIrf) {
io.iresp.valid := iresp_fu_units.map(_.io.resp.valid).reduce(_|_)
io.iresp.bits.uop := PriorityMux(iresp_fu_units.map(f =>
(f.io.resp.valid, f.io.resp.bits.uop)).toSeq)
io.iresp.bits.data := PriorityMux(iresp_fu_units.map(f =>
(f.io.resp.valid, f.io.resp.bits.data)).toSeq)
io.iresp.bits.predicated := PriorityMux(iresp_fu_units.map(f =>
(f.io.resp.valid, f.io.resp.bits.predicated)).toSeq)
// pulled out for critical path reasons
// TODO: Does this make sense as part of the iresp bundle?
if (hasAlu) {
io.iresp.bits.uop.csr_addr := ImmGen(alu.io.resp.bits.uop.imm_packed, IS_I).asUInt
io.iresp.bits.uop.ctrl.csr_cmd := alu.io.resp.bits.uop.ctrl.csr_cmd
}
}
assert ((PopCount(iresp_fu_units.map(_.io.resp.valid)) <= 1.U && !div_resp_val) ||
(PopCount(iresp_fu_units.map(_.io.resp.valid)) <= 2.U && (div_resp_val)),
"Multiple functional units are fighting over the write port.")
}
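// --- Illustrative addition (not part of the original BOOM sources) ---
// A generic sketch of the write-port sharing pattern used above: several
// functional units share one response port, PriorityMux selects the payload of
// the highest-priority valid producer, and (as the assertion above requires)
// at most one producer may be valid in any cycle. Module and port names are
// illustrative only.
class SharedWritePortExample extends Module {
  val io = IO(new Bundle {
    val in_valid  = Input(Vec(3, Bool()))
    val in_data   = Input(Vec(3, UInt(64.W)))
    val out_valid = Output(Bool())
    val out_data  = Output(UInt(64.W))
  })
  io.out_valid := io.in_valid.reduce(_ || _)
  // Same selection style as the io.iresp muxing in ALUExeUnit
  io.out_data  := PriorityMux(io.in_valid.zip(io.in_data))
}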
/**
* FPU-only unit, with optional second write-port for ToInt micro-ops.
*
 * @param hasFpu does the exe unit have an FPU
 * @param hasFdiv does the exe unit have an FP divider
 * @param hasFpiu does the exe unit have an FP-to-int unit
*/
class FPUExeUnit(
hasFpu : Boolean = true,
hasFdiv : Boolean = false,
hasFpiu : Boolean = false
)
(implicit p: Parameters)
extends ExecutionUnit(
readsFrf = true,
writesFrf = true,
writesLlIrf = hasFpiu,
writesIrf = false,
numBypassStages = 0,
dataWidth = p(tile.TileKey).core.fpu.get.fLen + 1,
bypassable = false,
hasFpu = hasFpu,
hasFdiv = hasFdiv,
hasFpiu = hasFpiu) with tile.HasFPUParameters
{
val out_str =
    BoomCoreStringPrefix("==ExeUnit==") +
(if (hasFpu) BoomCoreStringPrefix("- FPU (Latency: " + dfmaLatency + ")") else "") +
(if (hasFdiv) BoomCoreStringPrefix("- FDiv/FSqrt") else "") +
(if (hasFpiu) BoomCoreStringPrefix("- FPIU (writes to Integer RF)") else "")
val fdiv_busy = WireInit(false.B)
val fpiu_busy = WireInit(false.B)
// The Functional Units --------------------
val fu_units = ArrayBuffer[FunctionalUnit]()
io.fu_types := Mux(hasFpu.B, FU_FPU, 0.U) |
Mux(!fdiv_busy && hasFdiv.B, FU_FDV, 0.U) |
Mux(!fpiu_busy && hasFpiu.B, FU_F2I, 0.U)
// FPU Unit -----------------------
var fpu: FPUUnit = null
val fpu_resp_val = WireInit(false.B)
val fpu_resp_fflags = Wire(new ValidIO(new FFlagsResp()))
fpu_resp_fflags.valid := false.B
if (hasFpu) {
fpu = Module(new FPUUnit())
fpu.io.req.valid := io.req.valid &&
(io.req.bits.uop.fu_code_is(FU_FPU) ||
io.req.bits.uop.fu_code_is(FU_F2I)) // TODO move to using a separate unit
fpu.io.req.bits.uop := io.req.bits.uop
fpu.io.req.bits.rs1_data := io.req.bits.rs1_data
fpu.io.req.bits.rs2_data := io.req.bits.rs2_data
fpu.io.req.bits.rs3_data := io.req.bits.rs3_data
fpu.io.req.bits.pred_data := false.B
fpu.io.req.bits.kill := io.req.bits.kill
fpu.io.fcsr_rm := io.fcsr_rm
fpu.io.brupdate := io.brupdate
fpu.io.resp.ready := DontCare
fpu_resp_val := fpu.io.resp.valid
fpu_resp_fflags := fpu.io.resp.bits.fflags
fu_units += fpu
}
// FDiv/FSqrt Unit -----------------------
var fdivsqrt: FDivSqrtUnit = null
val fdiv_resp_fflags = Wire(new ValidIO(new FFlagsResp()))
fdiv_resp_fflags := DontCare
fdiv_resp_fflags.valid := false.B
if (hasFdiv) {
fdivsqrt = Module(new FDivSqrtUnit())
fdivsqrt.io.req.valid := io.req.valid && io.req.bits.uop.fu_code_is(FU_FDV)
fdivsqrt.io.req.bits.uop := io.req.bits.uop
fdivsqrt.io.req.bits.rs1_data := io.req.bits.rs1_data
fdivsqrt.io.req.bits.rs2_data := io.req.bits.rs2_data
fdivsqrt.io.req.bits.rs3_data := DontCare
fdivsqrt.io.req.bits.pred_data := false.B
fdivsqrt.io.req.bits.kill := io.req.bits.kill
fdivsqrt.io.fcsr_rm := io.fcsr_rm
fdivsqrt.io.brupdate := io.brupdate
// share write port with the pipelined units
fdivsqrt.io.resp.ready := !(fu_units.map(_.io.resp.valid).reduce(_|_)) // TODO PERF will get blocked by fpiu.
fdiv_busy := !fdivsqrt.io.req.ready || (io.req.valid && io.req.bits.uop.fu_code_is(FU_FDV))
fdiv_resp_fflags := fdivsqrt.io.resp.bits.fflags
fu_units += fdivsqrt
}
// Outputs (Write Port #0) ---------------
io.fresp.valid := fu_units.map(_.io.resp.valid).reduce(_|_) &&
!(fpu.io.resp.valid && fpu.io.resp.bits.uop.fu_code_is(FU_F2I))
io.fresp.bits.uop := PriorityMux(fu_units.map(f => (f.io.resp.valid,
f.io.resp.bits.uop)).toSeq)
io.fresp.bits.data:= PriorityMux(fu_units.map(f => (f.io.resp.valid, f.io.resp.bits.data)).toSeq)
io.fresp.bits.fflags := Mux(fpu_resp_val, fpu_resp_fflags, fdiv_resp_fflags)
// Outputs (Write Port #1) -- FpToInt Queuing Unit -----------------------
if (hasFpiu) {
// TODO instantiate our own fpiu; and remove it from fpu.scala.
// buffer up results since we share write-port on integer regfile.
val queue = Module(new BranchKillableQueue(new ExeUnitResp(dataWidth),
entries = dfmaLatency + 3)) // TODO being overly conservative
queue.io.enq.valid := (fpu.io.resp.valid &&
fpu.io.resp.bits.uop.fu_code_is(FU_F2I) &&
fpu.io.resp.bits.uop.uopc =/= uopSTA) // STA means store data gen for floating point
queue.io.enq.bits.uop := fpu.io.resp.bits.uop
queue.io.enq.bits.data := fpu.io.resp.bits.data
queue.io.enq.bits.predicated := fpu.io.resp.bits.predicated
queue.io.enq.bits.fflags := fpu.io.resp.bits.fflags
queue.io.brupdate := io.brupdate
queue.io.flush := io.req.bits.kill
assert (queue.io.enq.ready) // If this backs up, we've miscalculated the size of the queue.
val fp_sdq = Module(new BranchKillableQueue(new ExeUnitResp(dataWidth),
entries = 3)) // Lets us backpressure floating point store data
fp_sdq.io.enq.valid := io.req.valid && io.req.bits.uop.uopc === uopSTA && !IsKilledByBranch(io.brupdate, io.req.bits.uop)
fp_sdq.io.enq.bits.uop := io.req.bits.uop
fp_sdq.io.enq.bits.data := ieee(io.req.bits.rs2_data)
fp_sdq.io.enq.bits.predicated := false.B
fp_sdq.io.enq.bits.fflags := DontCare
fp_sdq.io.brupdate := io.brupdate
fp_sdq.io.flush := io.req.bits.kill
assert(!(fp_sdq.io.enq.valid && !fp_sdq.io.enq.ready))
val resp_arb = Module(new Arbiter(new ExeUnitResp(dataWidth), 2))
resp_arb.io.in(0) <> queue.io.deq
resp_arb.io.in(1) <> fp_sdq.io.deq
io.ll_iresp <> resp_arb.io.out
fpiu_busy := !(queue.io.empty && fp_sdq.io.empty)
}
override def toString: String = out_str.toString
}
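// --- Illustrative addition (not part of the original BOOM sources) ---
// The FpToInt path above buffers results in a queue because the integer
// register file write port is shared, then arbitrates between the queue and
// the FP store-data path. A generic (non-branch-killable) sketch of the same
// buffering-plus-arbitration idea, using stock chisel3.util components; names
// and widths are illustrative only.
class BufferedWritebackExample extends Module {
  val io = IO(new Bundle {
    val fast = Flipped(Decoupled(UInt(64.W)))  // latency-sensitive producer
    val slow = Flipped(Decoupled(UInt(64.W)))  // producer whose results can be buffered
    val out  = Decoupled(UInt(64.W))           // single shared write port
  })
  val buf = Queue(io.slow, entries = 4)        // absorb bursts from the slow producer
  val arb = Module(new Arbiter(UInt(64.W), 2))
  arb.io.in(0) <> io.fast
  arb.io.in(1) <> buf
  io.out <> arb.io.out
}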
File micro-op.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// MicroOp
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.common
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.exu.FUConstants
/**
* Extension to BoomBundle to add a MicroOp
*/
abstract trait HasBoomUOP extends BoomBundle
{
val uop = new MicroOp()
}
/**
* MicroOp passing through the pipeline
*/
class MicroOp(implicit p: Parameters) extends BoomBundle
with freechips.rocketchip.rocket.constants.MemoryOpConstants
with freechips.rocketchip.rocket.constants.ScalarOpConstants
{
val uopc = UInt(UOPC_SZ.W) // micro-op code
val inst = UInt(32.W)
val debug_inst = UInt(32.W)
val is_rvc = Bool()
val debug_pc = UInt(coreMaxAddrBits.W)
val iq_type = UInt(IQT_SZ.W) // which issue unit do we use?
val fu_code = UInt(FUConstants.FUC_SZ.W) // which functional unit do we use?
val ctrl = new CtrlSignals
// What is the next state of this uop in the issue window? useful
// for the compacting queue.
val iw_state = UInt(2.W)
  // Has operand 1 or 2 been woken speculatively by a load?
  // Only integer operands are speculatively woken up,
// so we can ignore p3.
val iw_p1_poisoned = Bool()
val iw_p2_poisoned = Bool()
val is_br = Bool() // is this micro-op a (branch) vs a regular PC+4 inst?
val is_jalr = Bool() // is this a jump? (jal or jalr)
val is_jal = Bool() // is this a JAL (doesn't include JR)? used for branch unit
val is_sfb = Bool() // is this a sfb or in the shadow of a sfb
val br_mask = UInt(maxBrCount.W) // which branches are we being speculated under?
val br_tag = UInt(brTagSz.W)
// Index into FTQ to figure out our fetch PC.
val ftq_idx = UInt(log2Ceil(ftqSz).W)
// This inst straddles two fetch packets
val edge_inst = Bool()
// Low-order bits of our own PC. Combine with ftq[ftq_idx] to get PC.
// Aligned to a cache-line size, as that is the greater fetch granularity.
// TODO: Shouldn't this be aligned to fetch-width size?
val pc_lob = UInt(log2Ceil(icBlockBytes).W)
// Was this a branch that was predicted taken?
val taken = Bool()
val imm_packed = UInt(LONGEST_IMM_SZ.W) // densely pack the imm in decode...
// then translate and sign-extend in execute
val csr_addr = UInt(CSR_ADDR_SZ.W) // only used for critical path reasons in Exe
val rob_idx = UInt(robAddrSz.W)
val ldq_idx = UInt(ldqAddrSz.W)
val stq_idx = UInt(stqAddrSz.W)
val rxq_idx = UInt(log2Ceil(numRxqEntries).W)
val pdst = UInt(maxPregSz.W)
val prs1 = UInt(maxPregSz.W)
val prs2 = UInt(maxPregSz.W)
val prs3 = UInt(maxPregSz.W)
val ppred = UInt(log2Ceil(ftqSz).W)
val prs1_busy = Bool()
val prs2_busy = Bool()
val prs3_busy = Bool()
val ppred_busy = Bool()
val stale_pdst = UInt(maxPregSz.W)
val exception = Bool()
val exc_cause = UInt(xLen.W) // TODO compress this down, xlen is insanity
val bypassable = Bool() // can we bypass ALU results? (doesn't include loads, csr, etc...)
val mem_cmd = UInt(M_SZ.W) // sync primitives/cache flushes
val mem_size = UInt(2.W)
val mem_signed = Bool()
val is_fence = Bool()
val is_fencei = Bool()
val is_amo = Bool()
val uses_ldq = Bool()
val uses_stq = Bool()
val is_sys_pc2epc = Bool() // Is a ECall or Breakpoint -- both set EPC to PC.
val is_unique = Bool() // only allow this instruction in the pipeline, wait for STQ to
                              // drain, clear fetch after it (tell ROB to un-ready until empty)
val flush_on_commit = Bool() // some instructions need to flush the pipeline behind them
  // Predication
def is_sfb_br = is_br && is_sfb && enableSFBOpt.B // Does this write a predicate
def is_sfb_shadow = !is_br && is_sfb && enableSFBOpt.B // Is this predicated
val ldst_is_rs1 = Bool() // If this is set and we are predicated off, copy rs1 to dst,
// else copy rs2 to dst
// logical specifiers (only used in Decode->Rename), except rollback (ldst)
val ldst = UInt(lregSz.W)
val lrs1 = UInt(lregSz.W)
val lrs2 = UInt(lregSz.W)
val lrs3 = UInt(lregSz.W)
val ldst_val = Bool() // is there a destination? invalid for stores, rd==x0, etc.
val dst_rtype = UInt(2.W)
val lrs1_rtype = UInt(2.W)
val lrs2_rtype = UInt(2.W)
val frs3_en = Bool()
// floating point information
val fp_val = Bool() // is a floating-point instruction (F- or D-extension)?
// If it's non-ld/st it will write back exception bits to the fcsr.
val fp_single = Bool() // single-precision floating point instruction (F-extension)
// frontend exception information
val xcpt_pf_if = Bool() // I-TLB page fault.
val xcpt_ae_if = Bool() // I$ access exception.
  val xcpt_ma_if = Bool() // Misaligned fetch (jal/br jumping to misaligned addr).
val bp_debug_if = Bool() // Breakpoint
val bp_xcpt_if = Bool() // Breakpoint
// What prediction structure provides the prediction FROM this op
val debug_fsrc = UInt(BSRC_SZ.W)
// What prediction structure provides the prediction TO this op
val debug_tsrc = UInt(BSRC_SZ.W)
// Do we allocate a branch tag for this?
// SFB branches don't get a mask, they get a predicate bit
def allocate_brtag = (is_br && !is_sfb) || is_jalr
// Does this register write-back
def rf_wen = dst_rtype =/= RT_X
// Is it possible for this uop to misspeculate, preventing the commit of subsequent uops?
def unsafe = uses_ldq || (uses_stq && !is_fence) || is_br || is_jalr
def fu_code_is(_fu: UInt) = (fu_code & _fu) =/= 0.U
}
/**
* Control signals within a MicroOp
*
* TODO REFACTOR this, as this should no longer be true, as bypass occurs in stage before branch resolution
*/
class CtrlSignals extends Bundle()
{
val br_type = UInt(BR_N.getWidth.W)
val op1_sel = UInt(OP1_X.getWidth.W)
val op2_sel = UInt(OP2_X.getWidth.W)
val imm_sel = UInt(IS_X.getWidth.W)
val op_fcn = UInt(freechips.rocketchip.rocket.ALU.SZ_ALU_FN.W)
val fcn_dw = Bool()
val csr_cmd = UInt(freechips.rocketchip.rocket.CSR.SZ.W)
val is_load = Bool() // will invoke TLB address lookup
val is_sta = Bool() // will invoke TLB address lookup
val is_std = Bool()
}
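// --- Illustrative addition (not part of the original BOOM sources) ---
// fu_code is a bit mask with one bit per functional-unit class
// (FUConstants.FUC_SZ bits wide, 10 in this configuration), so fu_code_is
// reduces to an AND followed by a non-zero check rather than an equality
// compare. A standalone sketch of the same test; the module and port names
// are illustrative only.
class FuCodeMatchExample extends Module {
  val io = IO(new Bundle {
    val fu_code  = Input(UInt(FUConstants.FUC_SZ.W))  // mask carried by the uop
    val fu_types = Input(UInt(FUConstants.FUC_SZ.W))  // capabilities advertised by an exe unit
    val matches  = Output(Bool())
  })
  // Same predicate as MicroOp.fu_code_is: (fu_code & _fu) =/= 0.U
  io.matches := (io.fu_code & io.fu_types).orR
}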
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
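// --- Illustrative addition (not part of the original hardfloat sources) ---
// A small sketch of how rawFloatFromRecFN is consumed: recoded double-precision
// operands in this design are 65 bits wide (11 + 53 + 1, matching the 65-bit
// datapaths of the FPU execution unit below), and the returned RawFloat sets at
// most one of isNaN / isInf / isZero. Module and port names are illustrative only.
class RecFN64Classify extends Module {
  val io = IO(new Bundle {
    val in     = Input(UInt(65.W))  // recoded double-precision operand
    val isNaN  = Output(Bool())
    val isInf  = Output(Bool())
    val isZero = Output(Bool())
    val sign   = Output(Bool())
  })
  val raw = rawFloatFromRecFN(11, 53, io.in)
  io.isNaN  := raw.isNaN
  io.isInf  := raw.isInf
  io.isZero := raw.isZero
  io.sign   := raw.sign
}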
| module FPUExeUnit_1( // @[execution-unit.scala:437:7]
input clock, // @[execution-unit.scala:437:7]
input reset, // @[execution-unit.scala:437:7]
output [9:0] io_fu_types, // @[execution-unit.scala:104:14]
input io_req_valid, // @[execution-unit.scala:104:14]
input [6:0] io_req_bits_uop_uopc, // @[execution-unit.scala:104:14]
input [31:0] io_req_bits_uop_inst, // @[execution-unit.scala:104:14]
input [31:0] io_req_bits_uop_debug_inst, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_rvc, // @[execution-unit.scala:104:14]
input [39:0] io_req_bits_uop_debug_pc, // @[execution-unit.scala:104:14]
input [2:0] io_req_bits_uop_iq_type, // @[execution-unit.scala:104:14]
input [9:0] io_req_bits_uop_fu_code, // @[execution-unit.scala:104:14]
input [3:0] io_req_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14]
input [2:0] io_req_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14]
input [2:0] io_req_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14]
input [4:0] io_req_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14]
input [2:0] io_req_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_iw_state, // @[execution-unit.scala:104:14]
input io_req_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14]
input io_req_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_br, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_jalr, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_jal, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_sfb, // @[execution-unit.scala:104:14]
input [7:0] io_req_bits_uop_br_mask, // @[execution-unit.scala:104:14]
input [2:0] io_req_bits_uop_br_tag, // @[execution-unit.scala:104:14]
input [3:0] io_req_bits_uop_ftq_idx, // @[execution-unit.scala:104:14]
input io_req_bits_uop_edge_inst, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_pc_lob, // @[execution-unit.scala:104:14]
input io_req_bits_uop_taken, // @[execution-unit.scala:104:14]
input [19:0] io_req_bits_uop_imm_packed, // @[execution-unit.scala:104:14]
input [11:0] io_req_bits_uop_csr_addr, // @[execution-unit.scala:104:14]
input [4:0] io_req_bits_uop_rob_idx, // @[execution-unit.scala:104:14]
input [2:0] io_req_bits_uop_ldq_idx, // @[execution-unit.scala:104:14]
input [2:0] io_req_bits_uop_stq_idx, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_rxq_idx, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_pdst, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_prs1, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_prs2, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_prs3, // @[execution-unit.scala:104:14]
input [3:0] io_req_bits_uop_ppred, // @[execution-unit.scala:104:14]
input io_req_bits_uop_prs1_busy, // @[execution-unit.scala:104:14]
input io_req_bits_uop_prs2_busy, // @[execution-unit.scala:104:14]
input io_req_bits_uop_prs3_busy, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ppred_busy, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_stale_pdst, // @[execution-unit.scala:104:14]
input io_req_bits_uop_exception, // @[execution-unit.scala:104:14]
input [63:0] io_req_bits_uop_exc_cause, // @[execution-unit.scala:104:14]
input io_req_bits_uop_bypassable, // @[execution-unit.scala:104:14]
input [4:0] io_req_bits_uop_mem_cmd, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_mem_size, // @[execution-unit.scala:104:14]
input io_req_bits_uop_mem_signed, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_fence, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_fencei, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_amo, // @[execution-unit.scala:104:14]
input io_req_bits_uop_uses_ldq, // @[execution-unit.scala:104:14]
input io_req_bits_uop_uses_stq, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14]
input io_req_bits_uop_is_unique, // @[execution-unit.scala:104:14]
input io_req_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_ldst, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_lrs1, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_lrs2, // @[execution-unit.scala:104:14]
input [5:0] io_req_bits_uop_lrs3, // @[execution-unit.scala:104:14]
input io_req_bits_uop_ldst_val, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_dst_rtype, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14]
input io_req_bits_uop_frs3_en, // @[execution-unit.scala:104:14]
input io_req_bits_uop_fp_val, // @[execution-unit.scala:104:14]
input io_req_bits_uop_fp_single, // @[execution-unit.scala:104:14]
input io_req_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14]
input io_req_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14]
input io_req_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14]
input io_req_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14]
input io_req_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14]
input [1:0] io_req_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14]
input [64:0] io_req_bits_rs1_data, // @[execution-unit.scala:104:14]
input [64:0] io_req_bits_rs2_data, // @[execution-unit.scala:104:14]
input [64:0] io_req_bits_rs3_data, // @[execution-unit.scala:104:14]
input io_req_bits_kill, // @[execution-unit.scala:104:14]
output io_fresp_valid, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_uop_uopc, // @[execution-unit.scala:104:14]
output [31:0] io_fresp_bits_uop_inst, // @[execution-unit.scala:104:14]
output [31:0] io_fresp_bits_uop_debug_inst, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_rvc, // @[execution-unit.scala:104:14]
output [39:0] io_fresp_bits_uop_debug_pc, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_uop_iq_type, // @[execution-unit.scala:104:14]
output [9:0] io_fresp_bits_uop_fu_code, // @[execution-unit.scala:104:14]
output [3:0] io_fresp_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_iw_state, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_br, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_jalr, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_jal, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_sfb, // @[execution-unit.scala:104:14]
output [7:0] io_fresp_bits_uop_br_mask, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_uop_br_tag, // @[execution-unit.scala:104:14]
output [3:0] io_fresp_bits_uop_ftq_idx, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_edge_inst, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_pc_lob, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_taken, // @[execution-unit.scala:104:14]
output [19:0] io_fresp_bits_uop_imm_packed, // @[execution-unit.scala:104:14]
output [11:0] io_fresp_bits_uop_csr_addr, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_uop_rob_idx, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_uop_ldq_idx, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_uop_stq_idx, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_rxq_idx, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_pdst, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_prs1, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_prs2, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_prs3, // @[execution-unit.scala:104:14]
output [3:0] io_fresp_bits_uop_ppred, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_prs1_busy, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_prs2_busy, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_prs3_busy, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ppred_busy, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_stale_pdst, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_exception, // @[execution-unit.scala:104:14]
output [63:0] io_fresp_bits_uop_exc_cause, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_bypassable, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_uop_mem_cmd, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_mem_size, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_mem_signed, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_fence, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_fencei, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_amo, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_uses_ldq, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_uses_stq, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_is_unique, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_ldst, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_lrs1, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_lrs2, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_uop_lrs3, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_ldst_val, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_dst_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_frs3_en, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_fp_val, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_fp_single, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14]
output [64:0] io_fresp_bits_data, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_valid, // @[execution-unit.scala:104:14]
output [6:0] io_fresp_bits_fflags_bits_uop_uopc, // @[execution-unit.scala:104:14]
output [31:0] io_fresp_bits_fflags_bits_uop_inst, // @[execution-unit.scala:104:14]
output [31:0] io_fresp_bits_fflags_bits_uop_debug_inst, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_rvc, // @[execution-unit.scala:104:14]
output [39:0] io_fresp_bits_fflags_bits_uop_debug_pc, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_fflags_bits_uop_iq_type, // @[execution-unit.scala:104:14]
output [9:0] io_fresp_bits_fflags_bits_uop_fu_code, // @[execution-unit.scala:104:14]
output [3:0] io_fresp_bits_fflags_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_fflags_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_fflags_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_fflags_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_fflags_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_iw_state, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_br, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_jalr, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_jal, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_sfb, // @[execution-unit.scala:104:14]
output [7:0] io_fresp_bits_fflags_bits_uop_br_mask, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_fflags_bits_uop_br_tag, // @[execution-unit.scala:104:14]
output [3:0] io_fresp_bits_fflags_bits_uop_ftq_idx, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_edge_inst, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_pc_lob, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_taken, // @[execution-unit.scala:104:14]
output [19:0] io_fresp_bits_fflags_bits_uop_imm_packed, // @[execution-unit.scala:104:14]
output [11:0] io_fresp_bits_fflags_bits_uop_csr_addr, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_fflags_bits_uop_rob_idx, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_fflags_bits_uop_ldq_idx, // @[execution-unit.scala:104:14]
output [2:0] io_fresp_bits_fflags_bits_uop_stq_idx, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_rxq_idx, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_pdst, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_prs1, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_prs2, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_prs3, // @[execution-unit.scala:104:14]
output [3:0] io_fresp_bits_fflags_bits_uop_ppred, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_prs1_busy, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_prs2_busy, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_prs3_busy, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ppred_busy, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_stale_pdst, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_exception, // @[execution-unit.scala:104:14]
output [63:0] io_fresp_bits_fflags_bits_uop_exc_cause, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_bypassable, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_fflags_bits_uop_mem_cmd, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_mem_size, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_mem_signed, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_fence, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_fencei, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_amo, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_uses_ldq, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_uses_stq, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_is_unique, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_ldst, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_lrs1, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_lrs2, // @[execution-unit.scala:104:14]
output [5:0] io_fresp_bits_fflags_bits_uop_lrs3, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_ldst_val, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_dst_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_frs3_en, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_fp_val, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_fp_single, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14]
output io_fresp_bits_fflags_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14]
output [1:0] io_fresp_bits_fflags_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14]
output [4:0] io_fresp_bits_fflags_bits_flags, // @[execution-unit.scala:104:14]
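  // Long-latency integer response port (decoupled ready/valid)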
input io_ll_iresp_ready, // @[execution-unit.scala:104:14]
output io_ll_iresp_valid, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_uop_uopc, // @[execution-unit.scala:104:14]
output [31:0] io_ll_iresp_bits_uop_inst, // @[execution-unit.scala:104:14]
output [31:0] io_ll_iresp_bits_uop_debug_inst, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_rvc, // @[execution-unit.scala:104:14]
output [39:0] io_ll_iresp_bits_uop_debug_pc, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_uop_iq_type, // @[execution-unit.scala:104:14]
output [9:0] io_ll_iresp_bits_uop_fu_code, // @[execution-unit.scala:104:14]
output [3:0] io_ll_iresp_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_iw_state, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_br, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_jalr, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_jal, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_sfb, // @[execution-unit.scala:104:14]
output [7:0] io_ll_iresp_bits_uop_br_mask, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_uop_br_tag, // @[execution-unit.scala:104:14]
output [3:0] io_ll_iresp_bits_uop_ftq_idx, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_edge_inst, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_pc_lob, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_taken, // @[execution-unit.scala:104:14]
output [19:0] io_ll_iresp_bits_uop_imm_packed, // @[execution-unit.scala:104:14]
output [11:0] io_ll_iresp_bits_uop_csr_addr, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_uop_rob_idx, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_uop_ldq_idx, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_uop_stq_idx, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_rxq_idx, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_pdst, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_prs1, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_prs2, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_prs3, // @[execution-unit.scala:104:14]
output [3:0] io_ll_iresp_bits_uop_ppred, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_prs1_busy, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_prs2_busy, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_prs3_busy, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ppred_busy, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_stale_pdst, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_exception, // @[execution-unit.scala:104:14]
output [63:0] io_ll_iresp_bits_uop_exc_cause, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_bypassable, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_uop_mem_cmd, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_mem_size, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_mem_signed, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_fence, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_fencei, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_amo, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_uses_ldq, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_uses_stq, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_is_unique, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_ldst, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_lrs1, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_lrs2, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_uop_lrs3, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_ldst_val, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_dst_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_frs3_en, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_fp_val, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_fp_single, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14]
output [64:0] io_ll_iresp_bits_data, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_predicated, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_valid, // @[execution-unit.scala:104:14]
output [6:0] io_ll_iresp_bits_fflags_bits_uop_uopc, // @[execution-unit.scala:104:14]
output [31:0] io_ll_iresp_bits_fflags_bits_uop_inst, // @[execution-unit.scala:104:14]
output [31:0] io_ll_iresp_bits_fflags_bits_uop_debug_inst, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_rvc, // @[execution-unit.scala:104:14]
output [39:0] io_ll_iresp_bits_fflags_bits_uop_debug_pc, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_fflags_bits_uop_iq_type, // @[execution-unit.scala:104:14]
output [9:0] io_ll_iresp_bits_fflags_bits_uop_fu_code, // @[execution-unit.scala:104:14]
output [3:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_br_type, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ctrl_is_load, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ctrl_is_sta, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ctrl_is_std, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_iw_state, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_br, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_jalr, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_jal, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_sfb, // @[execution-unit.scala:104:14]
output [7:0] io_ll_iresp_bits_fflags_bits_uop_br_mask, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_fflags_bits_uop_br_tag, // @[execution-unit.scala:104:14]
output [3:0] io_ll_iresp_bits_fflags_bits_uop_ftq_idx, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_edge_inst, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_pc_lob, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_taken, // @[execution-unit.scala:104:14]
output [19:0] io_ll_iresp_bits_fflags_bits_uop_imm_packed, // @[execution-unit.scala:104:14]
output [11:0] io_ll_iresp_bits_fflags_bits_uop_csr_addr, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_fflags_bits_uop_rob_idx, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_fflags_bits_uop_ldq_idx, // @[execution-unit.scala:104:14]
output [2:0] io_ll_iresp_bits_fflags_bits_uop_stq_idx, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_rxq_idx, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_pdst, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_prs1, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_prs2, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_prs3, // @[execution-unit.scala:104:14]
output [3:0] io_ll_iresp_bits_fflags_bits_uop_ppred, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_prs1_busy, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_prs2_busy, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_prs3_busy, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ppred_busy, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_stale_pdst, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_exception, // @[execution-unit.scala:104:14]
output [63:0] io_ll_iresp_bits_fflags_bits_uop_exc_cause, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_bypassable, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_fflags_bits_uop_mem_cmd, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_mem_size, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_mem_signed, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_fence, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_fencei, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_amo, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_uses_ldq, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_uses_stq, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_is_unique, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_flush_on_commit, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ldst_is_rs1, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_ldst, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_lrs1, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_lrs2, // @[execution-unit.scala:104:14]
output [5:0] io_ll_iresp_bits_fflags_bits_uop_lrs3, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_ldst_val, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_dst_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_lrs1_rtype, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_lrs2_rtype, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_frs3_en, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_fp_val, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_fp_single, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_xcpt_pf_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_xcpt_ae_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_xcpt_ma_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_bp_debug_if, // @[execution-unit.scala:104:14]
output io_ll_iresp_bits_fflags_bits_uop_bp_xcpt_if, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_debug_fsrc, // @[execution-unit.scala:104:14]
output [1:0] io_ll_iresp_bits_fflags_bits_uop_debug_tsrc, // @[execution-unit.scala:104:14]
output [4:0] io_ll_iresp_bits_fflags_bits_flags, // @[execution-unit.scala:104:14]
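  // Branch resolution and misprediction update (brupdate) inputs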
input [7:0] io_brupdate_b1_resolve_mask, // @[execution-unit.scala:104:14]
input [7:0] io_brupdate_b1_mispredict_mask, // @[execution-unit.scala:104:14]
input [6:0] io_brupdate_b2_uop_uopc, // @[execution-unit.scala:104:14]
input [31:0] io_brupdate_b2_uop_inst, // @[execution-unit.scala:104:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_rvc, // @[execution-unit.scala:104:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[execution-unit.scala:104:14]
input [2:0] io_brupdate_b2_uop_iq_type, // @[execution-unit.scala:104:14]
input [9:0] io_brupdate_b2_uop_fu_code, // @[execution-unit.scala:104:14]
input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[execution-unit.scala:104:14]
input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[execution-unit.scala:104:14]
input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[execution-unit.scala:104:14]
input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ctrl_fcn_dw, // @[execution-unit.scala:104:14]
input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ctrl_is_load, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ctrl_is_sta, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ctrl_is_std, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_iw_state, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_iw_p1_poisoned, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_iw_p2_poisoned, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_br, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_jalr, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_jal, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_sfb, // @[execution-unit.scala:104:14]
input [7:0] io_brupdate_b2_uop_br_mask, // @[execution-unit.scala:104:14]
input [2:0] io_brupdate_b2_uop_br_tag, // @[execution-unit.scala:104:14]
input [3:0] io_brupdate_b2_uop_ftq_idx, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_edge_inst, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_taken, // @[execution-unit.scala:104:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[execution-unit.scala:104:14]
input [11:0] io_brupdate_b2_uop_csr_addr, // @[execution-unit.scala:104:14]
input [4:0] io_brupdate_b2_uop_rob_idx, // @[execution-unit.scala:104:14]
input [2:0] io_brupdate_b2_uop_ldq_idx, // @[execution-unit.scala:104:14]
input [2:0] io_brupdate_b2_uop_stq_idx, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_pdst, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_prs1, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_prs2, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_prs3, // @[execution-unit.scala:104:14]
input [3:0] io_brupdate_b2_uop_ppred, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_prs1_busy, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_prs2_busy, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_prs3_busy, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ppred_busy, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_stale_pdst, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_exception, // @[execution-unit.scala:104:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_bypassable, // @[execution-unit.scala:104:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_mem_signed, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_fence, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_fencei, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_amo, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_uses_ldq, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_uses_stq, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_is_unique, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_flush_on_commit, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[execution-unit.scala:104:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_ldst_val, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_frs3_en, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_fp_val, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_fp_single, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_bp_debug_if, // @[execution-unit.scala:104:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[execution-unit.scala:104:14]
input io_brupdate_b2_valid, // @[execution-unit.scala:104:14]
input io_brupdate_b2_mispredict, // @[execution-unit.scala:104:14]
input io_brupdate_b2_taken, // @[execution-unit.scala:104:14]
input [2:0] io_brupdate_b2_cfi_type, // @[execution-unit.scala:104:14]
input [1:0] io_brupdate_b2_pc_sel, // @[execution-unit.scala:104:14]
input [39:0] io_brupdate_b2_jalr_target, // @[execution-unit.scala:104:14]
input [20:0] io_brupdate_b2_target_offset, // @[execution-unit.scala:104:14]
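  // Processor status (mstatus) inputs; io_fcsr_rm at the end carries the dynamic FP rounding mode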
input io_status_debug, // @[execution-unit.scala:104:14]
input io_status_cease, // @[execution-unit.scala:104:14]
input io_status_wfi, // @[execution-unit.scala:104:14]
input [1:0] io_status_dprv, // @[execution-unit.scala:104:14]
input io_status_dv, // @[execution-unit.scala:104:14]
input [1:0] io_status_prv, // @[execution-unit.scala:104:14]
input io_status_v, // @[execution-unit.scala:104:14]
input io_status_sd, // @[execution-unit.scala:104:14]
input io_status_mpv, // @[execution-unit.scala:104:14]
input io_status_gva, // @[execution-unit.scala:104:14]
input io_status_tsr, // @[execution-unit.scala:104:14]
input io_status_tw, // @[execution-unit.scala:104:14]
input io_status_tvm, // @[execution-unit.scala:104:14]
input io_status_mxr, // @[execution-unit.scala:104:14]
input io_status_sum, // @[execution-unit.scala:104:14]
input io_status_mprv, // @[execution-unit.scala:104:14]
input [1:0] io_status_fs, // @[execution-unit.scala:104:14]
input [1:0] io_status_mpp, // @[execution-unit.scala:104:14]
input io_status_spp, // @[execution-unit.scala:104:14]
input io_status_mpie, // @[execution-unit.scala:104:14]
input io_status_spie, // @[execution-unit.scala:104:14]
input io_status_mie, // @[execution-unit.scala:104:14]
input io_status_sie, // @[execution-unit.scala:104:14]
input [2:0] io_fcsr_rm // @[execution-unit.scala:104:14]
);
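  // Submodule output wires, declared before the instantiations that drive them: response arbiter (resp_arb) input-ready signals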
wire _resp_arb_io_in_0_ready; // @[execution-unit.scala:563:26]
wire _resp_arb_io_in_1_ready; // @[execution-unit.scala:563:26]
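  // Floating-point store-data queue (fp_sdq) dequeue interface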
wire _fp_sdq_io_enq_ready; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_valid; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_uop_uopc; // @[execution-unit.scala:551:24]
wire [31:0] _fp_sdq_io_deq_bits_uop_inst; // @[execution-unit.scala:551:24]
wire [31:0] _fp_sdq_io_deq_bits_uop_debug_inst; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_rvc; // @[execution-unit.scala:551:24]
wire [39:0] _fp_sdq_io_deq_bits_uop_debug_pc; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_uop_iq_type; // @[execution-unit.scala:551:24]
wire [9:0] _fp_sdq_io_deq_bits_uop_fu_code; // @[execution-unit.scala:551:24]
wire [3:0] _fp_sdq_io_deq_bits_uop_ctrl_br_type; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ctrl_is_load; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ctrl_is_sta; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ctrl_is_std; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_iw_state; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_br; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_jalr; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_jal; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_sfb; // @[execution-unit.scala:551:24]
wire [7:0] _fp_sdq_io_deq_bits_uop_br_mask; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_uop_br_tag; // @[execution-unit.scala:551:24]
wire [3:0] _fp_sdq_io_deq_bits_uop_ftq_idx; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_edge_inst; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_pc_lob; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_taken; // @[execution-unit.scala:551:24]
wire [19:0] _fp_sdq_io_deq_bits_uop_imm_packed; // @[execution-unit.scala:551:24]
wire [11:0] _fp_sdq_io_deq_bits_uop_csr_addr; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_uop_rob_idx; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_uop_ldq_idx; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_uop_stq_idx; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_rxq_idx; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_pdst; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_prs1; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_prs2; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_prs3; // @[execution-unit.scala:551:24]
wire [3:0] _fp_sdq_io_deq_bits_uop_ppred; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_prs1_busy; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_prs2_busy; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_prs3_busy; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ppred_busy; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_stale_pdst; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_exception; // @[execution-unit.scala:551:24]
wire [63:0] _fp_sdq_io_deq_bits_uop_exc_cause; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_bypassable; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_uop_mem_cmd; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_mem_size; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_mem_signed; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_fence; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_fencei; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_amo; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_uses_ldq; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_uses_stq; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_is_unique; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_flush_on_commit; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ldst_is_rs1; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_ldst; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_lrs1; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_lrs2; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_uop_lrs3; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_ldst_val; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_dst_rtype; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_lrs1_rtype; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_lrs2_rtype; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_frs3_en; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_fp_val; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_fp_single; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_xcpt_pf_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_xcpt_ae_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_xcpt_ma_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_bp_debug_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_uop_bp_xcpt_if; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_debug_fsrc; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_uop_debug_tsrc; // @[execution-unit.scala:551:24]
wire [64:0] _fp_sdq_io_deq_bits_data; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_predicated; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_valid; // @[execution-unit.scala:551:24]
wire [6:0] _fp_sdq_io_deq_bits_fflags_bits_uop_uopc; // @[execution-unit.scala:551:24]
wire [31:0] _fp_sdq_io_deq_bits_fflags_bits_uop_inst; // @[execution-unit.scala:551:24]
wire [31:0] _fp_sdq_io_deq_bits_fflags_bits_uop_debug_inst; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_rvc; // @[execution-unit.scala:551:24]
wire [39:0] _fp_sdq_io_deq_bits_fflags_bits_uop_debug_pc; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_fflags_bits_uop_iq_type; // @[execution-unit.scala:551:24]
wire [9:0] _fp_sdq_io_deq_bits_fflags_bits_uop_fu_code; // @[execution-unit.scala:551:24]
wire [3:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_br_type; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_is_load; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_is_sta; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ctrl_is_std; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_iw_state; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_br; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_jalr; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_jal; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_sfb; // @[execution-unit.scala:551:24]
wire [7:0] _fp_sdq_io_deq_bits_fflags_bits_uop_br_mask; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_fflags_bits_uop_br_tag; // @[execution-unit.scala:551:24]
wire [3:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ftq_idx; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_edge_inst; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_pc_lob; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_taken; // @[execution-unit.scala:551:24]
wire [19:0] _fp_sdq_io_deq_bits_fflags_bits_uop_imm_packed; // @[execution-unit.scala:551:24]
wire [11:0] _fp_sdq_io_deq_bits_fflags_bits_uop_csr_addr; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_fflags_bits_uop_rob_idx; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ldq_idx; // @[execution-unit.scala:551:24]
wire [2:0] _fp_sdq_io_deq_bits_fflags_bits_uop_stq_idx; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_rxq_idx; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_pdst; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_prs1; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_prs2; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_prs3; // @[execution-unit.scala:551:24]
wire [3:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ppred; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_prs1_busy; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_prs2_busy; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_prs3_busy; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ppred_busy; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_stale_pdst; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_exception; // @[execution-unit.scala:551:24]
wire [63:0] _fp_sdq_io_deq_bits_fflags_bits_uop_exc_cause; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_bypassable; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_fflags_bits_uop_mem_cmd; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_mem_size; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_mem_signed; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_fence; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_fencei; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_amo; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_uses_ldq; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_uses_stq; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_is_unique; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_flush_on_commit; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ldst_is_rs1; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_ldst; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_lrs1; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_lrs2; // @[execution-unit.scala:551:24]
wire [5:0] _fp_sdq_io_deq_bits_fflags_bits_uop_lrs3; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_ldst_val; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_dst_rtype; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_lrs1_rtype; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_lrs2_rtype; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_frs3_en; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_fp_val; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_fp_single; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_xcpt_pf_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_xcpt_ae_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_xcpt_ma_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_bp_debug_if; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_deq_bits_fflags_bits_uop_bp_xcpt_if; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_debug_fsrc; // @[execution-unit.scala:551:24]
wire [1:0] _fp_sdq_io_deq_bits_fflags_bits_uop_debug_tsrc; // @[execution-unit.scala:551:24]
wire [4:0] _fp_sdq_io_deq_bits_fflags_bits_flags; // @[execution-unit.scala:551:24]
wire _fp_sdq_io_empty; // @[execution-unit.scala:551:24]
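  // FP response buffering queue (queue) dequeue interface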
wire _queue_io_enq_ready; // @[execution-unit.scala:537:23]
wire _queue_io_deq_valid; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_uop_uopc; // @[execution-unit.scala:537:23]
wire [31:0] _queue_io_deq_bits_uop_inst; // @[execution-unit.scala:537:23]
wire [31:0] _queue_io_deq_bits_uop_debug_inst; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_rvc; // @[execution-unit.scala:537:23]
wire [39:0] _queue_io_deq_bits_uop_debug_pc; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_uop_iq_type; // @[execution-unit.scala:537:23]
wire [9:0] _queue_io_deq_bits_uop_fu_code; // @[execution-unit.scala:537:23]
wire [3:0] _queue_io_deq_bits_uop_ctrl_br_type; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ctrl_is_load; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ctrl_is_sta; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ctrl_is_std; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_iw_state; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_br; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_jalr; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_jal; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_sfb; // @[execution-unit.scala:537:23]
wire [7:0] _queue_io_deq_bits_uop_br_mask; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_uop_br_tag; // @[execution-unit.scala:537:23]
wire [3:0] _queue_io_deq_bits_uop_ftq_idx; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_edge_inst; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_pc_lob; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_taken; // @[execution-unit.scala:537:23]
wire [19:0] _queue_io_deq_bits_uop_imm_packed; // @[execution-unit.scala:537:23]
wire [11:0] _queue_io_deq_bits_uop_csr_addr; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_uop_rob_idx; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_uop_ldq_idx; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_uop_stq_idx; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_rxq_idx; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_pdst; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_prs1; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_prs2; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_prs3; // @[execution-unit.scala:537:23]
wire [3:0] _queue_io_deq_bits_uop_ppred; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_prs1_busy; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_prs2_busy; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_prs3_busy; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ppred_busy; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_stale_pdst; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_exception; // @[execution-unit.scala:537:23]
wire [63:0] _queue_io_deq_bits_uop_exc_cause; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_bypassable; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_uop_mem_cmd; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_mem_size; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_mem_signed; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_fence; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_fencei; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_amo; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_uses_ldq; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_uses_stq; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_is_unique; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_flush_on_commit; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ldst_is_rs1; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_ldst; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_lrs1; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_lrs2; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_uop_lrs3; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_ldst_val; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_dst_rtype; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_lrs1_rtype; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_lrs2_rtype; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_frs3_en; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_fp_val; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_fp_single; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_xcpt_pf_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_xcpt_ae_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_xcpt_ma_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_bp_debug_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_uop_bp_xcpt_if; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_debug_fsrc; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_uop_debug_tsrc; // @[execution-unit.scala:537:23]
wire [64:0] _queue_io_deq_bits_data; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_predicated; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_valid; // @[execution-unit.scala:537:23]
wire [6:0] _queue_io_deq_bits_fflags_bits_uop_uopc; // @[execution-unit.scala:537:23]
wire [31:0] _queue_io_deq_bits_fflags_bits_uop_inst; // @[execution-unit.scala:537:23]
wire [31:0] _queue_io_deq_bits_fflags_bits_uop_debug_inst; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_rvc; // @[execution-unit.scala:537:23]
wire [39:0] _queue_io_deq_bits_fflags_bits_uop_debug_pc; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_fflags_bits_uop_iq_type; // @[execution-unit.scala:537:23]
wire [9:0] _queue_io_deq_bits_fflags_bits_uop_fu_code; // @[execution-unit.scala:537:23]
wire [3:0] _queue_io_deq_bits_fflags_bits_uop_ctrl_br_type; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_fflags_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_fflags_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_fflags_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_fflags_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ctrl_is_load; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ctrl_is_sta; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ctrl_is_std; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_iw_state; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_br; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_jalr; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_jal; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_sfb; // @[execution-unit.scala:537:23]
wire [7:0] _queue_io_deq_bits_fflags_bits_uop_br_mask; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_fflags_bits_uop_br_tag; // @[execution-unit.scala:537:23]
wire [3:0] _queue_io_deq_bits_fflags_bits_uop_ftq_idx; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_edge_inst; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_pc_lob; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_taken; // @[execution-unit.scala:537:23]
wire [19:0] _queue_io_deq_bits_fflags_bits_uop_imm_packed; // @[execution-unit.scala:537:23]
wire [11:0] _queue_io_deq_bits_fflags_bits_uop_csr_addr; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_fflags_bits_uop_rob_idx; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_fflags_bits_uop_ldq_idx; // @[execution-unit.scala:537:23]
wire [2:0] _queue_io_deq_bits_fflags_bits_uop_stq_idx; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_rxq_idx; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_pdst; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_prs1; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_prs2; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_prs3; // @[execution-unit.scala:537:23]
wire [3:0] _queue_io_deq_bits_fflags_bits_uop_ppred; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_prs1_busy; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_prs2_busy; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_prs3_busy; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ppred_busy; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_stale_pdst; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_exception; // @[execution-unit.scala:537:23]
wire [63:0] _queue_io_deq_bits_fflags_bits_uop_exc_cause; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_bypassable; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_fflags_bits_uop_mem_cmd; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_mem_size; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_mem_signed; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_fence; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_fencei; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_amo; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_uses_ldq; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_uses_stq; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_is_unique; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_flush_on_commit; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ldst_is_rs1; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_ldst; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_lrs1; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_lrs2; // @[execution-unit.scala:537:23]
wire [5:0] _queue_io_deq_bits_fflags_bits_uop_lrs3; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_ldst_val; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_dst_rtype; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_lrs1_rtype; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_lrs2_rtype; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_frs3_en; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_fp_val; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_fp_single; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_xcpt_pf_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_xcpt_ae_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_xcpt_ma_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_bp_debug_if; // @[execution-unit.scala:537:23]
wire _queue_io_deq_bits_fflags_bits_uop_bp_xcpt_if; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_debug_fsrc; // @[execution-unit.scala:537:23]
wire [1:0] _queue_io_deq_bits_fflags_bits_uop_debug_tsrc; // @[execution-unit.scala:537:23]
wire [4:0] _queue_io_deq_bits_fflags_bits_flags; // @[execution-unit.scala:537:23]
wire _queue_io_empty; // @[execution-unit.scala:537:23]
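  // FDiv/FSqrt functional unit (FDivSqrtUnit) response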
wire _FDivSqrtUnit_io_req_ready; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_valid; // @[execution-unit.scala:502:22]
wire [6:0] _FDivSqrtUnit_io_resp_bits_uop_uopc; // @[execution-unit.scala:502:22]
wire [31:0] _FDivSqrtUnit_io_resp_bits_uop_inst; // @[execution-unit.scala:502:22]
wire [31:0] _FDivSqrtUnit_io_resp_bits_uop_debug_inst; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_rvc; // @[execution-unit.scala:502:22]
wire [39:0] _FDivSqrtUnit_io_resp_bits_uop_debug_pc; // @[execution-unit.scala:502:22]
wire [2:0] _FDivSqrtUnit_io_resp_bits_uop_iq_type; // @[execution-unit.scala:502:22]
wire [9:0] _FDivSqrtUnit_io_resp_bits_uop_fu_code; // @[execution-unit.scala:502:22]
wire [3:0] _FDivSqrtUnit_io_resp_bits_uop_ctrl_br_type; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:502:22]
wire [2:0] _FDivSqrtUnit_io_resp_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:502:22]
wire [2:0] _FDivSqrtUnit_io_resp_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:502:22]
wire [4:0] _FDivSqrtUnit_io_resp_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:502:22]
wire [2:0] _FDivSqrtUnit_io_resp_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ctrl_is_load; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ctrl_is_sta; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ctrl_is_std; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_iw_state; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_br; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_jalr; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_jal; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_sfb; // @[execution-unit.scala:502:22]
wire [7:0] _FDivSqrtUnit_io_resp_bits_uop_br_mask; // @[execution-unit.scala:502:22]
wire [2:0] _FDivSqrtUnit_io_resp_bits_uop_br_tag; // @[execution-unit.scala:502:22]
wire [3:0] _FDivSqrtUnit_io_resp_bits_uop_ftq_idx; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_edge_inst; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_pc_lob; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_taken; // @[execution-unit.scala:502:22]
wire [19:0] _FDivSqrtUnit_io_resp_bits_uop_imm_packed; // @[execution-unit.scala:502:22]
wire [11:0] _FDivSqrtUnit_io_resp_bits_uop_csr_addr; // @[execution-unit.scala:502:22]
wire [4:0] _FDivSqrtUnit_io_resp_bits_uop_rob_idx; // @[execution-unit.scala:502:22]
wire [2:0] _FDivSqrtUnit_io_resp_bits_uop_ldq_idx; // @[execution-unit.scala:502:22]
wire [2:0] _FDivSqrtUnit_io_resp_bits_uop_stq_idx; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_rxq_idx; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_pdst; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_prs1; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_prs2; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_prs3; // @[execution-unit.scala:502:22]
wire [3:0] _FDivSqrtUnit_io_resp_bits_uop_ppred; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_prs1_busy; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_prs2_busy; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_prs3_busy; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ppred_busy; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_stale_pdst; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_exception; // @[execution-unit.scala:502:22]
wire [63:0] _FDivSqrtUnit_io_resp_bits_uop_exc_cause; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_bypassable; // @[execution-unit.scala:502:22]
wire [4:0] _FDivSqrtUnit_io_resp_bits_uop_mem_cmd; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_mem_size; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_mem_signed; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_fence; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_fencei; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_amo; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_uses_ldq; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_uses_stq; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_is_unique; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_flush_on_commit; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ldst_is_rs1; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_ldst; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_lrs1; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_lrs2; // @[execution-unit.scala:502:22]
wire [5:0] _FDivSqrtUnit_io_resp_bits_uop_lrs3; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_ldst_val; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_dst_rtype; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_lrs1_rtype; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_lrs2_rtype; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_frs3_en; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_fp_val; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_fp_single; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_xcpt_pf_if; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_xcpt_ae_if; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_xcpt_ma_if; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_bp_debug_if; // @[execution-unit.scala:502:22]
wire _FDivSqrtUnit_io_resp_bits_uop_bp_xcpt_if; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_debug_fsrc; // @[execution-unit.scala:502:22]
wire [1:0] _FDivSqrtUnit_io_resp_bits_uop_debug_tsrc; // @[execution-unit.scala:502:22]
wire [64:0] _FDivSqrtUnit_io_resp_bits_data; // @[execution-unit.scala:502:22]
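  // Probe wires for the io_resp outputs of the FPUUnit instance (execution-unit.scala:477):
  // the responding micro-op fields followed by the 65-bit result data and the fflags bundle below.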
wire _FPUUnit_io_resp_valid; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_uop_uopc; // @[execution-unit.scala:477:17]
wire [31:0] _FPUUnit_io_resp_bits_uop_inst; // @[execution-unit.scala:477:17]
wire [31:0] _FPUUnit_io_resp_bits_uop_debug_inst; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_rvc; // @[execution-unit.scala:477:17]
wire [39:0] _FPUUnit_io_resp_bits_uop_debug_pc; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_uop_iq_type; // @[execution-unit.scala:477:17]
wire [9:0] _FPUUnit_io_resp_bits_uop_fu_code; // @[execution-unit.scala:477:17]
wire [3:0] _FPUUnit_io_resp_bits_uop_ctrl_br_type; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ctrl_is_load; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ctrl_is_sta; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ctrl_is_std; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_iw_state; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_br; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_jalr; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_jal; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_sfb; // @[execution-unit.scala:477:17]
wire [7:0] _FPUUnit_io_resp_bits_uop_br_mask; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_uop_br_tag; // @[execution-unit.scala:477:17]
wire [3:0] _FPUUnit_io_resp_bits_uop_ftq_idx; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_edge_inst; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_pc_lob; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_taken; // @[execution-unit.scala:477:17]
wire [19:0] _FPUUnit_io_resp_bits_uop_imm_packed; // @[execution-unit.scala:477:17]
wire [11:0] _FPUUnit_io_resp_bits_uop_csr_addr; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_uop_rob_idx; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_uop_ldq_idx; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_uop_stq_idx; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_rxq_idx; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_pdst; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_prs1; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_prs2; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_prs3; // @[execution-unit.scala:477:17]
wire [3:0] _FPUUnit_io_resp_bits_uop_ppred; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_prs1_busy; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_prs2_busy; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_prs3_busy; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ppred_busy; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_stale_pdst; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_exception; // @[execution-unit.scala:477:17]
wire [63:0] _FPUUnit_io_resp_bits_uop_exc_cause; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_bypassable; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_uop_mem_cmd; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_mem_size; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_mem_signed; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_fence; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_fencei; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_amo; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_uses_ldq; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_uses_stq; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_is_unique; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_flush_on_commit; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ldst_is_rs1; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_ldst; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_lrs1; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_lrs2; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_uop_lrs3; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_ldst_val; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_dst_rtype; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_lrs1_rtype; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_lrs2_rtype; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_frs3_en; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_fp_val; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_fp_single; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_xcpt_pf_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_xcpt_ae_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_xcpt_ma_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_bp_debug_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_uop_bp_xcpt_if; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_debug_fsrc; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_uop_debug_tsrc; // @[execution-unit.scala:477:17]
wire [64:0] _FPUUnit_io_resp_bits_data; // @[execution-unit.scala:477:17]
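  // Exception-flag (fflags) payload of the FPUUnit response: valid bit, the generating micro-op,
  // and the 5-bit IEEE flags field.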
wire _FPUUnit_io_resp_bits_fflags_valid; // @[execution-unit.scala:477:17]
wire [6:0] _FPUUnit_io_resp_bits_fflags_bits_uop_uopc; // @[execution-unit.scala:477:17]
wire [31:0] _FPUUnit_io_resp_bits_fflags_bits_uop_inst; // @[execution-unit.scala:477:17]
wire [31:0] _FPUUnit_io_resp_bits_fflags_bits_uop_debug_inst; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_rvc; // @[execution-unit.scala:477:17]
wire [39:0] _FPUUnit_io_resp_bits_fflags_bits_uop_debug_pc; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_fflags_bits_uop_iq_type; // @[execution-unit.scala:477:17]
wire [9:0] _FPUUnit_io_resp_bits_fflags_bits_uop_fu_code; // @[execution-unit.scala:477:17]
wire [3:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_br_type; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_is_load; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_is_sta; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ctrl_is_std; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_iw_state; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_br; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_jalr; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_jal; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_sfb; // @[execution-unit.scala:477:17]
wire [7:0] _FPUUnit_io_resp_bits_fflags_bits_uop_br_mask; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_fflags_bits_uop_br_tag; // @[execution-unit.scala:477:17]
wire [3:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ftq_idx; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_edge_inst; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_pc_lob; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_taken; // @[execution-unit.scala:477:17]
wire [19:0] _FPUUnit_io_resp_bits_fflags_bits_uop_imm_packed; // @[execution-unit.scala:477:17]
wire [11:0] _FPUUnit_io_resp_bits_fflags_bits_uop_csr_addr; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_fflags_bits_uop_rob_idx; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ldq_idx; // @[execution-unit.scala:477:17]
wire [2:0] _FPUUnit_io_resp_bits_fflags_bits_uop_stq_idx; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_rxq_idx; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_pdst; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_prs1; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_prs2; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_prs3; // @[execution-unit.scala:477:17]
wire [3:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ppred; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_prs1_busy; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_prs2_busy; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_prs3_busy; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ppred_busy; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_stale_pdst; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_exception; // @[execution-unit.scala:477:17]
wire [63:0] _FPUUnit_io_resp_bits_fflags_bits_uop_exc_cause; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_bypassable; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_fflags_bits_uop_mem_cmd; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_mem_size; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_mem_signed; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_fence; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_fencei; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_amo; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_uses_ldq; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_uses_stq; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_is_unique; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_flush_on_commit; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ldst_is_rs1; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_ldst; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_lrs1; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_lrs2; // @[execution-unit.scala:477:17]
wire [5:0] _FPUUnit_io_resp_bits_fflags_bits_uop_lrs3; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_ldst_val; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_dst_rtype; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_lrs1_rtype; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_lrs2_rtype; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_frs3_en; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_fp_val; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_fp_single; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_xcpt_pf_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_xcpt_ae_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_xcpt_ma_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_bp_debug_if; // @[execution-unit.scala:477:17]
wire _FPUUnit_io_resp_bits_fflags_bits_uop_bp_xcpt_if; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_debug_fsrc; // @[execution-unit.scala:477:17]
wire [1:0] _FPUUnit_io_resp_bits_fflags_bits_uop_debug_tsrc; // @[execution-unit.scala:477:17]
wire [4:0] _FPUUnit_io_resp_bits_fflags_bits_flags; // @[execution-unit.scala:477:17]
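  // Local "_0" copies of the module inputs: the io_req payload and operand data, the
  // io_ll_iresp ready, the io_brupdate branch-resolution bundle, the io_status CSR view,
  // and the io_fcsr_rm rounding mode.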
wire io_req_valid_0 = io_req_valid; // @[execution-unit.scala:437:7]
wire [6:0] io_req_bits_uop_uopc_0 = io_req_bits_uop_uopc; // @[execution-unit.scala:437:7]
wire [31:0] io_req_bits_uop_inst_0 = io_req_bits_uop_inst; // @[execution-unit.scala:437:7]
wire [31:0] io_req_bits_uop_debug_inst_0 = io_req_bits_uop_debug_inst; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_rvc_0 = io_req_bits_uop_is_rvc; // @[execution-unit.scala:437:7]
wire [39:0] io_req_bits_uop_debug_pc_0 = io_req_bits_uop_debug_pc; // @[execution-unit.scala:437:7]
wire [2:0] io_req_bits_uop_iq_type_0 = io_req_bits_uop_iq_type; // @[execution-unit.scala:437:7]
wire [9:0] io_req_bits_uop_fu_code_0 = io_req_bits_uop_fu_code; // @[execution-unit.scala:437:7]
wire [3:0] io_req_bits_uop_ctrl_br_type_0 = io_req_bits_uop_ctrl_br_type; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_ctrl_op1_sel_0 = io_req_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:437:7]
wire [2:0] io_req_bits_uop_ctrl_op2_sel_0 = io_req_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:437:7]
wire [2:0] io_req_bits_uop_ctrl_imm_sel_0 = io_req_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:437:7]
wire [4:0] io_req_bits_uop_ctrl_op_fcn_0 = io_req_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ctrl_fcn_dw_0 = io_req_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:437:7]
wire [2:0] io_req_bits_uop_ctrl_csr_cmd_0 = io_req_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ctrl_is_load_0 = io_req_bits_uop_ctrl_is_load; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ctrl_is_sta_0 = io_req_bits_uop_ctrl_is_sta; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ctrl_is_std_0 = io_req_bits_uop_ctrl_is_std; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_iw_state_0 = io_req_bits_uop_iw_state; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_iw_p1_poisoned_0 = io_req_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_iw_p2_poisoned_0 = io_req_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_br_0 = io_req_bits_uop_is_br; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_jalr_0 = io_req_bits_uop_is_jalr; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_jal_0 = io_req_bits_uop_is_jal; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_sfb_0 = io_req_bits_uop_is_sfb; // @[execution-unit.scala:437:7]
wire [7:0] io_req_bits_uop_br_mask_0 = io_req_bits_uop_br_mask; // @[execution-unit.scala:437:7]
wire [2:0] io_req_bits_uop_br_tag_0 = io_req_bits_uop_br_tag; // @[execution-unit.scala:437:7]
wire [3:0] io_req_bits_uop_ftq_idx_0 = io_req_bits_uop_ftq_idx; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_edge_inst_0 = io_req_bits_uop_edge_inst; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_pc_lob_0 = io_req_bits_uop_pc_lob; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_taken_0 = io_req_bits_uop_taken; // @[execution-unit.scala:437:7]
wire [19:0] io_req_bits_uop_imm_packed_0 = io_req_bits_uop_imm_packed; // @[execution-unit.scala:437:7]
wire [11:0] io_req_bits_uop_csr_addr_0 = io_req_bits_uop_csr_addr; // @[execution-unit.scala:437:7]
wire [4:0] io_req_bits_uop_rob_idx_0 = io_req_bits_uop_rob_idx; // @[execution-unit.scala:437:7]
wire [2:0] io_req_bits_uop_ldq_idx_0 = io_req_bits_uop_ldq_idx; // @[execution-unit.scala:437:7]
wire [2:0] io_req_bits_uop_stq_idx_0 = io_req_bits_uop_stq_idx; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_rxq_idx_0 = io_req_bits_uop_rxq_idx; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_pdst_0 = io_req_bits_uop_pdst; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_prs1_0 = io_req_bits_uop_prs1; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_prs2_0 = io_req_bits_uop_prs2; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_prs3_0 = io_req_bits_uop_prs3; // @[execution-unit.scala:437:7]
wire [3:0] io_req_bits_uop_ppred_0 = io_req_bits_uop_ppred; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_prs1_busy_0 = io_req_bits_uop_prs1_busy; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_prs2_busy_0 = io_req_bits_uop_prs2_busy; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_prs3_busy_0 = io_req_bits_uop_prs3_busy; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ppred_busy_0 = io_req_bits_uop_ppred_busy; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_stale_pdst_0 = io_req_bits_uop_stale_pdst; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_exception_0 = io_req_bits_uop_exception; // @[execution-unit.scala:437:7]
wire [63:0] io_req_bits_uop_exc_cause_0 = io_req_bits_uop_exc_cause; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_bypassable_0 = io_req_bits_uop_bypassable; // @[execution-unit.scala:437:7]
wire [4:0] io_req_bits_uop_mem_cmd_0 = io_req_bits_uop_mem_cmd; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_mem_size_0 = io_req_bits_uop_mem_size; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_mem_signed_0 = io_req_bits_uop_mem_signed; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_fence_0 = io_req_bits_uop_is_fence; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_fencei_0 = io_req_bits_uop_is_fencei; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_amo_0 = io_req_bits_uop_is_amo; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_uses_ldq_0 = io_req_bits_uop_uses_ldq; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_uses_stq_0 = io_req_bits_uop_uses_stq; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_sys_pc2epc_0 = io_req_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_is_unique_0 = io_req_bits_uop_is_unique; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_flush_on_commit_0 = io_req_bits_uop_flush_on_commit; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ldst_is_rs1_0 = io_req_bits_uop_ldst_is_rs1; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_ldst_0 = io_req_bits_uop_ldst; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_lrs1_0 = io_req_bits_uop_lrs1; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_lrs2_0 = io_req_bits_uop_lrs2; // @[execution-unit.scala:437:7]
wire [5:0] io_req_bits_uop_lrs3_0 = io_req_bits_uop_lrs3; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_ldst_val_0 = io_req_bits_uop_ldst_val; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_dst_rtype_0 = io_req_bits_uop_dst_rtype; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_lrs1_rtype_0 = io_req_bits_uop_lrs1_rtype; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_lrs2_rtype_0 = io_req_bits_uop_lrs2_rtype; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_frs3_en_0 = io_req_bits_uop_frs3_en; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_fp_val_0 = io_req_bits_uop_fp_val; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_fp_single_0 = io_req_bits_uop_fp_single; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_xcpt_pf_if_0 = io_req_bits_uop_xcpt_pf_if; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_xcpt_ae_if_0 = io_req_bits_uop_xcpt_ae_if; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_xcpt_ma_if_0 = io_req_bits_uop_xcpt_ma_if; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_bp_debug_if_0 = io_req_bits_uop_bp_debug_if; // @[execution-unit.scala:437:7]
wire io_req_bits_uop_bp_xcpt_if_0 = io_req_bits_uop_bp_xcpt_if; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_debug_fsrc_0 = io_req_bits_uop_debug_fsrc; // @[execution-unit.scala:437:7]
wire [1:0] io_req_bits_uop_debug_tsrc_0 = io_req_bits_uop_debug_tsrc; // @[execution-unit.scala:437:7]
wire [64:0] io_req_bits_rs1_data_0 = io_req_bits_rs1_data; // @[execution-unit.scala:437:7]
wire [64:0] io_req_bits_rs2_data_0 = io_req_bits_rs2_data; // @[execution-unit.scala:437:7]
wire [64:0] io_req_bits_rs3_data_0 = io_req_bits_rs3_data; // @[execution-unit.scala:437:7]
wire io_req_bits_kill_0 = io_req_bits_kill; // @[execution-unit.scala:437:7]
wire io_ll_iresp_ready_0 = io_ll_iresp_ready; // @[execution-unit.scala:437:7]
wire [7:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[execution-unit.scala:437:7]
wire [7:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[execution-unit.scala:437:7]
wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[execution-unit.scala:437:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[execution-unit.scala:437:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[execution-unit.scala:437:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[execution-unit.scala:437:7]
wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[execution-unit.scala:437:7]
wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[execution-unit.scala:437:7]
wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[execution-unit.scala:437:7]
wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[execution-unit.scala:437:7]
wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[execution-unit.scala:437:7]
wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[execution-unit.scala:437:7]
wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[execution-unit.scala:437:7]
wire [7:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[execution-unit.scala:437:7]
wire [2:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[execution-unit.scala:437:7]
wire [3:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[execution-unit.scala:437:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[execution-unit.scala:437:7]
wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[execution-unit.scala:437:7]
wire [4:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[execution-unit.scala:437:7]
wire [2:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[execution-unit.scala:437:7]
wire [2:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[execution-unit.scala:437:7]
wire [3:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[execution-unit.scala:437:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[execution-unit.scala:437:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[execution-unit.scala:437:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[execution-unit.scala:437:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[execution-unit.scala:437:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[execution-unit.scala:437:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[execution-unit.scala:437:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[execution-unit.scala:437:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[execution-unit.scala:437:7]
wire io_status_debug_0 = io_status_debug; // @[execution-unit.scala:437:7]
wire io_status_cease_0 = io_status_cease; // @[execution-unit.scala:437:7]
wire io_status_wfi_0 = io_status_wfi; // @[execution-unit.scala:437:7]
wire [1:0] io_status_dprv_0 = io_status_dprv; // @[execution-unit.scala:437:7]
wire io_status_dv_0 = io_status_dv; // @[execution-unit.scala:437:7]
wire [1:0] io_status_prv_0 = io_status_prv; // @[execution-unit.scala:437:7]
wire io_status_v_0 = io_status_v; // @[execution-unit.scala:437:7]
wire io_status_sd_0 = io_status_sd; // @[execution-unit.scala:437:7]
wire io_status_mpv_0 = io_status_mpv; // @[execution-unit.scala:437:7]
wire io_status_gva_0 = io_status_gva; // @[execution-unit.scala:437:7]
wire io_status_tsr_0 = io_status_tsr; // @[execution-unit.scala:437:7]
wire io_status_tw_0 = io_status_tw; // @[execution-unit.scala:437:7]
wire io_status_tvm_0 = io_status_tvm; // @[execution-unit.scala:437:7]
wire io_status_mxr_0 = io_status_mxr; // @[execution-unit.scala:437:7]
wire io_status_sum_0 = io_status_sum; // @[execution-unit.scala:437:7]
wire io_status_mprv_0 = io_status_mprv; // @[execution-unit.scala:437:7]
wire [1:0] io_status_fs_0 = io_status_fs; // @[execution-unit.scala:437:7]
wire [1:0] io_status_mpp_0 = io_status_mpp; // @[execution-unit.scala:437:7]
wire io_status_spp_0 = io_status_spp; // @[execution-unit.scala:437:7]
wire io_status_mpie_0 = io_status_mpie; // @[execution-unit.scala:437:7]
wire io_status_spie_0 = io_status_spie; // @[execution-unit.scala:437:7]
wire io_status_mie_0 = io_status_mie; // @[execution-unit.scala:437:7]
wire io_status_sie_0 = io_status_sie; // @[execution-unit.scala:437:7]
wire [2:0] io_fcsr_rm_0 = io_fcsr_rm; // @[execution-unit.scala:437:7]
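  // Constant-valued signals: fixed status fields, tied-off handshake and predication bits,
  // and the ISA/XLEN encodings.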
wire [31:0] io_status_isa = 32'h14112D; // @[execution-unit.scala:437:7]
wire [22:0] io_status_zero2 = 23'h0; // @[execution-unit.scala:437:7]
wire io_req_ready = 1'h0; // @[execution-unit.scala:437:7]
wire io_req_bits_pred_data = 1'h0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_predicated = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_mbe = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_sbe = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_sd_rv32 = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_ube = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_upie = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_hie = 1'h0; // @[execution-unit.scala:437:7]
wire io_status_uie = 1'h0; // @[execution-unit.scala:437:7]
wire [7:0] io_status_zero1 = 8'h0; // @[execution-unit.scala:437:7]
wire [1:0] io_status_xs = 2'h0; // @[execution-unit.scala:437:7]
wire [1:0] io_status_vs = 2'h0; // @[execution-unit.scala:437:7]
wire io_fresp_ready = 1'h1; // @[execution-unit.scala:437:7]
wire [1:0] io_status_sxl = 2'h2; // @[execution-unit.scala:437:7]
wire [1:0] io_status_uxl = 2'h2; // @[execution-unit.scala:437:7]
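  // Forward declarations of intermediate nets: the fu_types mask terms, the io_fresp valid,
  // uop, and data mux results (Mux.scala:50), and the selected fflags bundle
  // (execution-unit.scala:530), all driven further down.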
wire [9:0] _io_fu_types_T = 10'h40; // @[execution-unit.scala:467:21]
wire [9:0] _io_fu_types_T_8; // @[execution-unit.scala:468:60]
wire _io_fresp_valid_T_5; // @[execution-unit.scala:525:69]
wire [6:0] _io_fresp_bits_uop_T_uopc; // @[Mux.scala:50:70]
wire [31:0] _io_fresp_bits_uop_T_inst; // @[Mux.scala:50:70]
wire [31:0] _io_fresp_bits_uop_T_debug_inst; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_rvc; // @[Mux.scala:50:70]
wire [39:0] _io_fresp_bits_uop_T_debug_pc; // @[Mux.scala:50:70]
wire [2:0] _io_fresp_bits_uop_T_iq_type; // @[Mux.scala:50:70]
wire [9:0] _io_fresp_bits_uop_T_fu_code; // @[Mux.scala:50:70]
wire [3:0] _io_fresp_bits_uop_T_ctrl_br_type; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_ctrl_op1_sel; // @[Mux.scala:50:70]
wire [2:0] _io_fresp_bits_uop_T_ctrl_op2_sel; // @[Mux.scala:50:70]
wire [2:0] _io_fresp_bits_uop_T_ctrl_imm_sel; // @[Mux.scala:50:70]
wire [4:0] _io_fresp_bits_uop_T_ctrl_op_fcn; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ctrl_fcn_dw; // @[Mux.scala:50:70]
wire [2:0] _io_fresp_bits_uop_T_ctrl_csr_cmd; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ctrl_is_load; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ctrl_is_sta; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ctrl_is_std; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_iw_state; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_iw_p1_poisoned; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_iw_p2_poisoned; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_br; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_jalr; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_jal; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_sfb; // @[Mux.scala:50:70]
wire [7:0] _io_fresp_bits_uop_T_br_mask; // @[Mux.scala:50:70]
wire [2:0] _io_fresp_bits_uop_T_br_tag; // @[Mux.scala:50:70]
wire [3:0] _io_fresp_bits_uop_T_ftq_idx; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_edge_inst; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_pc_lob; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_taken; // @[Mux.scala:50:70]
wire [19:0] _io_fresp_bits_uop_T_imm_packed; // @[Mux.scala:50:70]
wire [11:0] _io_fresp_bits_uop_T_csr_addr; // @[Mux.scala:50:70]
wire [4:0] _io_fresp_bits_uop_T_rob_idx; // @[Mux.scala:50:70]
wire [2:0] _io_fresp_bits_uop_T_ldq_idx; // @[Mux.scala:50:70]
wire [2:0] _io_fresp_bits_uop_T_stq_idx; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_rxq_idx; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_pdst; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_prs1; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_prs2; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_prs3; // @[Mux.scala:50:70]
wire [3:0] _io_fresp_bits_uop_T_ppred; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_prs1_busy; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_prs2_busy; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_prs3_busy; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ppred_busy; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_stale_pdst; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_exception; // @[Mux.scala:50:70]
wire [63:0] _io_fresp_bits_uop_T_exc_cause; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_bypassable; // @[Mux.scala:50:70]
wire [4:0] _io_fresp_bits_uop_T_mem_cmd; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_mem_size; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_mem_signed; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_fence; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_fencei; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_amo; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_uses_ldq; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_uses_stq; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_sys_pc2epc; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_is_unique; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_flush_on_commit; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ldst_is_rs1; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_ldst; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_lrs1; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_lrs2; // @[Mux.scala:50:70]
wire [5:0] _io_fresp_bits_uop_T_lrs3; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_ldst_val; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_dst_rtype; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_lrs1_rtype; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_lrs2_rtype; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_frs3_en; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_fp_val; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_fp_single; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_xcpt_pf_if; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_xcpt_ae_if; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_xcpt_ma_if; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_bp_debug_if; // @[Mux.scala:50:70]
wire _io_fresp_bits_uop_T_bp_xcpt_if; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_debug_fsrc; // @[Mux.scala:50:70]
wire [1:0] _io_fresp_bits_uop_T_debug_tsrc; // @[Mux.scala:50:70]
wire [64:0] _io_fresp_bits_data_T; // @[Mux.scala:50:70]
wire _io_fresp_bits_fflags_T_valid; // @[execution-unit.scala:530:30]
wire [6:0] _io_fresp_bits_fflags_T_bits_uop_uopc; // @[execution-unit.scala:530:30]
wire [31:0] _io_fresp_bits_fflags_T_bits_uop_inst; // @[execution-unit.scala:530:30]
wire [31:0] _io_fresp_bits_fflags_T_bits_uop_debug_inst; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_rvc; // @[execution-unit.scala:530:30]
wire [39:0] _io_fresp_bits_fflags_T_bits_uop_debug_pc; // @[execution-unit.scala:530:30]
wire [2:0] _io_fresp_bits_fflags_T_bits_uop_iq_type; // @[execution-unit.scala:530:30]
wire [9:0] _io_fresp_bits_fflags_T_bits_uop_fu_code; // @[execution-unit.scala:530:30]
wire [3:0] _io_fresp_bits_fflags_T_bits_uop_ctrl_br_type; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:530:30]
wire [2:0] _io_fresp_bits_fflags_T_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:530:30]
wire [2:0] _io_fresp_bits_fflags_T_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:530:30]
wire [4:0] _io_fresp_bits_fflags_T_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:530:30]
wire [2:0] _io_fresp_bits_fflags_T_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ctrl_is_load; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ctrl_is_sta; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ctrl_is_std; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_iw_state; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_br; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_jalr; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_jal; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_sfb; // @[execution-unit.scala:530:30]
wire [7:0] _io_fresp_bits_fflags_T_bits_uop_br_mask; // @[execution-unit.scala:530:30]
wire [2:0] _io_fresp_bits_fflags_T_bits_uop_br_tag; // @[execution-unit.scala:530:30]
wire [3:0] _io_fresp_bits_fflags_T_bits_uop_ftq_idx; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_edge_inst; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_pc_lob; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_taken; // @[execution-unit.scala:530:30]
wire [19:0] _io_fresp_bits_fflags_T_bits_uop_imm_packed; // @[execution-unit.scala:530:30]
wire [11:0] _io_fresp_bits_fflags_T_bits_uop_csr_addr; // @[execution-unit.scala:530:30]
wire [4:0] _io_fresp_bits_fflags_T_bits_uop_rob_idx; // @[execution-unit.scala:530:30]
wire [2:0] _io_fresp_bits_fflags_T_bits_uop_ldq_idx; // @[execution-unit.scala:530:30]
wire [2:0] _io_fresp_bits_fflags_T_bits_uop_stq_idx; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_rxq_idx; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_pdst; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_prs1; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_prs2; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_prs3; // @[execution-unit.scala:530:30]
wire [3:0] _io_fresp_bits_fflags_T_bits_uop_ppred; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_prs1_busy; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_prs2_busy; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_prs3_busy; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ppred_busy; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_stale_pdst; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_exception; // @[execution-unit.scala:530:30]
wire [63:0] _io_fresp_bits_fflags_T_bits_uop_exc_cause; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_bypassable; // @[execution-unit.scala:530:30]
wire [4:0] _io_fresp_bits_fflags_T_bits_uop_mem_cmd; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_mem_size; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_mem_signed; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_fence; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_fencei; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_amo; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_uses_ldq; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_uses_stq; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_is_unique; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_flush_on_commit; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ldst_is_rs1; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_ldst; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_lrs1; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_lrs2; // @[execution-unit.scala:530:30]
wire [5:0] _io_fresp_bits_fflags_T_bits_uop_lrs3; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_ldst_val; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_dst_rtype; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_lrs1_rtype; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_lrs2_rtype; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_frs3_en; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_fp_val; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_fp_single; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_xcpt_pf_if; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_xcpt_ae_if; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_xcpt_ma_if; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_bp_debug_if; // @[execution-unit.scala:530:30]
wire _io_fresp_bits_fflags_T_bits_uop_bp_xcpt_if; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_debug_fsrc; // @[execution-unit.scala:530:30]
wire [1:0] _io_fresp_bits_fflags_T_bits_uop_debug_tsrc; // @[execution-unit.scala:530:30]
wire [4:0] _io_fresp_bits_fflags_T_bits_flags; // @[execution-unit.scala:530:30]
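  // Declarations of the io_fresp output fields (micro-op, fflags bundle, result data, valid),
  // assigned later in the module body.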
wire [3:0] io_fresp_bits_uop_ctrl_br_type_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_ctrl_op1_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_uop_ctrl_op2_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_uop_ctrl_imm_sel_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_uop_ctrl_op_fcn_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ctrl_fcn_dw_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_uop_ctrl_csr_cmd_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ctrl_is_load_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ctrl_is_sta_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ctrl_is_std_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_uop_uopc_0; // @[execution-unit.scala:437:7]
wire [31:0] io_fresp_bits_uop_inst_0; // @[execution-unit.scala:437:7]
wire [31:0] io_fresp_bits_uop_debug_inst_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_rvc_0; // @[execution-unit.scala:437:7]
wire [39:0] io_fresp_bits_uop_debug_pc_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_uop_iq_type_0; // @[execution-unit.scala:437:7]
wire [9:0] io_fresp_bits_uop_fu_code_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_iw_state_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_iw_p1_poisoned_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_iw_p2_poisoned_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_br_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_jalr_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_jal_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_sfb_0; // @[execution-unit.scala:437:7]
wire [7:0] io_fresp_bits_uop_br_mask_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_uop_br_tag_0; // @[execution-unit.scala:437:7]
wire [3:0] io_fresp_bits_uop_ftq_idx_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_edge_inst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_pc_lob_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_taken_0; // @[execution-unit.scala:437:7]
wire [19:0] io_fresp_bits_uop_imm_packed_0; // @[execution-unit.scala:437:7]
wire [11:0] io_fresp_bits_uop_csr_addr_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_uop_rob_idx_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_uop_ldq_idx_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_uop_stq_idx_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_rxq_idx_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_pdst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_prs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_prs2_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_prs3_0; // @[execution-unit.scala:437:7]
wire [3:0] io_fresp_bits_uop_ppred_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_prs1_busy_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_prs2_busy_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_prs3_busy_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ppred_busy_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_stale_pdst_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_exception_0; // @[execution-unit.scala:437:7]
wire [63:0] io_fresp_bits_uop_exc_cause_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_bypassable_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_uop_mem_cmd_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_mem_size_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_mem_signed_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_fence_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_fencei_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_amo_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_uses_ldq_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_uses_stq_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_sys_pc2epc_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_is_unique_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_flush_on_commit_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ldst_is_rs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_ldst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_lrs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_lrs2_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_uop_lrs3_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_ldst_val_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_dst_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_lrs1_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_lrs2_rtype_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_frs3_en_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_fp_val_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_fp_single_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_xcpt_pf_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_xcpt_ae_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_xcpt_ma_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_bp_debug_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_uop_bp_xcpt_if_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_debug_fsrc_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_uop_debug_tsrc_0; // @[execution-unit.scala:437:7]
wire [3:0] io_fresp_bits_fflags_bits_uop_ctrl_br_type_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_ctrl_op1_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_fflags_bits_uop_ctrl_op2_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_fflags_bits_uop_ctrl_imm_sel_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_fflags_bits_uop_ctrl_op_fcn_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ctrl_fcn_dw_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_fflags_bits_uop_ctrl_csr_cmd_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ctrl_is_load_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ctrl_is_sta_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ctrl_is_std_0; // @[execution-unit.scala:437:7]
wire [6:0] io_fresp_bits_fflags_bits_uop_uopc_0; // @[execution-unit.scala:437:7]
wire [31:0] io_fresp_bits_fflags_bits_uop_inst_0; // @[execution-unit.scala:437:7]
wire [31:0] io_fresp_bits_fflags_bits_uop_debug_inst_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_rvc_0; // @[execution-unit.scala:437:7]
wire [39:0] io_fresp_bits_fflags_bits_uop_debug_pc_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_fflags_bits_uop_iq_type_0; // @[execution-unit.scala:437:7]
wire [9:0] io_fresp_bits_fflags_bits_uop_fu_code_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_iw_state_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_iw_p1_poisoned_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_iw_p2_poisoned_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_br_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_jalr_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_jal_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_sfb_0; // @[execution-unit.scala:437:7]
wire [7:0] io_fresp_bits_fflags_bits_uop_br_mask_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_fflags_bits_uop_br_tag_0; // @[execution-unit.scala:437:7]
wire [3:0] io_fresp_bits_fflags_bits_uop_ftq_idx_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_edge_inst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_pc_lob_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_taken_0; // @[execution-unit.scala:437:7]
wire [19:0] io_fresp_bits_fflags_bits_uop_imm_packed_0; // @[execution-unit.scala:437:7]
wire [11:0] io_fresp_bits_fflags_bits_uop_csr_addr_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_fflags_bits_uop_rob_idx_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_fflags_bits_uop_ldq_idx_0; // @[execution-unit.scala:437:7]
wire [2:0] io_fresp_bits_fflags_bits_uop_stq_idx_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_rxq_idx_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_pdst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_prs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_prs2_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_prs3_0; // @[execution-unit.scala:437:7]
wire [3:0] io_fresp_bits_fflags_bits_uop_ppred_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_prs1_busy_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_prs2_busy_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_prs3_busy_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ppred_busy_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_stale_pdst_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_exception_0; // @[execution-unit.scala:437:7]
wire [63:0] io_fresp_bits_fflags_bits_uop_exc_cause_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_bypassable_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_fflags_bits_uop_mem_cmd_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_mem_size_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_mem_signed_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_fence_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_fencei_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_amo_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_uses_ldq_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_uses_stq_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_sys_pc2epc_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_is_unique_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_flush_on_commit_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ldst_is_rs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_ldst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_lrs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_lrs2_0; // @[execution-unit.scala:437:7]
wire [5:0] io_fresp_bits_fflags_bits_uop_lrs3_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_ldst_val_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_dst_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_lrs1_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_lrs2_rtype_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_frs3_en_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_fp_val_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_fp_single_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_xcpt_pf_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_xcpt_ae_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_xcpt_ma_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_bp_debug_if_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_bits_uop_bp_xcpt_if_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_debug_fsrc_0; // @[execution-unit.scala:437:7]
wire [1:0] io_fresp_bits_fflags_bits_uop_debug_tsrc_0; // @[execution-unit.scala:437:7]
wire [4:0] io_fresp_bits_fflags_bits_flags_0; // @[execution-unit.scala:437:7]
wire io_fresp_bits_fflags_valid_0; // @[execution-unit.scala:437:7]
wire [64:0] io_fresp_bits_data_0; // @[execution-unit.scala:437:7]
wire io_fresp_valid_0; // @[execution-unit.scala:437:7]
wire [3:0] io_ll_iresp_bits_uop_ctrl_br_type_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_ctrl_op1_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_uop_ctrl_op2_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_uop_ctrl_imm_sel_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_uop_ctrl_op_fcn_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ctrl_fcn_dw_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_uop_ctrl_csr_cmd_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ctrl_is_load_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ctrl_is_sta_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ctrl_is_std_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_uop_uopc_0; // @[execution-unit.scala:437:7]
wire [31:0] io_ll_iresp_bits_uop_inst_0; // @[execution-unit.scala:437:7]
wire [31:0] io_ll_iresp_bits_uop_debug_inst_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_rvc_0; // @[execution-unit.scala:437:7]
wire [39:0] io_ll_iresp_bits_uop_debug_pc_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_uop_iq_type_0; // @[execution-unit.scala:437:7]
wire [9:0] io_ll_iresp_bits_uop_fu_code_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_iw_state_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_iw_p1_poisoned_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_iw_p2_poisoned_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_br_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_jalr_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_jal_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_sfb_0; // @[execution-unit.scala:437:7]
wire [7:0] io_ll_iresp_bits_uop_br_mask_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_uop_br_tag_0; // @[execution-unit.scala:437:7]
wire [3:0] io_ll_iresp_bits_uop_ftq_idx_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_edge_inst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_pc_lob_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_taken_0; // @[execution-unit.scala:437:7]
wire [19:0] io_ll_iresp_bits_uop_imm_packed_0; // @[execution-unit.scala:437:7]
wire [11:0] io_ll_iresp_bits_uop_csr_addr_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_uop_rob_idx_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_uop_ldq_idx_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_uop_stq_idx_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_rxq_idx_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_pdst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_prs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_prs2_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_prs3_0; // @[execution-unit.scala:437:7]
wire [3:0] io_ll_iresp_bits_uop_ppred_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_prs1_busy_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_prs2_busy_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_prs3_busy_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ppred_busy_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_stale_pdst_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_exception_0; // @[execution-unit.scala:437:7]
wire [63:0] io_ll_iresp_bits_uop_exc_cause_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_bypassable_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_uop_mem_cmd_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_mem_size_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_mem_signed_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_fence_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_fencei_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_amo_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_uses_ldq_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_uses_stq_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_sys_pc2epc_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_is_unique_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_flush_on_commit_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ldst_is_rs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_ldst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_lrs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_lrs2_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_uop_lrs3_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_ldst_val_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_dst_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_lrs1_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_lrs2_rtype_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_frs3_en_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_fp_val_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_fp_single_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_xcpt_pf_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_xcpt_ae_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_xcpt_ma_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_bp_debug_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_uop_bp_xcpt_if_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_debug_fsrc_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_uop_debug_tsrc_0; // @[execution-unit.scala:437:7]
wire [3:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_br_type_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_op1_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_op2_sel_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_imm_sel_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_op_fcn_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ctrl_fcn_dw_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_fflags_bits_uop_ctrl_csr_cmd_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ctrl_is_load_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ctrl_is_sta_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ctrl_is_std_0; // @[execution-unit.scala:437:7]
wire [6:0] io_ll_iresp_bits_fflags_bits_uop_uopc_0; // @[execution-unit.scala:437:7]
wire [31:0] io_ll_iresp_bits_fflags_bits_uop_inst_0; // @[execution-unit.scala:437:7]
wire [31:0] io_ll_iresp_bits_fflags_bits_uop_debug_inst_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_rvc_0; // @[execution-unit.scala:437:7]
wire [39:0] io_ll_iresp_bits_fflags_bits_uop_debug_pc_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_fflags_bits_uop_iq_type_0; // @[execution-unit.scala:437:7]
wire [9:0] io_ll_iresp_bits_fflags_bits_uop_fu_code_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_iw_state_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_iw_p1_poisoned_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_iw_p2_poisoned_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_br_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_jalr_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_jal_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_sfb_0; // @[execution-unit.scala:437:7]
wire [7:0] io_ll_iresp_bits_fflags_bits_uop_br_mask_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_fflags_bits_uop_br_tag_0; // @[execution-unit.scala:437:7]
wire [3:0] io_ll_iresp_bits_fflags_bits_uop_ftq_idx_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_edge_inst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_pc_lob_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_taken_0; // @[execution-unit.scala:437:7]
wire [19:0] io_ll_iresp_bits_fflags_bits_uop_imm_packed_0; // @[execution-unit.scala:437:7]
wire [11:0] io_ll_iresp_bits_fflags_bits_uop_csr_addr_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_fflags_bits_uop_rob_idx_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_fflags_bits_uop_ldq_idx_0; // @[execution-unit.scala:437:7]
wire [2:0] io_ll_iresp_bits_fflags_bits_uop_stq_idx_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_rxq_idx_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_pdst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_prs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_prs2_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_prs3_0; // @[execution-unit.scala:437:7]
wire [3:0] io_ll_iresp_bits_fflags_bits_uop_ppred_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_prs1_busy_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_prs2_busy_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_prs3_busy_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ppred_busy_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_stale_pdst_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_exception_0; // @[execution-unit.scala:437:7]
wire [63:0] io_ll_iresp_bits_fflags_bits_uop_exc_cause_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_bypassable_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_fflags_bits_uop_mem_cmd_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_mem_size_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_mem_signed_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_fence_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_fencei_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_amo_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_uses_ldq_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_uses_stq_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_sys_pc2epc_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_is_unique_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_flush_on_commit_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ldst_is_rs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_ldst_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_lrs1_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_lrs2_0; // @[execution-unit.scala:437:7]
wire [5:0] io_ll_iresp_bits_fflags_bits_uop_lrs3_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_ldst_val_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_dst_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_lrs1_rtype_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_lrs2_rtype_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_frs3_en_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_fp_val_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_fp_single_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_xcpt_pf_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_xcpt_ae_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_xcpt_ma_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_bp_debug_if_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_bits_uop_bp_xcpt_if_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_debug_fsrc_0; // @[execution-unit.scala:437:7]
wire [1:0] io_ll_iresp_bits_fflags_bits_uop_debug_tsrc_0; // @[execution-unit.scala:437:7]
wire [4:0] io_ll_iresp_bits_fflags_bits_flags_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_fflags_valid_0; // @[execution-unit.scala:437:7]
wire [64:0] io_ll_iresp_bits_data_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_bits_predicated_0; // @[execution-unit.scala:437:7]
wire io_ll_iresp_valid_0; // @[execution-unit.scala:437:7]
wire [9:0] io_fu_types_0; // @[execution-unit.scala:437:7]
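// FP execution-unit glue: fdiv_busy / fpiu_busy track whether the FDiv/Sqrt
// and FP-to-int paths can accept new work, and gate the capability bits
// reported on io_fu_types below.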
wire _fdiv_busy_T_4; // @[execution-unit.scala:516:41]
wire fdiv_busy; // @[execution-unit.scala:461:27]
wire _fpiu_busy_T_1; // @[execution-unit.scala:568:18]
wire fpiu_busy; // @[execution-unit.scala:462:27]
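// io_fu_types: advertise the FDiv/Sqrt capability (bit 7) only while
// fdiv_busy is low, always advertise the FPU capability (10'h40, bit 6),
// and advertise the FP-to-int capability (bit 9) only while fpiu_busy is low.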
wire _io_fu_types_T_1 = ~fdiv_busy; // @[execution-unit.scala:461:27, :468:22]
wire _io_fu_types_T_2 = _io_fu_types_T_1; // @[execution-unit.scala:468:{22,33}]
wire [9:0] _io_fu_types_T_3 = {2'h0, _io_fu_types_T_2, 7'h0}; // @[execution-unit.scala:468:{21,33}]
wire [9:0] _io_fu_types_T_4 = _io_fu_types_T_3 | 10'h40; // @[execution-unit.scala:467:45, :468:21]
wire _io_fu_types_T_5 = ~fpiu_busy; // @[execution-unit.scala:462:27, :469:22]
wire _io_fu_types_T_6 = _io_fu_types_T_5; // @[execution-unit.scala:469:{22,33}]
wire [9:0] _io_fu_types_T_7 = {_io_fu_types_T_6, 9'h0}; // @[execution-unit.scala:469:{21,33}]
assign _io_fu_types_T_8 = _io_fu_types_T_4 | _io_fu_types_T_7; // @[execution-unit.scala:467:45, :468:60, :469:21]
assign io_fu_types_0 = _io_fu_types_T_8; // @[execution-unit.scala:437:7, :468:60]
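// Staging wires for the selected floating-point response flags: fpu_resp_*
// mirrors the FPUUnit fflags bundle and fdiv_resp_* mirrors the FDivSqrtUnit
// fflags bundle; one of the two is muxed onto io_fresp_bits_fflags below.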
wire fpu_resp_val; // @[execution-unit.scala:473:30]
wire [3:0] fpu_resp_fflags_bits_uop_ctrl_br_type; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:474:29]
wire [2:0] fpu_resp_fflags_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:474:29]
wire [2:0] fpu_resp_fflags_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:474:29]
wire [4:0] fpu_resp_fflags_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:474:29]
wire [2:0] fpu_resp_fflags_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ctrl_is_load; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ctrl_is_sta; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ctrl_is_std; // @[execution-unit.scala:474:29]
wire [6:0] fpu_resp_fflags_bits_uop_uopc; // @[execution-unit.scala:474:29]
wire [31:0] fpu_resp_fflags_bits_uop_inst; // @[execution-unit.scala:474:29]
wire [31:0] fpu_resp_fflags_bits_uop_debug_inst; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_rvc; // @[execution-unit.scala:474:29]
wire [39:0] fpu_resp_fflags_bits_uop_debug_pc; // @[execution-unit.scala:474:29]
wire [2:0] fpu_resp_fflags_bits_uop_iq_type; // @[execution-unit.scala:474:29]
wire [9:0] fpu_resp_fflags_bits_uop_fu_code; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_iw_state; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_br; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_jalr; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_jal; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_sfb; // @[execution-unit.scala:474:29]
wire [7:0] fpu_resp_fflags_bits_uop_br_mask; // @[execution-unit.scala:474:29]
wire [2:0] fpu_resp_fflags_bits_uop_br_tag; // @[execution-unit.scala:474:29]
wire [3:0] fpu_resp_fflags_bits_uop_ftq_idx; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_edge_inst; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_pc_lob; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_taken; // @[execution-unit.scala:474:29]
wire [19:0] fpu_resp_fflags_bits_uop_imm_packed; // @[execution-unit.scala:474:29]
wire [11:0] fpu_resp_fflags_bits_uop_csr_addr; // @[execution-unit.scala:474:29]
wire [4:0] fpu_resp_fflags_bits_uop_rob_idx; // @[execution-unit.scala:474:29]
wire [2:0] fpu_resp_fflags_bits_uop_ldq_idx; // @[execution-unit.scala:474:29]
wire [2:0] fpu_resp_fflags_bits_uop_stq_idx; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_rxq_idx; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_pdst; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_prs1; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_prs2; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_prs3; // @[execution-unit.scala:474:29]
wire [3:0] fpu_resp_fflags_bits_uop_ppred; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_prs1_busy; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_prs2_busy; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_prs3_busy; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ppred_busy; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_stale_pdst; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_exception; // @[execution-unit.scala:474:29]
wire [63:0] fpu_resp_fflags_bits_uop_exc_cause; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_bypassable; // @[execution-unit.scala:474:29]
wire [4:0] fpu_resp_fflags_bits_uop_mem_cmd; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_mem_size; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_mem_signed; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_fence; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_fencei; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_amo; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_uses_ldq; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_uses_stq; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_is_unique; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_flush_on_commit; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ldst_is_rs1; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_ldst; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_lrs1; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_lrs2; // @[execution-unit.scala:474:29]
wire [5:0] fpu_resp_fflags_bits_uop_lrs3; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_ldst_val; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_dst_rtype; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_lrs1_rtype; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_lrs2_rtype; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_frs3_en; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_fp_val; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_fp_single; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_xcpt_pf_if; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_xcpt_ae_if; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_xcpt_ma_if; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_bp_debug_if; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_bits_uop_bp_xcpt_if; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_debug_fsrc; // @[execution-unit.scala:474:29]
wire [1:0] fpu_resp_fflags_bits_uop_debug_tsrc; // @[execution-unit.scala:474:29]
wire [4:0] fpu_resp_fflags_bits_flags; // @[execution-unit.scala:474:29]
wire fpu_resp_fflags_valid; // @[execution-unit.scala:474:29]
wire [3:0] fdiv_resp_fflags_bits_uop_ctrl_br_type; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:498:30]
wire [2:0] fdiv_resp_fflags_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:498:30]
wire [2:0] fdiv_resp_fflags_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:498:30]
wire [4:0] fdiv_resp_fflags_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:498:30]
wire [2:0] fdiv_resp_fflags_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ctrl_is_load; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ctrl_is_sta; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ctrl_is_std; // @[execution-unit.scala:498:30]
wire [6:0] fdiv_resp_fflags_bits_uop_uopc; // @[execution-unit.scala:498:30]
wire [31:0] fdiv_resp_fflags_bits_uop_inst; // @[execution-unit.scala:498:30]
wire [31:0] fdiv_resp_fflags_bits_uop_debug_inst; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_rvc; // @[execution-unit.scala:498:30]
wire [39:0] fdiv_resp_fflags_bits_uop_debug_pc; // @[execution-unit.scala:498:30]
wire [2:0] fdiv_resp_fflags_bits_uop_iq_type; // @[execution-unit.scala:498:30]
wire [9:0] fdiv_resp_fflags_bits_uop_fu_code; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_iw_state; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_br; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_jalr; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_jal; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_sfb; // @[execution-unit.scala:498:30]
wire [7:0] fdiv_resp_fflags_bits_uop_br_mask; // @[execution-unit.scala:498:30]
wire [2:0] fdiv_resp_fflags_bits_uop_br_tag; // @[execution-unit.scala:498:30]
wire [3:0] fdiv_resp_fflags_bits_uop_ftq_idx; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_edge_inst; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_pc_lob; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_taken; // @[execution-unit.scala:498:30]
wire [19:0] fdiv_resp_fflags_bits_uop_imm_packed; // @[execution-unit.scala:498:30]
wire [11:0] fdiv_resp_fflags_bits_uop_csr_addr; // @[execution-unit.scala:498:30]
wire [4:0] fdiv_resp_fflags_bits_uop_rob_idx; // @[execution-unit.scala:498:30]
wire [2:0] fdiv_resp_fflags_bits_uop_ldq_idx; // @[execution-unit.scala:498:30]
wire [2:0] fdiv_resp_fflags_bits_uop_stq_idx; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_rxq_idx; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_pdst; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_prs1; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_prs2; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_prs3; // @[execution-unit.scala:498:30]
wire [3:0] fdiv_resp_fflags_bits_uop_ppred; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_prs1_busy; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_prs2_busy; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_prs3_busy; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ppred_busy; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_stale_pdst; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_exception; // @[execution-unit.scala:498:30]
wire [63:0] fdiv_resp_fflags_bits_uop_exc_cause; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_bypassable; // @[execution-unit.scala:498:30]
wire [4:0] fdiv_resp_fflags_bits_uop_mem_cmd; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_mem_size; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_mem_signed; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_fence; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_fencei; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_amo; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_uses_ldq; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_uses_stq; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_is_unique; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_flush_on_commit; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ldst_is_rs1; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_ldst; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_lrs1; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_lrs2; // @[execution-unit.scala:498:30]
wire [5:0] fdiv_resp_fflags_bits_uop_lrs3; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_ldst_val; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_dst_rtype; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_lrs1_rtype; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_lrs2_rtype; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_frs3_en; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_fp_val; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_fp_single; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_xcpt_pf_if; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_xcpt_ae_if; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_xcpt_ma_if; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_bp_debug_if; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_bits_uop_bp_xcpt_if; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_debug_fsrc; // @[execution-unit.scala:498:30]
wire [1:0] fdiv_resp_fflags_bits_uop_debug_tsrc; // @[execution-unit.scala:498:30]
wire [4:0] fdiv_resp_fflags_bits_flags; // @[execution-unit.scala:498:30]
wire fdiv_resp_fflags_valid; // @[execution-unit.scala:498:30]
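// fdiv_busy: asserted when the FDiv/Sqrt unit cannot accept a request, or
// when the request presented this cycle targets it (fu_code bit 7 set,
// presumably FU_FDV in BOOM's functional-unit encoding).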
wire [9:0] _fdiv_busy_T_1 = io_req_bits_uop_fu_code_0 & 10'h80; // @[execution-unit.scala:437:7]
wire _fdiv_busy_T = ~_FDivSqrtUnit_io_req_ready; // @[execution-unit.scala:502:22, :516:18]
wire _fdiv_busy_T_2 = |_fdiv_busy_T_1; // @[micro-op.scala:154:{40,47}]
wire _fdiv_busy_T_3 = io_req_valid_0 & _fdiv_busy_T_2; // @[execution-unit.scala:437:7, :516:58]
assign _fdiv_busy_T_4 = _fdiv_busy_T | _fdiv_busy_T_3; // @[execution-unit.scala:516:{18,41,58}]
assign fdiv_busy = _fdiv_busy_T_4; // @[execution-unit.scala:461:27, :516:41]
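// io_fresp_valid: a response on the FP writeback port is valid when either
// the FPU or the FDiv/Sqrt unit responds, except FPU responses with fu_code
// bit 9 set (presumably FU_F2I). Those carry FP-to-int results and are
// steered through the ll_iresp path instead.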
wire _io_fresp_valid_T = _FPUUnit_io_resp_valid | _FDivSqrtUnit_io_resp_valid; // @[execution-unit.scala:477:17, :502:22, :525:65]
wire [9:0] _GEN = _FPUUnit_io_resp_bits_uop_fu_code & 10'h200; // @[execution-unit.scala:477:17]
wire [9:0] _io_fresp_valid_T_1; // @[micro-op.scala:154:40]
assign _io_fresp_valid_T_1 = _GEN; // @[micro-op.scala:154:40]
wire [9:0] _queue_io_enq_valid_T; // @[micro-op.scala:154:40]
assign _queue_io_enq_valid_T = _GEN; // @[micro-op.scala:154:40]
wire _io_fresp_valid_T_2 = |_io_fresp_valid_T_1; // @[micro-op.scala:154:{40,47}]
wire _io_fresp_valid_T_3 = _FPUUnit_io_resp_valid & _io_fresp_valid_T_2; // @[execution-unit.scala:477:17, :526:47]
wire _io_fresp_valid_T_4 = ~_io_fresp_valid_T_3; // @[execution-unit.scala:526:{27,47}]
assign _io_fresp_valid_T_5 = _io_fresp_valid_T & _io_fresp_valid_T_4; // @[execution-unit.scala:525:{65,69}, :526:27]
assign io_fresp_valid_0 = _io_fresp_valid_T_5; // @[execution-unit.scala:437:7, :525:69]
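// Per-field response mux: when the FPU response is valid its micro-op wins,
// otherwise the FDiv/Sqrt micro-op is forwarded. The same one-bit select
// repeats for every flattened uop field below.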
assign _io_fresp_bits_uop_T_uopc = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_uopc : _FDivSqrtUnit_io_resp_bits_uop_uopc; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_inst = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_inst : _FDivSqrtUnit_io_resp_bits_uop_inst; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_debug_inst = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_debug_inst : _FDivSqrtUnit_io_resp_bits_uop_debug_inst; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_rvc = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_rvc : _FDivSqrtUnit_io_resp_bits_uop_is_rvc; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_debug_pc = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_debug_pc : _FDivSqrtUnit_io_resp_bits_uop_debug_pc; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_iq_type = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_iq_type : _FDivSqrtUnit_io_resp_bits_uop_iq_type; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_fu_code = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_fu_code : _FDivSqrtUnit_io_resp_bits_uop_fu_code; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_br_type = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_br_type : _FDivSqrtUnit_io_resp_bits_uop_ctrl_br_type; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_op1_sel = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_op1_sel : _FDivSqrtUnit_io_resp_bits_uop_ctrl_op1_sel; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_op2_sel = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_op2_sel : _FDivSqrtUnit_io_resp_bits_uop_ctrl_op2_sel; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_imm_sel = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_imm_sel : _FDivSqrtUnit_io_resp_bits_uop_ctrl_imm_sel; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_op_fcn = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_op_fcn : _FDivSqrtUnit_io_resp_bits_uop_ctrl_op_fcn; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_fcn_dw = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_fcn_dw : _FDivSqrtUnit_io_resp_bits_uop_ctrl_fcn_dw; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_csr_cmd = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_csr_cmd : _FDivSqrtUnit_io_resp_bits_uop_ctrl_csr_cmd; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_is_load = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_is_load : _FDivSqrtUnit_io_resp_bits_uop_ctrl_is_load; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_is_sta = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_is_sta : _FDivSqrtUnit_io_resp_bits_uop_ctrl_is_sta; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ctrl_is_std = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ctrl_is_std : _FDivSqrtUnit_io_resp_bits_uop_ctrl_is_std; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_iw_state = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_iw_state : _FDivSqrtUnit_io_resp_bits_uop_iw_state; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_iw_p1_poisoned = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_iw_p1_poisoned : _FDivSqrtUnit_io_resp_bits_uop_iw_p1_poisoned; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_iw_p2_poisoned = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_iw_p2_poisoned : _FDivSqrtUnit_io_resp_bits_uop_iw_p2_poisoned; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_br = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_br : _FDivSqrtUnit_io_resp_bits_uop_is_br; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_jalr = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_jalr : _FDivSqrtUnit_io_resp_bits_uop_is_jalr; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_jal = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_jal : _FDivSqrtUnit_io_resp_bits_uop_is_jal; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_sfb = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_sfb : _FDivSqrtUnit_io_resp_bits_uop_is_sfb; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_br_mask = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_br_mask : _FDivSqrtUnit_io_resp_bits_uop_br_mask; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_br_tag = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_br_tag : _FDivSqrtUnit_io_resp_bits_uop_br_tag; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ftq_idx = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ftq_idx : _FDivSqrtUnit_io_resp_bits_uop_ftq_idx; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_edge_inst = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_edge_inst : _FDivSqrtUnit_io_resp_bits_uop_edge_inst; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_pc_lob = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_pc_lob : _FDivSqrtUnit_io_resp_bits_uop_pc_lob; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_taken = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_taken : _FDivSqrtUnit_io_resp_bits_uop_taken; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_imm_packed = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_imm_packed : _FDivSqrtUnit_io_resp_bits_uop_imm_packed; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_csr_addr = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_csr_addr : _FDivSqrtUnit_io_resp_bits_uop_csr_addr; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_rob_idx = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_rob_idx : _FDivSqrtUnit_io_resp_bits_uop_rob_idx; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ldq_idx = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ldq_idx : _FDivSqrtUnit_io_resp_bits_uop_ldq_idx; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_stq_idx = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_stq_idx : _FDivSqrtUnit_io_resp_bits_uop_stq_idx; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_rxq_idx = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_rxq_idx : _FDivSqrtUnit_io_resp_bits_uop_rxq_idx; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_pdst = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_pdst : _FDivSqrtUnit_io_resp_bits_uop_pdst; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_prs1 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_prs1 : _FDivSqrtUnit_io_resp_bits_uop_prs1; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_prs2 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_prs2 : _FDivSqrtUnit_io_resp_bits_uop_prs2; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_prs3 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_prs3 : _FDivSqrtUnit_io_resp_bits_uop_prs3; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ppred = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ppred : _FDivSqrtUnit_io_resp_bits_uop_ppred; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_prs1_busy = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_prs1_busy : _FDivSqrtUnit_io_resp_bits_uop_prs1_busy; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_prs2_busy = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_prs2_busy : _FDivSqrtUnit_io_resp_bits_uop_prs2_busy; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_prs3_busy = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_prs3_busy : _FDivSqrtUnit_io_resp_bits_uop_prs3_busy; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ppred_busy = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ppred_busy : _FDivSqrtUnit_io_resp_bits_uop_ppred_busy; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_stale_pdst = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_stale_pdst : _FDivSqrtUnit_io_resp_bits_uop_stale_pdst; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_exception = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_exception : _FDivSqrtUnit_io_resp_bits_uop_exception; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_exc_cause = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_exc_cause : _FDivSqrtUnit_io_resp_bits_uop_exc_cause; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_bypassable = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_bypassable : _FDivSqrtUnit_io_resp_bits_uop_bypassable; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_mem_cmd = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_mem_cmd : _FDivSqrtUnit_io_resp_bits_uop_mem_cmd; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_mem_size = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_mem_size : _FDivSqrtUnit_io_resp_bits_uop_mem_size; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_mem_signed = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_mem_signed : _FDivSqrtUnit_io_resp_bits_uop_mem_signed; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_fence = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_fence : _FDivSqrtUnit_io_resp_bits_uop_is_fence; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_fencei = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_fencei : _FDivSqrtUnit_io_resp_bits_uop_is_fencei; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_amo = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_amo : _FDivSqrtUnit_io_resp_bits_uop_is_amo; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_uses_ldq = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_uses_ldq : _FDivSqrtUnit_io_resp_bits_uop_uses_ldq; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_uses_stq = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_uses_stq : _FDivSqrtUnit_io_resp_bits_uop_uses_stq; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_sys_pc2epc = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_sys_pc2epc : _FDivSqrtUnit_io_resp_bits_uop_is_sys_pc2epc; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_is_unique = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_is_unique : _FDivSqrtUnit_io_resp_bits_uop_is_unique; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_flush_on_commit = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_flush_on_commit : _FDivSqrtUnit_io_resp_bits_uop_flush_on_commit; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ldst_is_rs1 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ldst_is_rs1 : _FDivSqrtUnit_io_resp_bits_uop_ldst_is_rs1; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ldst = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ldst : _FDivSqrtUnit_io_resp_bits_uop_ldst; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_lrs1 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_lrs1 : _FDivSqrtUnit_io_resp_bits_uop_lrs1; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_lrs2 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_lrs2 : _FDivSqrtUnit_io_resp_bits_uop_lrs2; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_lrs3 = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_lrs3 : _FDivSqrtUnit_io_resp_bits_uop_lrs3; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_ldst_val = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_ldst_val : _FDivSqrtUnit_io_resp_bits_uop_ldst_val; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_dst_rtype = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_dst_rtype : _FDivSqrtUnit_io_resp_bits_uop_dst_rtype; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_lrs1_rtype = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_lrs1_rtype : _FDivSqrtUnit_io_resp_bits_uop_lrs1_rtype; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_lrs2_rtype = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_lrs2_rtype : _FDivSqrtUnit_io_resp_bits_uop_lrs2_rtype; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_frs3_en = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_frs3_en : _FDivSqrtUnit_io_resp_bits_uop_frs3_en; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_fp_val = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_fp_val : _FDivSqrtUnit_io_resp_bits_uop_fp_val; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_fp_single = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_fp_single : _FDivSqrtUnit_io_resp_bits_uop_fp_single; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_xcpt_pf_if = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_xcpt_pf_if : _FDivSqrtUnit_io_resp_bits_uop_xcpt_pf_if; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_xcpt_ae_if = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_xcpt_ae_if : _FDivSqrtUnit_io_resp_bits_uop_xcpt_ae_if; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_xcpt_ma_if = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_xcpt_ma_if : _FDivSqrtUnit_io_resp_bits_uop_xcpt_ma_if; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_bp_debug_if = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_bp_debug_if : _FDivSqrtUnit_io_resp_bits_uop_bp_debug_if; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_bp_xcpt_if = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_bp_xcpt_if : _FDivSqrtUnit_io_resp_bits_uop_bp_xcpt_if; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_debug_fsrc = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_debug_fsrc : _FDivSqrtUnit_io_resp_bits_uop_debug_fsrc; // @[Mux.scala:50:70]
assign _io_fresp_bits_uop_T_debug_tsrc = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_uop_debug_tsrc : _FDivSqrtUnit_io_resp_bits_uop_debug_tsrc; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_uopc_0 = _io_fresp_bits_uop_T_uopc; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_inst_0 = _io_fresp_bits_uop_T_inst; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_debug_inst_0 = _io_fresp_bits_uop_T_debug_inst; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_rvc_0 = _io_fresp_bits_uop_T_is_rvc; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_debug_pc_0 = _io_fresp_bits_uop_T_debug_pc; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_iq_type_0 = _io_fresp_bits_uop_T_iq_type; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_fu_code_0 = _io_fresp_bits_uop_T_fu_code; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_br_type_0 = _io_fresp_bits_uop_T_ctrl_br_type; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_op1_sel_0 = _io_fresp_bits_uop_T_ctrl_op1_sel; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_op2_sel_0 = _io_fresp_bits_uop_T_ctrl_op2_sel; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_imm_sel_0 = _io_fresp_bits_uop_T_ctrl_imm_sel; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_op_fcn_0 = _io_fresp_bits_uop_T_ctrl_op_fcn; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_fcn_dw_0 = _io_fresp_bits_uop_T_ctrl_fcn_dw; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_csr_cmd_0 = _io_fresp_bits_uop_T_ctrl_csr_cmd; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_is_load_0 = _io_fresp_bits_uop_T_ctrl_is_load; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_is_sta_0 = _io_fresp_bits_uop_T_ctrl_is_sta; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ctrl_is_std_0 = _io_fresp_bits_uop_T_ctrl_is_std; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_iw_state_0 = _io_fresp_bits_uop_T_iw_state; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_iw_p1_poisoned_0 = _io_fresp_bits_uop_T_iw_p1_poisoned; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_iw_p2_poisoned_0 = _io_fresp_bits_uop_T_iw_p2_poisoned; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_br_0 = _io_fresp_bits_uop_T_is_br; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_jalr_0 = _io_fresp_bits_uop_T_is_jalr; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_jal_0 = _io_fresp_bits_uop_T_is_jal; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_sfb_0 = _io_fresp_bits_uop_T_is_sfb; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_br_mask_0 = _io_fresp_bits_uop_T_br_mask; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_br_tag_0 = _io_fresp_bits_uop_T_br_tag; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ftq_idx_0 = _io_fresp_bits_uop_T_ftq_idx; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_edge_inst_0 = _io_fresp_bits_uop_T_edge_inst; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_pc_lob_0 = _io_fresp_bits_uop_T_pc_lob; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_taken_0 = _io_fresp_bits_uop_T_taken; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_imm_packed_0 = _io_fresp_bits_uop_T_imm_packed; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_csr_addr_0 = _io_fresp_bits_uop_T_csr_addr; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_rob_idx_0 = _io_fresp_bits_uop_T_rob_idx; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ldq_idx_0 = _io_fresp_bits_uop_T_ldq_idx; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_stq_idx_0 = _io_fresp_bits_uop_T_stq_idx; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_rxq_idx_0 = _io_fresp_bits_uop_T_rxq_idx; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_pdst_0 = _io_fresp_bits_uop_T_pdst; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_prs1_0 = _io_fresp_bits_uop_T_prs1; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_prs2_0 = _io_fresp_bits_uop_T_prs2; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_prs3_0 = _io_fresp_bits_uop_T_prs3; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ppred_0 = _io_fresp_bits_uop_T_ppred; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_prs1_busy_0 = _io_fresp_bits_uop_T_prs1_busy; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_prs2_busy_0 = _io_fresp_bits_uop_T_prs2_busy; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_prs3_busy_0 = _io_fresp_bits_uop_T_prs3_busy; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ppred_busy_0 = _io_fresp_bits_uop_T_ppred_busy; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_stale_pdst_0 = _io_fresp_bits_uop_T_stale_pdst; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_exception_0 = _io_fresp_bits_uop_T_exception; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_exc_cause_0 = _io_fresp_bits_uop_T_exc_cause; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_bypassable_0 = _io_fresp_bits_uop_T_bypassable; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_mem_cmd_0 = _io_fresp_bits_uop_T_mem_cmd; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_mem_size_0 = _io_fresp_bits_uop_T_mem_size; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_mem_signed_0 = _io_fresp_bits_uop_T_mem_signed; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_fence_0 = _io_fresp_bits_uop_T_is_fence; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_fencei_0 = _io_fresp_bits_uop_T_is_fencei; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_amo_0 = _io_fresp_bits_uop_T_is_amo; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_uses_ldq_0 = _io_fresp_bits_uop_T_uses_ldq; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_uses_stq_0 = _io_fresp_bits_uop_T_uses_stq; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_sys_pc2epc_0 = _io_fresp_bits_uop_T_is_sys_pc2epc; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_is_unique_0 = _io_fresp_bits_uop_T_is_unique; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_flush_on_commit_0 = _io_fresp_bits_uop_T_flush_on_commit; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ldst_is_rs1_0 = _io_fresp_bits_uop_T_ldst_is_rs1; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ldst_0 = _io_fresp_bits_uop_T_ldst; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_lrs1_0 = _io_fresp_bits_uop_T_lrs1; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_lrs2_0 = _io_fresp_bits_uop_T_lrs2; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_lrs3_0 = _io_fresp_bits_uop_T_lrs3; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_ldst_val_0 = _io_fresp_bits_uop_T_ldst_val; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_dst_rtype_0 = _io_fresp_bits_uop_T_dst_rtype; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_lrs1_rtype_0 = _io_fresp_bits_uop_T_lrs1_rtype; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_lrs2_rtype_0 = _io_fresp_bits_uop_T_lrs2_rtype; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_frs3_en_0 = _io_fresp_bits_uop_T_frs3_en; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_fp_val_0 = _io_fresp_bits_uop_T_fp_val; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_fp_single_0 = _io_fresp_bits_uop_T_fp_single; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_xcpt_pf_if_0 = _io_fresp_bits_uop_T_xcpt_pf_if; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_xcpt_ae_if_0 = _io_fresp_bits_uop_T_xcpt_ae_if; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_xcpt_ma_if_0 = _io_fresp_bits_uop_T_xcpt_ma_if; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_bp_debug_if_0 = _io_fresp_bits_uop_T_bp_debug_if; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_bp_xcpt_if_0 = _io_fresp_bits_uop_T_bp_xcpt_if; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_debug_fsrc_0 = _io_fresp_bits_uop_T_debug_fsrc; // @[Mux.scala:50:70]
assign io_fresp_bits_uop_debug_tsrc_0 = _io_fresp_bits_uop_T_debug_tsrc; // @[Mux.scala:50:70]
assign _io_fresp_bits_data_T = _FPUUnit_io_resp_valid ? _FPUUnit_io_resp_bits_data : _FDivSqrtUnit_io_resp_bits_data; // @[Mux.scala:50:70]
assign io_fresp_bits_data_0 = _io_fresp_bits_data_T; // @[Mux.scala:50:70]
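// Exception-flag mux: fflags follow the same priority, taking the FPU flags
// (fpu_resp_*) when fpu_resp_val is high and the FDiv/Sqrt flags
// (fdiv_resp_*) otherwise.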
assign _io_fresp_bits_fflags_T_valid = fpu_resp_val ? fpu_resp_fflags_valid : fdiv_resp_fflags_valid; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_uopc = fpu_resp_val ? fpu_resp_fflags_bits_uop_uopc : fdiv_resp_fflags_bits_uop_uopc; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_inst = fpu_resp_val ? fpu_resp_fflags_bits_uop_inst : fdiv_resp_fflags_bits_uop_inst; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_debug_inst = fpu_resp_val ? fpu_resp_fflags_bits_uop_debug_inst : fdiv_resp_fflags_bits_uop_debug_inst; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_rvc = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_rvc : fdiv_resp_fflags_bits_uop_is_rvc; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_debug_pc = fpu_resp_val ? fpu_resp_fflags_bits_uop_debug_pc : fdiv_resp_fflags_bits_uop_debug_pc; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_iq_type = fpu_resp_val ? fpu_resp_fflags_bits_uop_iq_type : fdiv_resp_fflags_bits_uop_iq_type; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_fu_code = fpu_resp_val ? fpu_resp_fflags_bits_uop_fu_code : fdiv_resp_fflags_bits_uop_fu_code; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_br_type = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_br_type : fdiv_resp_fflags_bits_uop_ctrl_br_type; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_op1_sel = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_op1_sel : fdiv_resp_fflags_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_op2_sel = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_op2_sel : fdiv_resp_fflags_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_imm_sel = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_imm_sel : fdiv_resp_fflags_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_op_fcn = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_op_fcn : fdiv_resp_fflags_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_fcn_dw = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_fcn_dw : fdiv_resp_fflags_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_csr_cmd = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_csr_cmd : fdiv_resp_fflags_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_is_load = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_is_load : fdiv_resp_fflags_bits_uop_ctrl_is_load; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_is_sta = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_is_sta : fdiv_resp_fflags_bits_uop_ctrl_is_sta; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ctrl_is_std = fpu_resp_val ? fpu_resp_fflags_bits_uop_ctrl_is_std : fdiv_resp_fflags_bits_uop_ctrl_is_std; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_iw_state = fpu_resp_val ? fpu_resp_fflags_bits_uop_iw_state : fdiv_resp_fflags_bits_uop_iw_state; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_iw_p1_poisoned = fpu_resp_val ? fpu_resp_fflags_bits_uop_iw_p1_poisoned : fdiv_resp_fflags_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_iw_p2_poisoned = fpu_resp_val ? fpu_resp_fflags_bits_uop_iw_p2_poisoned : fdiv_resp_fflags_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_br = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_br : fdiv_resp_fflags_bits_uop_is_br; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_jalr = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_jalr : fdiv_resp_fflags_bits_uop_is_jalr; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_jal = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_jal : fdiv_resp_fflags_bits_uop_is_jal; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_sfb = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_sfb : fdiv_resp_fflags_bits_uop_is_sfb; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_br_mask = fpu_resp_val ? fpu_resp_fflags_bits_uop_br_mask : fdiv_resp_fflags_bits_uop_br_mask; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_br_tag = fpu_resp_val ? fpu_resp_fflags_bits_uop_br_tag : fdiv_resp_fflags_bits_uop_br_tag; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ftq_idx = fpu_resp_val ? fpu_resp_fflags_bits_uop_ftq_idx : fdiv_resp_fflags_bits_uop_ftq_idx; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_edge_inst = fpu_resp_val ? fpu_resp_fflags_bits_uop_edge_inst : fdiv_resp_fflags_bits_uop_edge_inst; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_pc_lob = fpu_resp_val ? fpu_resp_fflags_bits_uop_pc_lob : fdiv_resp_fflags_bits_uop_pc_lob; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_taken = fpu_resp_val ? fpu_resp_fflags_bits_uop_taken : fdiv_resp_fflags_bits_uop_taken; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_imm_packed = fpu_resp_val ? fpu_resp_fflags_bits_uop_imm_packed : fdiv_resp_fflags_bits_uop_imm_packed; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_csr_addr = fpu_resp_val ? fpu_resp_fflags_bits_uop_csr_addr : fdiv_resp_fflags_bits_uop_csr_addr; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_rob_idx = fpu_resp_val ? fpu_resp_fflags_bits_uop_rob_idx : fdiv_resp_fflags_bits_uop_rob_idx; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ldq_idx = fpu_resp_val ? fpu_resp_fflags_bits_uop_ldq_idx : fdiv_resp_fflags_bits_uop_ldq_idx; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_stq_idx = fpu_resp_val ? fpu_resp_fflags_bits_uop_stq_idx : fdiv_resp_fflags_bits_uop_stq_idx; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_rxq_idx = fpu_resp_val ? fpu_resp_fflags_bits_uop_rxq_idx : fdiv_resp_fflags_bits_uop_rxq_idx; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_pdst = fpu_resp_val ? fpu_resp_fflags_bits_uop_pdst : fdiv_resp_fflags_bits_uop_pdst; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_prs1 = fpu_resp_val ? fpu_resp_fflags_bits_uop_prs1 : fdiv_resp_fflags_bits_uop_prs1; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_prs2 = fpu_resp_val ? fpu_resp_fflags_bits_uop_prs2 : fdiv_resp_fflags_bits_uop_prs2; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_prs3 = fpu_resp_val ? fpu_resp_fflags_bits_uop_prs3 : fdiv_resp_fflags_bits_uop_prs3; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ppred = fpu_resp_val ? fpu_resp_fflags_bits_uop_ppred : fdiv_resp_fflags_bits_uop_ppred; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_prs1_busy = fpu_resp_val ? fpu_resp_fflags_bits_uop_prs1_busy : fdiv_resp_fflags_bits_uop_prs1_busy; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_prs2_busy = fpu_resp_val ? fpu_resp_fflags_bits_uop_prs2_busy : fdiv_resp_fflags_bits_uop_prs2_busy; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_prs3_busy = fpu_resp_val ? fpu_resp_fflags_bits_uop_prs3_busy : fdiv_resp_fflags_bits_uop_prs3_busy; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ppred_busy = fpu_resp_val ? fpu_resp_fflags_bits_uop_ppred_busy : fdiv_resp_fflags_bits_uop_ppred_busy; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_stale_pdst = fpu_resp_val ? fpu_resp_fflags_bits_uop_stale_pdst : fdiv_resp_fflags_bits_uop_stale_pdst; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_exception = fpu_resp_val ? fpu_resp_fflags_bits_uop_exception : fdiv_resp_fflags_bits_uop_exception; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_exc_cause = fpu_resp_val ? fpu_resp_fflags_bits_uop_exc_cause : fdiv_resp_fflags_bits_uop_exc_cause; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_bypassable = fpu_resp_val ? fpu_resp_fflags_bits_uop_bypassable : fdiv_resp_fflags_bits_uop_bypassable; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_mem_cmd = fpu_resp_val ? fpu_resp_fflags_bits_uop_mem_cmd : fdiv_resp_fflags_bits_uop_mem_cmd; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_mem_size = fpu_resp_val ? fpu_resp_fflags_bits_uop_mem_size : fdiv_resp_fflags_bits_uop_mem_size; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_mem_signed = fpu_resp_val ? fpu_resp_fflags_bits_uop_mem_signed : fdiv_resp_fflags_bits_uop_mem_signed; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_fence = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_fence : fdiv_resp_fflags_bits_uop_is_fence; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_fencei = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_fencei : fdiv_resp_fflags_bits_uop_is_fencei; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_amo = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_amo : fdiv_resp_fflags_bits_uop_is_amo; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_uses_ldq = fpu_resp_val ? fpu_resp_fflags_bits_uop_uses_ldq : fdiv_resp_fflags_bits_uop_uses_ldq; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_uses_stq = fpu_resp_val ? fpu_resp_fflags_bits_uop_uses_stq : fdiv_resp_fflags_bits_uop_uses_stq; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_sys_pc2epc = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_sys_pc2epc : fdiv_resp_fflags_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_is_unique = fpu_resp_val ? fpu_resp_fflags_bits_uop_is_unique : fdiv_resp_fflags_bits_uop_is_unique; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_flush_on_commit = fpu_resp_val ? fpu_resp_fflags_bits_uop_flush_on_commit : fdiv_resp_fflags_bits_uop_flush_on_commit; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ldst_is_rs1 = fpu_resp_val ? fpu_resp_fflags_bits_uop_ldst_is_rs1 : fdiv_resp_fflags_bits_uop_ldst_is_rs1; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ldst = fpu_resp_val ? fpu_resp_fflags_bits_uop_ldst : fdiv_resp_fflags_bits_uop_ldst; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_lrs1 = fpu_resp_val ? fpu_resp_fflags_bits_uop_lrs1 : fdiv_resp_fflags_bits_uop_lrs1; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_lrs2 = fpu_resp_val ? fpu_resp_fflags_bits_uop_lrs2 : fdiv_resp_fflags_bits_uop_lrs2; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_lrs3 = fpu_resp_val ? fpu_resp_fflags_bits_uop_lrs3 : fdiv_resp_fflags_bits_uop_lrs3; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_ldst_val = fpu_resp_val ? fpu_resp_fflags_bits_uop_ldst_val : fdiv_resp_fflags_bits_uop_ldst_val; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_dst_rtype = fpu_resp_val ? fpu_resp_fflags_bits_uop_dst_rtype : fdiv_resp_fflags_bits_uop_dst_rtype; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_lrs1_rtype = fpu_resp_val ? fpu_resp_fflags_bits_uop_lrs1_rtype : fdiv_resp_fflags_bits_uop_lrs1_rtype; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_lrs2_rtype = fpu_resp_val ? fpu_resp_fflags_bits_uop_lrs2_rtype : fdiv_resp_fflags_bits_uop_lrs2_rtype; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_frs3_en = fpu_resp_val ? fpu_resp_fflags_bits_uop_frs3_en : fdiv_resp_fflags_bits_uop_frs3_en; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_fp_val = fpu_resp_val ? fpu_resp_fflags_bits_uop_fp_val : fdiv_resp_fflags_bits_uop_fp_val; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_fp_single = fpu_resp_val ? fpu_resp_fflags_bits_uop_fp_single : fdiv_resp_fflags_bits_uop_fp_single; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_xcpt_pf_if = fpu_resp_val ? fpu_resp_fflags_bits_uop_xcpt_pf_if : fdiv_resp_fflags_bits_uop_xcpt_pf_if; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_xcpt_ae_if = fpu_resp_val ? fpu_resp_fflags_bits_uop_xcpt_ae_if : fdiv_resp_fflags_bits_uop_xcpt_ae_if; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_xcpt_ma_if = fpu_resp_val ? fpu_resp_fflags_bits_uop_xcpt_ma_if : fdiv_resp_fflags_bits_uop_xcpt_ma_if; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_bp_debug_if = fpu_resp_val ? fpu_resp_fflags_bits_uop_bp_debug_if : fdiv_resp_fflags_bits_uop_bp_debug_if; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_bp_xcpt_if = fpu_resp_val ? fpu_resp_fflags_bits_uop_bp_xcpt_if : fdiv_resp_fflags_bits_uop_bp_xcpt_if; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_debug_fsrc = fpu_resp_val ? fpu_resp_fflags_bits_uop_debug_fsrc : fdiv_resp_fflags_bits_uop_debug_fsrc; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_uop_debug_tsrc = fpu_resp_val ? fpu_resp_fflags_bits_uop_debug_tsrc : fdiv_resp_fflags_bits_uop_debug_tsrc; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign _io_fresp_bits_fflags_T_bits_flags = fpu_resp_val ? fpu_resp_fflags_bits_flags : fdiv_resp_fflags_bits_flags; // @[execution-unit.scala:473:30, :474:29, :498:30, :530:30]
assign io_fresp_bits_fflags_valid_0 = _io_fresp_bits_fflags_T_valid; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_uopc_0 = _io_fresp_bits_fflags_T_bits_uop_uopc; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_inst_0 = _io_fresp_bits_fflags_T_bits_uop_inst; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_debug_inst_0 = _io_fresp_bits_fflags_T_bits_uop_debug_inst; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_rvc_0 = _io_fresp_bits_fflags_T_bits_uop_is_rvc; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_debug_pc_0 = _io_fresp_bits_fflags_T_bits_uop_debug_pc; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_iq_type_0 = _io_fresp_bits_fflags_T_bits_uop_iq_type; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_fu_code_0 = _io_fresp_bits_fflags_T_bits_uop_fu_code; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_br_type_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_br_type; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_op1_sel_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_op1_sel; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_op2_sel_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_op2_sel; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_imm_sel_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_imm_sel; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_op_fcn_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_op_fcn; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_fcn_dw_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_fcn_dw; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_csr_cmd_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_csr_cmd; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_is_load_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_is_load; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_is_sta_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_is_sta; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ctrl_is_std_0 = _io_fresp_bits_fflags_T_bits_uop_ctrl_is_std; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_iw_state_0 = _io_fresp_bits_fflags_T_bits_uop_iw_state; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_iw_p1_poisoned_0 = _io_fresp_bits_fflags_T_bits_uop_iw_p1_poisoned; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_iw_p2_poisoned_0 = _io_fresp_bits_fflags_T_bits_uop_iw_p2_poisoned; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_br_0 = _io_fresp_bits_fflags_T_bits_uop_is_br; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_jalr_0 = _io_fresp_bits_fflags_T_bits_uop_is_jalr; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_jal_0 = _io_fresp_bits_fflags_T_bits_uop_is_jal; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_sfb_0 = _io_fresp_bits_fflags_T_bits_uop_is_sfb; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_br_mask_0 = _io_fresp_bits_fflags_T_bits_uop_br_mask; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_br_tag_0 = _io_fresp_bits_fflags_T_bits_uop_br_tag; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ftq_idx_0 = _io_fresp_bits_fflags_T_bits_uop_ftq_idx; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_edge_inst_0 = _io_fresp_bits_fflags_T_bits_uop_edge_inst; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_pc_lob_0 = _io_fresp_bits_fflags_T_bits_uop_pc_lob; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_taken_0 = _io_fresp_bits_fflags_T_bits_uop_taken; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_imm_packed_0 = _io_fresp_bits_fflags_T_bits_uop_imm_packed; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_csr_addr_0 = _io_fresp_bits_fflags_T_bits_uop_csr_addr; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_rob_idx_0 = _io_fresp_bits_fflags_T_bits_uop_rob_idx; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ldq_idx_0 = _io_fresp_bits_fflags_T_bits_uop_ldq_idx; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_stq_idx_0 = _io_fresp_bits_fflags_T_bits_uop_stq_idx; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_rxq_idx_0 = _io_fresp_bits_fflags_T_bits_uop_rxq_idx; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_pdst_0 = _io_fresp_bits_fflags_T_bits_uop_pdst; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_prs1_0 = _io_fresp_bits_fflags_T_bits_uop_prs1; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_prs2_0 = _io_fresp_bits_fflags_T_bits_uop_prs2; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_prs3_0 = _io_fresp_bits_fflags_T_bits_uop_prs3; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ppred_0 = _io_fresp_bits_fflags_T_bits_uop_ppred; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_prs1_busy_0 = _io_fresp_bits_fflags_T_bits_uop_prs1_busy; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_prs2_busy_0 = _io_fresp_bits_fflags_T_bits_uop_prs2_busy; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_prs3_busy_0 = _io_fresp_bits_fflags_T_bits_uop_prs3_busy; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ppred_busy_0 = _io_fresp_bits_fflags_T_bits_uop_ppred_busy; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_stale_pdst_0 = _io_fresp_bits_fflags_T_bits_uop_stale_pdst; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_exception_0 = _io_fresp_bits_fflags_T_bits_uop_exception; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_exc_cause_0 = _io_fresp_bits_fflags_T_bits_uop_exc_cause; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_bypassable_0 = _io_fresp_bits_fflags_T_bits_uop_bypassable; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_mem_cmd_0 = _io_fresp_bits_fflags_T_bits_uop_mem_cmd; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_mem_size_0 = _io_fresp_bits_fflags_T_bits_uop_mem_size; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_mem_signed_0 = _io_fresp_bits_fflags_T_bits_uop_mem_signed; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_fence_0 = _io_fresp_bits_fflags_T_bits_uop_is_fence; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_fencei_0 = _io_fresp_bits_fflags_T_bits_uop_is_fencei; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_amo_0 = _io_fresp_bits_fflags_T_bits_uop_is_amo; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_uses_ldq_0 = _io_fresp_bits_fflags_T_bits_uop_uses_ldq; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_uses_stq_0 = _io_fresp_bits_fflags_T_bits_uop_uses_stq; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_sys_pc2epc_0 = _io_fresp_bits_fflags_T_bits_uop_is_sys_pc2epc; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_is_unique_0 = _io_fresp_bits_fflags_T_bits_uop_is_unique; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_flush_on_commit_0 = _io_fresp_bits_fflags_T_bits_uop_flush_on_commit; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ldst_is_rs1_0 = _io_fresp_bits_fflags_T_bits_uop_ldst_is_rs1; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ldst_0 = _io_fresp_bits_fflags_T_bits_uop_ldst; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_lrs1_0 = _io_fresp_bits_fflags_T_bits_uop_lrs1; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_lrs2_0 = _io_fresp_bits_fflags_T_bits_uop_lrs2; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_lrs3_0 = _io_fresp_bits_fflags_T_bits_uop_lrs3; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_ldst_val_0 = _io_fresp_bits_fflags_T_bits_uop_ldst_val; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_dst_rtype_0 = _io_fresp_bits_fflags_T_bits_uop_dst_rtype; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_lrs1_rtype_0 = _io_fresp_bits_fflags_T_bits_uop_lrs1_rtype; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_lrs2_rtype_0 = _io_fresp_bits_fflags_T_bits_uop_lrs2_rtype; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_frs3_en_0 = _io_fresp_bits_fflags_T_bits_uop_frs3_en; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_fp_val_0 = _io_fresp_bits_fflags_T_bits_uop_fp_val; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_fp_single_0 = _io_fresp_bits_fflags_T_bits_uop_fp_single; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_xcpt_pf_if_0 = _io_fresp_bits_fflags_T_bits_uop_xcpt_pf_if; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_xcpt_ae_if_0 = _io_fresp_bits_fflags_T_bits_uop_xcpt_ae_if; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_xcpt_ma_if_0 = _io_fresp_bits_fflags_T_bits_uop_xcpt_ma_if; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_bp_debug_if_0 = _io_fresp_bits_fflags_T_bits_uop_bp_debug_if; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_bp_xcpt_if_0 = _io_fresp_bits_fflags_T_bits_uop_bp_xcpt_if; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_debug_fsrc_0 = _io_fresp_bits_fflags_T_bits_uop_debug_fsrc; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_uop_debug_tsrc_0 = _io_fresp_bits_fflags_T_bits_uop_debug_tsrc; // @[execution-unit.scala:437:7, :530:30]
assign io_fresp_bits_fflags_bits_flags_0 = _io_fresp_bits_fflags_T_bits_flags; // @[execution-unit.scala:437:7, :530:30]
wire _queue_io_enq_valid_T_1 = |_queue_io_enq_valid_T; // @[micro-op.scala:154:{40,47}]
wire _queue_io_enq_valid_T_2 = _FPUUnit_io_resp_valid & _queue_io_enq_valid_T_1; // @[execution-unit.scala:477:17, :539:52]
wire _queue_io_enq_valid_T_3 = _FPUUnit_io_resp_bits_uop_uopc != 7'h2; // @[execution-unit.scala:477:17, :541:60]
wire _queue_io_enq_valid_T_4 = _queue_io_enq_valid_T_2 & _queue_io_enq_valid_T_3; // @[execution-unit.scala:539:52, :540:74, :541:60]
wire _fp_sdq_io_enq_valid_T = io_req_bits_uop_uopc_0 == 7'h2; // @[execution-unit.scala:437:7, :553:70]
wire _fp_sdq_io_enq_valid_T_1 = io_req_valid_0 & _fp_sdq_io_enq_valid_T; // @[execution-unit.scala:437:7, :553:{46,70}]
wire [7:0] _fp_sdq_io_enq_valid_T_2 = io_brupdate_b1_mispredict_mask_0 & io_req_bits_uop_br_mask_0; // @[util.scala:118:51]
wire _fp_sdq_io_enq_valid_T_3 = |_fp_sdq_io_enq_valid_T_2; // @[util.scala:118:{51,59}]
wire _fp_sdq_io_enq_valid_T_4 = ~_fp_sdq_io_enq_valid_T_3; // @[util.scala:118:59]
wire _fp_sdq_io_enq_valid_T_5 = _fp_sdq_io_enq_valid_T_1 & _fp_sdq_io_enq_valid_T_4; // @[execution-unit.scala:553:{46,81,84}]
wire [11:0] fp_sdq_io_enq_bits_data_unrecoded_rawIn_exp = io_req_bits_rs2_data_0[63:52]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _fp_sdq_io_enq_bits_data_unrecoded_rawIn_isZero_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_exp[11:9]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire fp_sdq_io_enq_bits_data_unrecoded_rawIn_isZero = _fp_sdq_io_enq_bits_data_unrecoded_rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire fp_sdq_io_enq_bits_data_unrecoded_rawIn_isZero_0 = fp_sdq_io_enq_bits_data_unrecoded_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _fp_sdq_io_enq_bits_data_unrecoded_rawIn_isSpecial_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_exp[11:10]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire fp_sdq_io_enq_bits_data_unrecoded_rawIn_isSpecial = &_fp_sdq_io_enq_bits_data_unrecoded_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [12:0] _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [53:0] _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire fp_sdq_io_enq_bits_data_unrecoded_rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire fp_sdq_io_enq_bits_data_unrecoded_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire fp_sdq_io_enq_bits_data_unrecoded_rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [12:0] fp_sdq_io_enq_bits_data_unrecoded_rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [53:0] fp_sdq_io_enq_bits_data_unrecoded_rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isNaN_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_exp[9]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_exp[9]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isNaN_T_1 = fp_sdq_io_enq_bits_data_unrecoded_rawIn_isSpecial & _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign fp_sdq_io_enq_bits_data_unrecoded_rawIn_isNaN = _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T_1 = ~_fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T_2 = fp_sdq_io_enq_bits_data_unrecoded_rawIn_isSpecial & _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign fp_sdq_io_enq_bits_data_unrecoded_rawIn_isInf = _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sign_T = io_req_bits_rs2_data_0[64]; // @[rawFloatFromRecFN.scala:59:25]
assign fp_sdq_io_enq_bits_data_unrecoded_rawIn_sign = _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sExp_T = {1'h0, fp_sdq_io_enq_bits_data_unrecoded_rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign fp_sdq_io_enq_bits_data_unrecoded_rawIn_sExp = _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T = ~fp_sdq_io_enq_bits_data_unrecoded_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_1 = {1'h0, _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [51:0] _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_2 = io_req_bits_rs2_data_0[51:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_3 = {_fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_1, _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign fp_sdq_io_enq_bits_data_unrecoded_rawIn_sig = _fp_sdq_io_enq_bits_data_unrecoded_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire fp_sdq_io_enq_bits_data_unrecoded_isSubnormal = $signed(fp_sdq_io_enq_bits_data_unrecoded_rawIn_sExp) < 13'sh402; // @[rawFloatFromRecFN.scala:55:23]
wire [5:0] _fp_sdq_io_enq_bits_data_unrecoded_denormShiftDist_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_sExp[5:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [6:0] _fp_sdq_io_enq_bits_data_unrecoded_denormShiftDist_T_1 = 7'h1 - {1'h0, _fp_sdq_io_enq_bits_data_unrecoded_denormShiftDist_T}; // @[fNFromRecFN.scala:52:{35,47}]
wire [5:0] fp_sdq_io_enq_bits_data_unrecoded_denormShiftDist = _fp_sdq_io_enq_bits_data_unrecoded_denormShiftDist_T_1[5:0]; // @[fNFromRecFN.scala:52:35]
wire [52:0] _fp_sdq_io_enq_bits_data_unrecoded_denormFract_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_sig[53:1]; // @[rawFloatFromRecFN.scala:55:23]
wire [52:0] _fp_sdq_io_enq_bits_data_unrecoded_denormFract_T_1 = _fp_sdq_io_enq_bits_data_unrecoded_denormFract_T >> fp_sdq_io_enq_bits_data_unrecoded_denormShiftDist; // @[fNFromRecFN.scala:52:35, :53:{38,42}]
wire [51:0] fp_sdq_io_enq_bits_data_unrecoded_denormFract = _fp_sdq_io_enq_bits_data_unrecoded_denormFract_T_1[51:0]; // @[fNFromRecFN.scala:53:{42,60}]
wire [10:0] _fp_sdq_io_enq_bits_data_unrecoded_expOut_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_sExp[10:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [11:0] _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_1 = {1'h0, _fp_sdq_io_enq_bits_data_unrecoded_expOut_T} - 12'h401; // @[fNFromRecFN.scala:58:{27,45}]
wire [10:0] _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_2 = _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_1[10:0]; // @[fNFromRecFN.scala:58:45]
wire [10:0] _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_3 = fp_sdq_io_enq_bits_data_unrecoded_isSubnormal ? 11'h0 : _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_2; // @[fNFromRecFN.scala:51:38, :56:16, :58:45]
wire _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_4 = fp_sdq_io_enq_bits_data_unrecoded_rawIn_isNaN | fp_sdq_io_enq_bits_data_unrecoded_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire [10:0] _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_5 = {11{_fp_sdq_io_enq_bits_data_unrecoded_expOut_T_4}}; // @[fNFromRecFN.scala:60:{21,44}]
wire [10:0] fp_sdq_io_enq_bits_data_unrecoded_expOut = _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_3 | _fp_sdq_io_enq_bits_data_unrecoded_expOut_T_5; // @[fNFromRecFN.scala:56:16, :60:{15,21}]
wire [51:0] _fp_sdq_io_enq_bits_data_unrecoded_fractOut_T = fp_sdq_io_enq_bits_data_unrecoded_rawIn_sig[51:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [51:0] _fp_sdq_io_enq_bits_data_unrecoded_fractOut_T_1 = fp_sdq_io_enq_bits_data_unrecoded_rawIn_isInf ? 52'h0 : _fp_sdq_io_enq_bits_data_unrecoded_fractOut_T; // @[rawFloatFromRecFN.scala:55:23]
wire [51:0] fp_sdq_io_enq_bits_data_unrecoded_fractOut = fp_sdq_io_enq_bits_data_unrecoded_isSubnormal ? fp_sdq_io_enq_bits_data_unrecoded_denormFract : _fp_sdq_io_enq_bits_data_unrecoded_fractOut_T_1; // @[fNFromRecFN.scala:51:38, :53:60, :62:16, :64:20]
wire [11:0] fp_sdq_io_enq_bits_data_unrecoded_hi = {fp_sdq_io_enq_bits_data_unrecoded_rawIn_sign, fp_sdq_io_enq_bits_data_unrecoded_expOut}; // @[rawFloatFromRecFN.scala:55:23]
wire [63:0] fp_sdq_io_enq_bits_data_unrecoded = {fp_sdq_io_enq_bits_data_unrecoded_hi, fp_sdq_io_enq_bits_data_unrecoded_fractOut}; // @[fNFromRecFN.scala:62:16, :66:12]
wire _fp_sdq_io_enq_bits_data_prevRecoded_T = io_req_bits_rs2_data_0[31]; // @[FPU.scala:442:10]
wire _fp_sdq_io_enq_bits_data_prevRecoded_T_1 = io_req_bits_rs2_data_0[52]; // @[FPU.scala:443:10]
wire [30:0] _fp_sdq_io_enq_bits_data_prevRecoded_T_2 = io_req_bits_rs2_data_0[30:0]; // @[FPU.scala:444:10]
wire [1:0] fp_sdq_io_enq_bits_data_prevRecoded_hi = {_fp_sdq_io_enq_bits_data_prevRecoded_T, _fp_sdq_io_enq_bits_data_prevRecoded_T_1}; // @[FPU.scala:441:28, :442:10, :443:10]
wire [32:0] fp_sdq_io_enq_bits_data_prevRecoded = {fp_sdq_io_enq_bits_data_prevRecoded_hi, _fp_sdq_io_enq_bits_data_prevRecoded_T_2}; // @[FPU.scala:441:28, :444:10]
wire [8:0] fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_exp = fp_sdq_io_enq_bits_data_prevRecoded[31:23]; // @[FPU.scala:441:28]
wire [2:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isZero_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isZero = _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isZero_0 = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isSpecial_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isSpecial = &_fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isNaN_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isNaN_T_1 = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isSpecial & _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isNaN = _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T_1 = ~_fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T_2 = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isSpecial & _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isInf = _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sign_T = fp_sdq_io_enq_bits_data_prevRecoded[32]; // @[FPU.scala:441:28]
assign fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sign = _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sExp_T = {1'h0, fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sExp = _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T = ~fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_1 = {1'h0, _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_2 = fp_sdq_io_enq_bits_data_prevRecoded[22:0]; // @[FPU.scala:441:28]
assign _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_3 = {_fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_1, _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sig = _fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire fp_sdq_io_enq_bits_data_prevUnrecoded_isSubnormal = $signed(fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sExp) < 10'sh82; // @[rawFloatFromRecFN.scala:55:23]
wire [4:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_denormShiftDist_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sExp[4:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [5:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_denormShiftDist_T_1 = 6'h1 - {1'h0, _fp_sdq_io_enq_bits_data_prevUnrecoded_denormShiftDist_T}; // @[fNFromRecFN.scala:52:{35,47}]
wire [4:0] fp_sdq_io_enq_bits_data_prevUnrecoded_denormShiftDist = _fp_sdq_io_enq_bits_data_prevUnrecoded_denormShiftDist_T_1[4:0]; // @[fNFromRecFN.scala:52:35]
wire [23:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_denormFract_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sig[24:1]; // @[rawFloatFromRecFN.scala:55:23]
wire [23:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_denormFract_T_1 = _fp_sdq_io_enq_bits_data_prevUnrecoded_denormFract_T >> fp_sdq_io_enq_bits_data_prevUnrecoded_denormShiftDist; // @[fNFromRecFN.scala:52:35, :53:{38,42}]
wire [22:0] fp_sdq_io_enq_bits_data_prevUnrecoded_denormFract = _fp_sdq_io_enq_bits_data_prevUnrecoded_denormFract_T_1[22:0]; // @[fNFromRecFN.scala:53:{42,60}]
wire [7:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sExp[7:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [8:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_1 = {1'h0, _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T} - 9'h81; // @[fNFromRecFN.scala:58:{27,45}]
wire [7:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_2 = _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_1[7:0]; // @[fNFromRecFN.scala:58:45]
wire [7:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_3 = fp_sdq_io_enq_bits_data_prevUnrecoded_isSubnormal ? 8'h0 : _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_2; // @[fNFromRecFN.scala:51:38, :56:16, :58:45]
wire _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_4 = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isNaN | fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire [7:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_5 = {8{_fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_4}}; // @[fNFromRecFN.scala:60:{21,44}]
wire [7:0] fp_sdq_io_enq_bits_data_prevUnrecoded_expOut = _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_3 | _fp_sdq_io_enq_bits_data_prevUnrecoded_expOut_T_5; // @[fNFromRecFN.scala:56:16, :60:{15,21}]
wire [22:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_fractOut_T = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sig[22:0]; // @[rawFloatFromRecFN.scala:55:23]
wire [22:0] _fp_sdq_io_enq_bits_data_prevUnrecoded_fractOut_T_1 = fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_isInf ? 23'h0 : _fp_sdq_io_enq_bits_data_prevUnrecoded_fractOut_T; // @[rawFloatFromRecFN.scala:55:23]
wire [22:0] fp_sdq_io_enq_bits_data_prevUnrecoded_fractOut = fp_sdq_io_enq_bits_data_prevUnrecoded_isSubnormal ? fp_sdq_io_enq_bits_data_prevUnrecoded_denormFract : _fp_sdq_io_enq_bits_data_prevUnrecoded_fractOut_T_1; // @[fNFromRecFN.scala:51:38, :53:60, :62:16, :64:20]
wire [8:0] fp_sdq_io_enq_bits_data_prevUnrecoded_hi = {fp_sdq_io_enq_bits_data_prevUnrecoded_rawIn_sign, fp_sdq_io_enq_bits_data_prevUnrecoded_expOut}; // @[rawFloatFromRecFN.scala:55:23]
wire [31:0] fp_sdq_io_enq_bits_data_prevUnrecoded = {fp_sdq_io_enq_bits_data_prevUnrecoded_hi, fp_sdq_io_enq_bits_data_prevUnrecoded_fractOut}; // @[fNFromRecFN.scala:62:16, :66:12]
wire [31:0] _fp_sdq_io_enq_bits_data_T = fp_sdq_io_enq_bits_data_unrecoded[63:32]; // @[FPU.scala:446:21]
wire [2:0] _fp_sdq_io_enq_bits_data_T_1 = io_req_bits_rs2_data_0[63:61]; // @[FPU.scala:249:25]
wire _fp_sdq_io_enq_bits_data_T_2 = &_fp_sdq_io_enq_bits_data_T_1; // @[FPU.scala:249:{25,56}]
wire [31:0] _fp_sdq_io_enq_bits_data_T_3 = fp_sdq_io_enq_bits_data_unrecoded[31:0]; // @[FPU.scala:446:81]
wire [31:0] _fp_sdq_io_enq_bits_data_T_4 = _fp_sdq_io_enq_bits_data_T_2 ? fp_sdq_io_enq_bits_data_prevUnrecoded : _fp_sdq_io_enq_bits_data_T_3; // @[FPU.scala:249:56, :446:{44,81}]
wire [63:0] _fp_sdq_io_enq_bits_data_T_5 = {_fp_sdq_io_enq_bits_data_T, _fp_sdq_io_enq_bits_data_T_4}; // @[FPU.scala:446:{10,21,44}] |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
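// e.g. "MyModuleName".underscore == "my_module_name"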
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
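// e.g. "Rocket Tile_0".kebab == "rocket-tile-0"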
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
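// e.g. 5.U.addWrap(4.U, 6) === 3.U, since (5 + 4) % 6 == 3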
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
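// e.g. 1.U.subWrap(4.U, 6) === 3.U, since (1 - 4) mod 6 == 3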
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
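// e.g. UIntToOH1(3.U, 8) == "b00000111".U (a thermometer code of three ones),
// OH1ToOH("b011".U(3.W)) == "b0100".U, and OH1ToUInt("b011".U(3.W)) == 2.U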
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
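// e.g. leftOR("b00100".U(5.W)) == "b11100".U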
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
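// e.g. rightOR("b00100".U(5.W)) == "b00111".U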
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
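// The pass-through module above is intended as a boundary that optimization passes keep
// intact; e.g. (hypothetical signal name) OptimizationBarrier(tlb_entry) elaborates as a
// module named OptimizationBarrier_<typeName of tlb_entry>, like the instance emitted below.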
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
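// e.g. groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) == Seq(1 -> List(1, 3), 0 -> List(2, 4))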
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_TLBEntryData_32( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae_ptw, // @[package.scala:268:18]
input io_x_ae_final, // @[package.scala:268:18]
input io_x_ae_stage2, // @[package.scala:268:18]
input io_x_pf, // @[package.scala:268:18]
input io_x_gf, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_hw, // @[package.scala:268:18]
input io_x_hx, // @[package.scala:268:18]
input io_x_hr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_ppp, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_ae_ptw, // @[package.scala:268:18]
output io_y_ae_final, // @[package.scala:268:18]
output io_y_ae_stage2, // @[package.scala:268:18]
output io_y_pf, // @[package.scala:268:18]
output io_y_gf, // @[package.scala:268:18]
output io_y_sw, // @[package.scala:268:18]
output io_y_sx, // @[package.scala:268:18]
output io_y_sr, // @[package.scala:268:18]
output io_y_hw, // @[package.scala:268:18]
output io_y_hx, // @[package.scala:268:18]
output io_y_hr, // @[package.scala:268:18]
output io_y_pw, // @[package.scala:268:18]
output io_y_px, // @[package.scala:268:18]
output io_y_pr, // @[package.scala:268:18]
output io_y_ppp, // @[package.scala:268:18]
output io_y_pal, // @[package.scala:268:18]
output io_y_paa, // @[package.scala:268:18]
output io_y_eff, // @[package.scala:268:18]
output io_y_c // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30]
wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30]
wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30]
wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30]
wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30]
wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30]
wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_g = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30]
wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30]
wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30]
wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30]
wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30]
wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30]
wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30]
wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30]
wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30]
wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30]
wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30]
wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30]
assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30]
assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30]
assign io_y_pf = io_y_pf_0; // @[package.scala:267:30]
assign io_y_gf = io_y_gf_0; // @[package.scala:267:30]
assign io_y_sw = io_y_sw_0; // @[package.scala:267:30]
assign io_y_sx = io_y_sx_0; // @[package.scala:267:30]
assign io_y_sr = io_y_sr_0; // @[package.scala:267:30]
assign io_y_hw = io_y_hw_0; // @[package.scala:267:30]
assign io_y_hx = io_y_hx_0; // @[package.scala:267:30]
assign io_y_hr = io_y_hr_0; // @[package.scala:267:30]
assign io_y_pw = io_y_pw_0; // @[package.scala:267:30]
assign io_y_px = io_y_px_0; // @[package.scala:267:30]
assign io_y_pr = io_y_pr_0; // @[package.scala:267:30]
assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30]
assign io_y_pal = io_y_pal_0; // @[package.scala:267:30]
assign io_y_paa = io_y_paa_0; // @[package.scala:267:30]
assign io_y_eff = io_y_eff_0; // @[package.scala:267:30]
assign io_y_c = io_y_c_0; // @[package.scala:267:30]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Transposer.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
trait Transposer[T <: Data] extends Module {
def dim: Int
def dataType: T
val io = IO(new Bundle {
val inRow = Flipped(Decoupled(Vec(dim, dataType)))
val outCol = Decoupled(Vec(dim, dataType))
})
}
class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
val sMoveUp :: sMoveLeft :: Nil = Enum(2)
val state = RegInit(sMoveUp)
val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1)
val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1)
io.outCol.valid := 0.U
io.inRow.ready := 0.U
switch(state) {
is(sMoveUp) {
io.inRow.ready := upCounter <= dim.U
io.outCol.valid := leftCounter > 0.U
when(io.inRow.fire) {
upCounter := upCounter + 1.U
}
when(upCounter === (dim-1).U) {
state := sMoveLeft
leftCounter := 0.U
}
when(io.outCol.fire) {
leftCounter := leftCounter - 1.U
}
}
is(sMoveLeft) {
io.inRow.ready := leftCounter <= dim.U // TODO: this is naive
io.outCol.valid := upCounter > 0.U
when(leftCounter === (dim-1).U) {
state := sMoveUp
}
when(io.inRow.fire) {
leftCounter := leftCounter + 1.U
upCounter := 0.U
}
when(io.outCol.fire) {
upCounter := upCounter - 1.U
}
}
}
// Propagate input from bottom row to top row systolically in the move up phase
// TODO: need to iterate over columns to connect Chisel values of type T
// Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?)
for (colIdx <- 0 until dim) {
regArray.foldRight(io.inRow.bits(colIdx)) {
case (regRow, prevReg) =>
when (state === sMoveUp) {
regRow(colIdx) := prevReg
}
regRow(colIdx)
}
}
// Propagate input from right side to left side systolically in the move left phase
for (rowIdx <- 0 until dim) {
regArrayT.foldRight(io.inRow.bits(rowIdx)) {
case (regCol, prevReg) =>
when (state === sMoveLeft) {
regCol(rowIdx) := prevReg
}
regCol(rowIdx)
}
}
// Pull from the left side or the top side based on the state
for (idx <- 0 until dim) {
when (state === sMoveUp) {
io.outCol.bits(idx) := regArray(0)(idx)
}.elsewhen(state === sMoveLeft) {
io.outCol.bits(idx) := regArrayT(0)(idx)
}.otherwise {
io.outCol.bits(idx) := DontCare
}
}
}
class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val LEFT_DIR = 0.U(1.W)
val UP_DIR = 1.U(1.W)
class PE extends Module {
val io = IO(new Bundle {
val inR = Input(dataType)
val inD = Input(dataType)
val outL = Output(dataType)
val outU = Output(dataType)
val dir = Input(UInt(1.W))
val en = Input(Bool())
})
val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en)
io.outU := reg
io.outL := reg
}
val pes = Seq.fill(dim,dim)(Module(new PE))
val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter
val dir = RegInit(LEFT_DIR)
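  // `dir` toggles every `dim` accepted rows (see the counter logic below), so the array alternates
  // between shifting left and shifting up; the transposed output is taken from the left column
  // while shifting left and from the top row while shifting up.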
// Wire up horizontal signals
for (row <- 0 until dim; col <- 0 until dim) {
val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL
pes(row)(col).io.inR := right_in
}
// Wire up vertical signals
for (row <- 0 until dim; col <- 0 until dim) {
val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU
pes(row)(col).io.inD := down_in
}
// Wire up global signals
pes.flatten.foreach(_.io.dir := dir)
pes.flatten.foreach(_.io.en := io.inRow.fire)
io.outCol.valid := true.B
io.inRow.ready := true.B
val left_out = VecInit(pes.transpose.head.map(_.io.outL))
val up_out = VecInit(pes.head.map(_.io.outU))
io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out)
when (io.inRow.fire) {
counter := wrappingAdd(counter, 1.U, dim)
}
when (counter === (dim-1).U && io.inRow.fire) {
dir := ~dir
}
}
class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
// state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise
val state = RegInit(0.U(1.W))
val countInc = io.inRow.fire || io.outCol.fire
val (countValue, countWrap) = Counter(countInc, dim)
io.inRow.ready := state === 0.U
io.outCol.valid := state === 1.U
for (i <- 0 until dim) {
for (j <- 0 until dim) {
when(countValue === i.U && io.inRow.fire) {
regArray(i)(j) := io.inRow.bits(j)
}
}
}
for (i <- 0 until dim) {
io.outCol.bits(i) := 0.U
for (j <- 0 until dim) {
when(countValue === j.U) {
io.outCol.bits(i) := regArrayT(j)(i)
}
}
}
when (io.inRow.fire && countWrap) {
state := 1.U
}
when (io.outCol.fire && countWrap) {
state := 0.U
}
assert(!(state === 0.U) || !io.outCol.fire)
assert(!(state === 1.U) || !io.inRow.fire)
}
| module PE_75( // @[Transposer.scala:100:9]
input clock, // @[Transposer.scala:100:9]
input reset, // @[Transposer.scala:100:9]
input [7:0] io_inR, // @[Transposer.scala:101:16]
input [7:0] io_inD, // @[Transposer.scala:101:16]
output [7:0] io_outL, // @[Transposer.scala:101:16]
output [7:0] io_outU, // @[Transposer.scala:101:16]
input io_dir, // @[Transposer.scala:101:16]
input io_en // @[Transposer.scala:101:16]
);
wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9]
wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9]
wire io_dir_0 = io_dir; // @[Transposer.scala:100:9]
wire io_en_0 = io_en; // @[Transposer.scala:100:9]
wire [7:0] io_outL_0; // @[Transposer.scala:100:9]
wire [7:0] io_outU_0; // @[Transposer.scala:100:9]
wire _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36]
wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}]
reg [7:0] reg_0; // @[Transposer.scala:110:24]
assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
always @(posedge clock) begin // @[Transposer.scala:100:9]
if (io_en_0) // @[Transposer.scala:100:9]
reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}]
  end // always @(posedge)
assign io_outL = io_outL_0; // @[Transposer.scala:100:9]
assign io_outU = io_outU_0; // @[Transposer.scala:100:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File INToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import consts._
class INToRecFN(intWidth: Int, expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"INToRecFN_i${intWidth}_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val signedIn = Input(Bool())
val in = Input(Bits(intWidth.W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val intAsRawFloat = rawFloatFromIN(io.signedIn, io.in);
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
intAsRawFloat.expWidth,
intWidth,
expWidth,
sigWidth,
flRoundOpt_sigMSBitAlwaysZero | flRoundOpt_neverUnderflows
))
roundAnyRawFNToRecFN.io.invalidExc := false.B
roundAnyRawFNToRecFN.io.infiniteExc := false.B
roundAnyRawFNToRecFN.io.in := intAsRawFloat
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
      // extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
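      // The recursion cases on the MSB of `in` and recurses on the remaining bits, so only the
      // (<= 64-entry) base case below elaborates an actual shifter.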
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
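  // e.g. for an 8-bit input 0b0001_0110 this yields 3, the number of leading zeros.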
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
File rawFloatFromIN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
object rawFloatFromIN
{
def apply(signedIn: Bool, in: Bits): RawFloat =
{
val expWidth = log2Up(in.getWidth) + 1
//*** CHANGE THIS; CAN BE VERY LARGE:
val extIntWidth = 1<<(expWidth - 1)
val sign = signedIn && in(in.getWidth - 1)
val absIn = Mux(sign, -in.asUInt, in.asUInt)
val extAbsIn = (0.U(extIntWidth.W) ## absIn)(extIntWidth - 1, 0)
val adjustedNormDist = countLeadingZeros(extAbsIn)
val sig =
(extAbsIn<<adjustedNormDist)(
extIntWidth - 1, extIntWidth - in.getWidth)
val out = Wire(new RawFloat(expWidth, in.getWidth))
out.isNaN := false.B
out.isInf := false.B
out.isZero := ! sig(in.getWidth - 1)
out.sign := sign
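    // ~adjustedNormDist equals (extIntWidth - 1 - adjustedNormDist), i.e. the bit position of the
    // leading one in extAbsIn (for a nonzero input); prefixing 2'b10 biases it into the recoded
    // exponent range.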
out.sExp := (2.U(2.W) ## ~adjustedNormDist(expWidth - 2, 0)).zext
out.sig := sig
out
}
}
| module INToRecFN_i1_e8_s24_3(); // @[INToRecFN.scala:43:7]
wire [1:0] _intAsRawFloat_absIn_T = 2'h3; // @[rawFloatFromIN.scala:52:31]
wire [2:0] _intAsRawFloat_extAbsIn_T = 3'h1; // @[rawFloatFromIN.scala:53:44]
wire [2:0] _intAsRawFloat_sig_T = 3'h2; // @[rawFloatFromIN.scala:56:22]
wire [2:0] _intAsRawFloat_out_sExp_T_2 = 3'h4; // @[rawFloatFromIN.scala:64:33]
wire [3:0] intAsRawFloat_sExp = 4'h4; // @[rawFloatFromIN.scala:59:23, :64:72]
wire [3:0] _intAsRawFloat_out_sExp_T_3 = 4'h4; // @[rawFloatFromIN.scala:59:23, :64:72]
wire [1:0] intAsRawFloat_extAbsIn = 2'h1; // @[rawFloatFromIN.scala:53:53, :59:23, :65:20]
wire [1:0] intAsRawFloat_sig = 2'h1; // @[rawFloatFromIN.scala:53:53, :59:23, :65:20]
wire [4:0] io_exceptionFlags = 5'h0; // @[INToRecFN.scala:43:7, :46:16, :60:15]
wire [32:0] io_out = 33'h80000000; // @[INToRecFN.scala:43:7, :46:16, :60:15]
wire [2:0] io_roundingMode = 3'h0; // @[INToRecFN.scala:43:7, :46:16, :60:15]
wire io_in = 1'h1; // @[Mux.scala:50:70]
wire io_detectTininess = 1'h1; // @[Mux.scala:50:70]
wire _intAsRawFloat_sign_T = 1'h1; // @[Mux.scala:50:70]
wire _intAsRawFloat_absIn_T_1 = 1'h1; // @[Mux.scala:50:70]
wire intAsRawFloat_absIn = 1'h1; // @[Mux.scala:50:70]
wire _intAsRawFloat_adjustedNormDist_T = 1'h1; // @[Mux.scala:50:70]
wire intAsRawFloat_adjustedNormDist = 1'h1; // @[Mux.scala:50:70]
wire intAsRawFloat_sig_0 = 1'h1; // @[Mux.scala:50:70]
wire _intAsRawFloat_out_isZero_T = 1'h1; // @[Mux.scala:50:70]
wire _intAsRawFloat_out_sExp_T = 1'h1; // @[Mux.scala:50:70]
wire io_signedIn = 1'h0; // @[INToRecFN.scala:43:7]
wire intAsRawFloat_sign = 1'h0; // @[rawFloatFromIN.scala:51:29]
wire _intAsRawFloat_adjustedNormDist_T_1 = 1'h0; // @[primitives.scala:91:52]
wire intAsRawFloat_isNaN = 1'h0; // @[rawFloatFromIN.scala:59:23]
wire intAsRawFloat_isInf = 1'h0; // @[rawFloatFromIN.scala:59:23]
wire intAsRawFloat_isZero = 1'h0; // @[rawFloatFromIN.scala:59:23]
wire intAsRawFloat_sign_0 = 1'h0; // @[rawFloatFromIN.scala:59:23]
wire _intAsRawFloat_out_isZero_T_1 = 1'h0; // @[rawFloatFromIN.scala:62:23]
wire _intAsRawFloat_out_sExp_T_1 = 1'h0; // @[rawFloatFromIN.scala:64:36]
RoundAnyRawFNToRecFN_ie2_is1_oe8_os24_3 roundAnyRawFNToRecFN (); // @[INToRecFN.scala:60:15]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module
    * and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File SBA.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.devices.debug.systembusaccess
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.amba.{AMBAProt, AMBAProtField}
import freechips.rocketchip.devices.debug.{DebugModuleKey, RWNotify, SBCSFields, WNotifyVal}
import freechips.rocketchip.diplomacy.TransferSizes
import freechips.rocketchip.regmapper.{RegField, RegFieldDesc, RegFieldGroup, RegFieldWrType}
import freechips.rocketchip.tilelink.{TLClientNode, TLMasterParameters, TLMasterPortParameters}
import freechips.rocketchip.util.property
object SystemBusAccessState extends scala.Enumeration {
type SystemBusAccessState = Value
val Idle, SBReadRequest, SBWriteRequest, SBReadResponse, SBWriteResponse = Value
}
object SBErrorCode extends scala.Enumeration {
type SBErrorCode = Value
val NoError = Value(0)
val Timeout = Value(1)
val BadAddr = Value(2)
val AlgnError = Value(3)
val BadAccess = Value(4)
val OtherError = Value(7)
}
object SystemBusAccessModule
{
def apply(sb2tl: SBToTL, dmactive: Bool, dmAuthenticated: Bool)(implicit p: Parameters):
(Seq[RegField], Seq[Seq[RegField]], Seq[Seq[RegField]]) =
{
import SBErrorCode._
val cfg = p(DebugModuleKey).get
val anyAddressWrEn = WireInit(false.B).suggestName("anyAddressWrEn")
val anyDataRdEn = WireInit(false.B).suggestName("anyDataRdEn")
val anyDataWrEn = WireInit(false.B).suggestName("anyDataWrEn")
// --- SBCS Status Register ---
val SBCSFieldsReg = Reg(new SBCSFields()).suggestName("SBCSFieldsReg")
val SBCSFieldsRegReset = WireInit(0.U.asTypeOf(new SBCSFields()))
SBCSFieldsRegReset.sbversion := 1.U(1.W) // This code implements a version of the spec after January 1, 2018
SBCSFieldsRegReset.sbbusy := (sb2tl.module.io.sbStateOut =/= SystemBusAccessState.Idle.id.U)
SBCSFieldsRegReset.sbaccess := 2.U
SBCSFieldsRegReset.sbasize := sb2tl.module.edge.bundle.addressBits.U
SBCSFieldsRegReset.sbaccess128 := (cfg.maxSupportedSBAccess == 128).B
SBCSFieldsRegReset.sbaccess64 := (cfg.maxSupportedSBAccess >= 64).B
SBCSFieldsRegReset.sbaccess32 := (cfg.maxSupportedSBAccess >= 32).B
SBCSFieldsRegReset.sbaccess16 := (cfg.maxSupportedSBAccess >= 16).B
SBCSFieldsRegReset.sbaccess8 := (cfg.maxSupportedSBAccess >= 8).B
val SBCSRdData = WireInit(0.U.asTypeOf(new SBCSFields())).suggestName("SBCSRdData")
val SBCSWrDataVal = WireInit(0.U(32.W))
val SBCSWrData = WireInit(SBCSWrDataVal.asTypeOf(new SBCSFields()))
val sberrorWrEn = WireInit(false.B)
val sbreadondataWrEn = WireInit(false.B)
val sbautoincrementWrEn= WireInit(false.B)
val sbaccessWrEn = WireInit(false.B)
val sbreadonaddrWrEn = WireInit(false.B)
val sbbusyerrorWrEn = WireInit(false.B)
val sbcsfields = RegFieldGroup("sbcs", Some("system bus access control and status"), Seq(
RegField.r(1, SBCSRdData.sbaccess8, RegFieldDesc("sbaccess8", "8-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess >= 8) 1 else 0))),
RegField.r(1, SBCSRdData.sbaccess16, RegFieldDesc("sbaccess16", "16-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess >= 16) 1 else 0))),
RegField.r(1, SBCSRdData.sbaccess32, RegFieldDesc("sbaccess32", "32-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess >= 32) 1 else 0))),
RegField.r(1, SBCSRdData.sbaccess64, RegFieldDesc("sbaccess64", "64-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess >= 64) 1 else 0))),
RegField.r(1, SBCSRdData.sbaccess128, RegFieldDesc("sbaccess128", "128-bit accesses supported", reset=Some(if (cfg.maxSupportedSBAccess == 128) 1 else 0))),
RegField.r(7, SBCSRdData.sbasize, RegFieldDesc("sbasize", "bits in address", reset=Some(sb2tl.module.edge.bundle.addressBits))),
WNotifyVal(3, SBCSRdData.sberror, SBCSWrData.sberror, sberrorWrEn,
RegFieldDesc("sberror", "system bus error", reset=Some(0), wrType=Some(RegFieldWrType.ONE_TO_CLEAR))),
WNotifyVal(1, SBCSRdData.sbreadondata, SBCSWrData.sbreadondata, sbreadondataWrEn,
RegFieldDesc("sbreadondata", "system bus read on data", reset=Some(0))),
WNotifyVal(1, SBCSRdData.sbautoincrement, SBCSWrData.sbautoincrement, sbautoincrementWrEn,
RegFieldDesc("sbautoincrement", "system bus auto-increment address", reset=Some(0))),
WNotifyVal(3, SBCSRdData.sbaccess, SBCSWrData.sbaccess, sbaccessWrEn,
RegFieldDesc("sbaccess", "system bus access size", reset=Some(2))),
WNotifyVal(1, SBCSRdData.sbreadonaddr, SBCSWrData.sbreadonaddr, sbreadonaddrWrEn,
RegFieldDesc("sbreadonaddr", "system bus read on data", reset=Some(0))),
RegField.r(1, SBCSRdData.sbbusy, RegFieldDesc("sbbusy", "system bus access is busy", reset=Some(0))),
WNotifyVal(1, SBCSRdData.sbbusyerror, SBCSWrData.sbbusyerror, sbbusyerrorWrEn,
RegFieldDesc("sbbusyerror", "system bus busy error", reset=Some(0), wrType=Some(RegFieldWrType.ONE_TO_CLEAR))),
RegField(6),
RegField.r(3, SBCSRdData.sbversion, RegFieldDesc("sbversion", "system bus access version", reset=Some(1))),
))
// --- System Bus Address Registers ---
// ADDR0 Register is required
// Instantiate ADDR1-3 registers as needed depending on system bus address width
val hasSBAddr1 = (sb2tl.module.edge.bundle.addressBits >= 33)
val hasSBAddr2 = (sb2tl.module.edge.bundle.addressBits >= 65)
val hasSBAddr3 = (sb2tl.module.edge.bundle.addressBits >= 97)
val hasAddr = Seq(true, hasSBAddr1, hasSBAddr2, hasSBAddr3)
val SBADDRESSFieldsReg = Reg(Vec(4, UInt(32.W)))
SBADDRESSFieldsReg.zipWithIndex.foreach { case(a,i) => a.suggestName("SBADDRESS"+i+"FieldsReg")}
val SBADDRESSWrData = WireInit(VecInit(Seq.fill(4) {0.U(32.W)} ))
val SBADDRESSRdEn = WireInit(VecInit(Seq.fill(4) {false.B} ))
val SBADDRESSWrEn = WireInit(VecInit(Seq.fill(4) {false.B} ))
val autoIncrementedAddr = WireInit(0.U(128.W))
autoIncrementedAddr := Cat(SBADDRESSFieldsReg.reverse) + (1.U << SBCSFieldsReg.sbaccess)
autoIncrementedAddr.suggestName("autoIncrementedAddr")
val sbaddrfields: Seq[Seq[RegField]] = SBADDRESSFieldsReg.zipWithIndex.map { case(a,i) =>
if(hasAddr(i)) {
when (~dmactive || ~dmAuthenticated) {
a := 0.U(32.W)
}.otherwise {
a := Mux(SBADDRESSWrEn(i) && !SBCSRdData.sberror && !SBCSFieldsReg.sbbusy && !SBCSFieldsReg.sbbusyerror, SBADDRESSWrData(i),
Mux((sb2tl.module.io.rdDone || sb2tl.module.io.wrDone) && SBCSFieldsReg.sbautoincrement, autoIncrementedAddr(32*i+31,32*i), a))
}
RegFieldGroup("dmi_sbaddr"+i, Some("SBA Address Register"), Seq(RWNotify(32, a, SBADDRESSWrData(i), SBADDRESSRdEn(i), SBADDRESSWrEn(i),
Some(RegFieldDesc("dmi_sbaddr"+i, "SBA address register", reset=Some(0), volatile=true)))))
} else {
a := DontCare
Seq.empty[RegField]
}
}
sb2tl.module.io.addrIn := Mux(SBADDRESSWrEn(0),
Cat(Cat(SBADDRESSFieldsReg.drop(1).reverse), SBADDRESSWrData(0)),
Cat(SBADDRESSFieldsReg.reverse))
anyAddressWrEn := SBADDRESSWrEn.reduce(_ || _)
// --- System Bus Data Registers ---
// DATA0 Register is required
// DATA1-3 Registers may not be needed depending on implementation
val hasSBData1 = (cfg.maxSupportedSBAccess > 32)
val hasSBData2And3 = (cfg.maxSupportedSBAccess == 128)
val hasData = Seq(true, hasSBData1, hasSBData2And3, hasSBData2And3)
val SBDATAFieldsReg = Reg(Vec(4, Vec(4, UInt(8.W))))
SBDATAFieldsReg.zipWithIndex.foreach { case(d,i) => d.zipWithIndex.foreach { case(d,j) => d.suggestName("SBDATA"+i+"BYTE"+j) }}
val SBDATARdData = WireInit(VecInit(Seq.fill(4) {0.U(32.W)} ))
SBDATARdData.zipWithIndex.foreach { case(d,i) => d.suggestName("SBDATARdData"+i) }
val SBDATAWrData = WireInit(VecInit(Seq.fill(4) {0.U(32.W)} ))
SBDATAWrData.zipWithIndex.foreach { case(d,i) => d.suggestName("SBDATAWrData"+i) }
val SBDATARdEn = WireInit(VecInit(Seq.fill(4) {false.B} ))
val SBDATAWrEn = WireInit(VecInit(Seq.fill(4) {false.B} ))
SBDATAWrEn.zipWithIndex.foreach { case(d,i) => d.suggestName("SBDATAWrEn"+i) }
val sbdatafields: Seq[Seq[RegField]] = SBDATAFieldsReg.zipWithIndex.map { case(d,i) =>
if(hasData(i)) {
// For data registers, load enable per-byte
for (j <- 0 to 3) {
when (~dmactive || ~dmAuthenticated) {
d(j) := 0.U(8.W)
}.otherwise {
d(j) := Mux(SBDATAWrEn(i) && !SBCSFieldsReg.sbbusy && !SBCSFieldsReg.sbbusyerror && !SBCSRdData.sberror, SBDATAWrData(i)(8*j+7,8*j),
Mux(sb2tl.module.io.rdLoad(4*i+j), sb2tl.module.io.dataOut, d(j)))
}
}
SBDATARdData(i) := Cat(d.reverse)
RegFieldGroup("dmi_sbdata"+i, Some("SBA Data Register"), Seq(RWNotify(32, SBDATARdData(i), SBDATAWrData(i), SBDATARdEn(i), SBDATAWrEn(i),
Some(RegFieldDesc("dmi_sbdata"+i, "SBA data register", reset=Some(0), volatile=true)))))
} else {
for (j <- 0 to 3) { d(j) := DontCare }
Seq.empty[RegField]
}
}
sb2tl.module.io.dataIn := Mux(sb2tl.module.io.wrEn,Cat(SBDATAWrData.reverse),Cat(SBDATAFieldsReg.flatten.reverse))
anyDataRdEn := SBDATARdEn.reduce(_ || _)
anyDataWrEn := SBDATAWrEn.reduce(_ || _)
val tryWrEn = SBDATAWrEn(0)
val tryRdEn = (SBADDRESSWrEn(0) && SBCSFieldsReg.sbreadonaddr) || (SBDATARdEn(0) && SBCSFieldsReg.sbreadondata)
val sbAccessError = (SBCSFieldsReg.sbaccess === 0.U) && (SBCSFieldsReg.sbaccess8 =/= 1.U) ||
(SBCSFieldsReg.sbaccess === 1.U) && (SBCSFieldsReg.sbaccess16 =/= 1.U) ||
(SBCSFieldsReg.sbaccess === 2.U) && (SBCSFieldsReg.sbaccess32 =/= 1.U) ||
(SBCSFieldsReg.sbaccess === 3.U) && (SBCSFieldsReg.sbaccess64 =/= 1.U) ||
(SBCSFieldsReg.sbaccess === 4.U) && (SBCSFieldsReg.sbaccess128 =/= 1.U) || (SBCSFieldsReg.sbaccess > 4.U)
    val compareAddr = Wire(UInt(32.W)) // Need to use the written or latched address to detect the error case, depending on how the transaction is initiated
compareAddr := Mux(SBADDRESSWrEn(0),SBADDRESSWrData(0),SBADDRESSFieldsReg(0))
val sbAlignmentError = (SBCSFieldsReg.sbaccess === 1.U) && (compareAddr(0) =/= 0.U) ||
(SBCSFieldsReg.sbaccess === 2.U) && (compareAddr(1,0) =/= 0.U) ||
(SBCSFieldsReg.sbaccess === 3.U) && (compareAddr(2,0) =/= 0.U) ||
(SBCSFieldsReg.sbaccess === 4.U) && (compareAddr(3,0) =/= 0.U)
sbAccessError.suggestName("sbAccessError")
sbAlignmentError.suggestName("sbAlignmentError")
sb2tl.module.io.wrEn := dmAuthenticated && tryWrEn && !SBCSFieldsReg.sbbusy && !SBCSFieldsReg.sbbusyerror && !SBCSRdData.sberror && !sbAccessError && !sbAlignmentError
sb2tl.module.io.rdEn := dmAuthenticated && tryRdEn && !SBCSFieldsReg.sbbusy && !SBCSFieldsReg.sbbusyerror && !SBCSRdData.sberror && !sbAccessError && !sbAlignmentError
sb2tl.module.io.sizeIn := SBCSFieldsReg.sbaccess
val sbBusy = (sb2tl.module.io.sbStateOut =/= SystemBusAccessState.Idle.id.U)
when (~dmactive || ~dmAuthenticated) {
SBCSFieldsReg := SBCSFieldsRegReset
}.otherwise {
SBCSFieldsReg.sbbusyerror := Mux(sbbusyerrorWrEn && SBCSWrData.sbbusyerror, false.B, // W1C
Mux(anyAddressWrEn && sbBusy, true.B, // Set if a write to SBADDRESS occurs while busy
Mux((anyDataRdEn || anyDataWrEn) && sbBusy, true.B, SBCSFieldsReg.sbbusyerror))) // Set if any access to SBDATA occurs while busy
SBCSFieldsReg.sbreadonaddr := Mux(sbreadonaddrWrEn, SBCSWrData.sbreadonaddr , SBCSFieldsReg.sbreadonaddr)
SBCSFieldsReg.sbautoincrement := Mux(sbautoincrementWrEn, SBCSWrData.sbautoincrement, SBCSFieldsReg.sbautoincrement)
SBCSFieldsReg.sbreadondata := Mux(sbreadondataWrEn, SBCSWrData.sbreadondata , SBCSFieldsReg.sbreadondata)
SBCSFieldsReg.sbaccess := Mux(sbaccessWrEn, SBCSWrData.sbaccess, SBCSFieldsReg.sbaccess)
SBCSFieldsReg.sbversion := 1.U(1.W) // This code implements a version of the spec after January 1, 2018
}
// sbErrorReg has a per-bit load enable since each bit can be individually cleared by writing a 1 to it
val sbErrorReg = Reg(Vec(4, UInt(1.W)))
when(~dmactive || ~dmAuthenticated) {
for (i <- 0 until 3)
sbErrorReg(i) := 0.U
}.otherwise {
for (i <- 0 until 3)
sbErrorReg(i) := Mux(sberrorWrEn && SBCSWrData.sberror(i) === 1.U, NoError.id.U.extract(i), // W1C
Mux((sb2tl.module.io.wrEn && !sb2tl.module.io.wrLegal) || (sb2tl.module.io.rdEn && !sb2tl.module.io.rdLegal), BadAddr.id.U.extract(i), // Bad address accessed
Mux((tryWrEn || tryRdEn) && sbAlignmentError, AlgnError.id.U.extract(i), // Address alignment error
Mux((tryWrEn || tryRdEn) && sbAccessError, BadAccess.id.U.extract(i), // Access size error
Mux((sb2tl.module.io.rdDone || sb2tl.module.io.wrDone) && sb2tl.module.io.respError, OtherError.id.U.extract(i), sbErrorReg(i)))))) // Response error from TL
}
SBCSRdData := SBCSFieldsReg
SBCSRdData.sbasize := sb2tl.module.edge.bundle.addressBits.U
SBCSRdData.sbaccess128 := (cfg.maxSupportedSBAccess == 128).B
SBCSRdData.sbaccess64 := (cfg.maxSupportedSBAccess >= 64).B
SBCSRdData.sbaccess32 := (cfg.maxSupportedSBAccess >= 32).B
SBCSRdData.sbaccess16 := (cfg.maxSupportedSBAccess >= 16).B
SBCSRdData.sbaccess8 := (cfg.maxSupportedSBAccess >= 8).B
SBCSRdData.sbbusy := sbBusy
SBCSRdData.sberror := sbErrorReg.asUInt
when (~dmAuthenticated) { // Read value must be 0 if not authenticated
SBCSRdData := 0.U.asTypeOf(new SBCSFields())
}
property.cover(SBCSFieldsReg.sbbusyerror, "SBCS Cover", "sberror set")
property.cover(SBCSFieldsReg.sbbusy === 3.U, "SBCS Cover", "sbbusyerror alignment error")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 0.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "8-bit access")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 1.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "16-bit access")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 2.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "32-bit access")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 3.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "64-bit access")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess === 4.U && !sbAccessError && !sbAlignmentError, "SBCS Cover", "128-bit access")
property.cover(SBCSFieldsReg.sbautoincrement && SBCSFieldsReg.sbbusy, "SBCS Cover", "Access with autoincrement set")
property.cover(!SBCSFieldsReg.sbautoincrement && SBCSFieldsReg.sbbusy, "SBCS Cover", "Access without autoincrement set")
property.cover((sb2tl.module.io.wrEn || sb2tl.module.io.rdEn) && SBCSFieldsReg.sbaccess > 4.U, "SBCS Cover", "Invalid sbaccess value")
(sbcsfields, sbaddrfields, sbdatafields)
}
}
class SBToTL(implicit p: Parameters) extends LazyModule {
val cfg = p(DebugModuleKey).get
val node = TLClientNode(Seq(TLMasterPortParameters.v1(
clients = Seq(TLMasterParameters.v1("debug")),
requestFields = Seq(AMBAProtField()))))
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
val io = IO(new Bundle {
val rdEn = Input(Bool())
val wrEn = Input(Bool())
val addrIn = Input(UInt(128.W)) // TODO: Parameterize these widths
val dataIn = Input(UInt(128.W))
val sizeIn = Input(UInt(3.W))
val rdLegal = Output(Bool())
val wrLegal = Output(Bool())
val rdDone = Output(Bool())
val wrDone = Output(Bool())
val respError = Output(Bool())
val dataOut = Output(UInt(8.W))
val rdLoad = Output(Vec(cfg.maxSupportedSBAccess/8, Bool()))
val sbStateOut = Output(UInt(log2Ceil(SystemBusAccessState.maxId).W))
})
val rf_reset = IO(Input(Reset()))
import SystemBusAccessState._
val (tl, edge) = node.out(0)
val sbState = RegInit(0.U)
// --- Drive payloads on bus to TileLink ---
val d = Queue(tl.d, 2) // Add a small buffer since response could arrive on same cycle as request
d.ready := (sbState === SBReadResponse.id.U) || (sbState === SBWriteResponse.id.U)
val muxedData = WireInit(0.U(8.W))
val requestValid = tl.a.valid
val requestReady = tl.a.ready
val responseValid = d.valid
val responseReady = d.ready
val counter = RegInit(0.U((log2Ceil(cfg.maxSupportedSBAccess/8)+1).W))
val vecData = Wire(Vec(cfg.maxSupportedSBAccess/8, UInt(8.W)))
vecData.zipWithIndex.map { case (vd, i) => vd := io.dataIn(8*i+7,8*i) }
muxedData := vecData(counter(log2Ceil(vecData.size)-1,0))
// Need an additional check to determine if address is safe for Get/Put
val rdLegal_addr = edge.manager.supportsGetSafe(io.addrIn, io.sizeIn, Some(TransferSizes(1,cfg.maxSupportedSBAccess/8)))
val wrLegal_addr = edge.manager.supportsPutFullSafe(io.addrIn, io.sizeIn, Some(TransferSizes(1,cfg.maxSupportedSBAccess/8)))
val (_, gbits) = edge.Get(0.U, io.addrIn, io.sizeIn)
val (_, pfbits) = edge.Put(0.U, io.addrIn, io.sizeIn, muxedData)
io.rdLegal := rdLegal_addr
io.wrLegal := wrLegal_addr
io.sbStateOut := sbState
when(sbState === SBReadRequest.id.U) { tl.a.bits := gbits }
.otherwise { tl.a.bits := pfbits }
tl.a.bits.user.lift(AMBAProt).foreach { x =>
x.bufferable := false.B
x.modifiable := false.B
x.readalloc := false.B
x.writealloc := false.B
x.privileged := true.B
x.secure := true.B
x.fetch := false.B
}
val respError = d.bits.denied || d.bits.corrupt
io.respError := respError
val wrTxValid = sbState === SBWriteRequest.id.U && requestValid && requestReady
val rdTxValid = sbState === SBReadResponse.id.U && responseValid && responseReady
val txLast = counter === ((1.U << io.sizeIn) - 1.U)
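    // The TileLink data path here is one byte wide, so an access of 2^sizeIn bytes takes
    // (1 << sizeIn) beats; txLast marks the final beat of the current transfer.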
counter := Mux((wrTxValid || rdTxValid) && txLast, 0.U,
Mux((wrTxValid || rdTxValid) , counter+1.U, counter))
for (i <- 0 until (cfg.maxSupportedSBAccess/8)) {
io.rdLoad(i) := rdTxValid && (counter === i.U)
}
// --- State Machine to interface with TileLink ---
when (sbState === Idle.id.U){
sbState := Mux(io.rdEn && io.rdLegal, SBReadRequest.id.U,
Mux(io.wrEn && io.wrLegal, SBWriteRequest.id.U, sbState))
}.elsewhen (sbState === SBReadRequest.id.U){
sbState := Mux(requestValid && requestReady, SBReadResponse.id.U, sbState)
}.elsewhen (sbState === SBWriteRequest.id.U){
sbState := Mux(wrTxValid && txLast, SBWriteResponse.id.U, sbState)
}.elsewhen (sbState === SBReadResponse.id.U){
sbState := Mux(rdTxValid && txLast, Idle.id.U, sbState)
}.elsewhen (sbState === SBWriteResponse.id.U){
sbState := Mux(responseValid && responseReady, Idle.id.U, sbState)
}
io.rdDone := rdTxValid && txLast
io.wrDone := (sbState === SBWriteResponse.id.U) && responseValid && responseReady
io.dataOut := d.bits.data
tl.a.valid := (sbState === SBReadRequest.id.U) || (sbState === SBWriteRequest.id.U)
// Tie off unused channels
tl.b.ready := false.B
tl.c.valid := false.B
tl.e.valid := false.B
assert (sbState === Idle.id.U ||
sbState === SBReadRequest.id.U ||
sbState === SBWriteRequest.id.U ||
sbState === SBReadResponse.id.U ||
sbState === SBWriteResponse.id.U, "SBA state machine in undefined state")
property.cover (sbState === Idle.id.U, "SBA State Cover", "SBA Access Idle")
property.cover (sbState === SBReadRequest.id.U, "SBA State Cover", "SBA Access Read Req")
property.cover (sbState === SBWriteRequest.id.U, "SBA State Cover", "SBA Access Write Req")
property.cover (sbState === SBReadResponse.id.U, "SBA State Cover", "SBA Access Read Resp")
property.cover (sbState === SBWriteResponse.id.U, "SBA State Cover", "SBA Access Write Resp")
property.cover (io.rdEn && !io.rdLegal, "SB Legality Cover", "SBA Rd Address Illegal")
property.cover (io.wrEn && !io.wrLegal, "SB Legality Cover", "SBA Wr Address Illegal")
}
}
| module SBToTL( // @[SBA.scala:273:9]
input clock, // @[SBA.scala:273:9]
input reset, // @[SBA.scala:273:9]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input io_rdEn, // @[SBA.scala:274:16]
input io_wrEn, // @[SBA.scala:274:16]
input [127:0] io_addrIn, // @[SBA.scala:274:16]
input [127:0] io_dataIn, // @[SBA.scala:274:16]
input [2:0] io_sizeIn, // @[SBA.scala:274:16]
output io_rdLegal, // @[SBA.scala:274:16]
output io_wrLegal, // @[SBA.scala:274:16]
output io_rdDone, // @[SBA.scala:274:16]
output io_wrDone, // @[SBA.scala:274:16]
output io_respError, // @[SBA.scala:274:16]
output [7:0] io_dataOut, // @[SBA.scala:274:16]
output io_rdLoad_0, // @[SBA.scala:274:16]
output io_rdLoad_1, // @[SBA.scala:274:16]
output io_rdLoad_2, // @[SBA.scala:274:16]
output io_rdLoad_3, // @[SBA.scala:274:16]
output io_rdLoad_4, // @[SBA.scala:274:16]
output io_rdLoad_5, // @[SBA.scala:274:16]
output io_rdLoad_6, // @[SBA.scala:274:16]
output io_rdLoad_7, // @[SBA.scala:274:16]
output [2:0] io_sbStateOut // @[SBA.scala:274:16]
);
wire _d_q_io_deq_valid; // @[Decoupled.scala:362:21]
wire _d_q_io_deq_bits_denied; // @[Decoupled.scala:362:21]
wire _d_q_io_deq_bits_corrupt; // @[Decoupled.scala:362:21]
reg [2:0] sbState; // @[SBA.scala:295:26]
wire _rdTxValid_T = sbState == 3'h3; // @[SBA.scala:295:26, :299:25]
wire _io_wrDone_T = sbState == 3'h4; // @[SBA.scala:295:26, :299:62]
wire d_q_io_deq_ready = _rdTxValid_T | _io_wrDone_T; // @[SBA.scala:299:{25,50,62}]
reg [3:0] counter; // @[SBA.scala:307:26]
wire [7:0][7:0] _GEN = {{io_dataIn[63:56]}, {io_dataIn[55:48]}, {io_dataIn[47:40]}, {io_dataIn[39:32]}, {io_dataIn[31:24]}, {io_dataIn[23:16]}, {io_dataIn[15:8]}, {io_dataIn[7:0]}}; // @[SBA.scala:309:63, :310:15]
wire [115:0] _GEN_0 = {io_addrIn[127:14], ~(io_addrIn[13:12])}; // @[Parameters.scala:137:{31,41,46}]
wire [114:0] _GEN_1 = {io_addrIn[127:21], io_addrIn[20:17] ^ 4'h8, io_addrIn[15:12]}; // @[Parameters.scala:137:{31,41,46}]
wire [111:0] _GEN_2 = {io_addrIn[127:26], io_addrIn[25:16] ^ 10'h200}; // @[Parameters.scala:137:{31,41,46}]
wire [115:0] _GEN_3 = {io_addrIn[127:26], io_addrIn[25:12] ^ 14'h2010}; // @[Parameters.scala:137:{31,41,46}]
wire [111:0] _GEN_4 = {io_addrIn[127:28], io_addrIn[27:16] ^ 12'h800}; // @[Parameters.scala:137:{31,41,46}]
wire [101:0] _GEN_5 = {io_addrIn[127:28], ~(io_addrIn[27:26])}; // @[Parameters.scala:137:{31,41,46}]
wire [115:0] _GEN_6 = {io_addrIn[127:29], io_addrIn[28:12] ^ 17'h10020}; // @[Parameters.scala:137:{31,41,46}]
wire [109:0] _GEN_7 = {io_addrIn[127:29], io_addrIn[28:18] ^ 11'h401}; // @[Parameters.scala:137:{31,41,46}]
wire [99:0] _GEN_8 = {io_addrIn[127:32], io_addrIn[31:28] ^ 4'h8}; // @[Parameters.scala:137:{31,41,46}]
wire io_rdLegal_0 = ~(|(io_addrIn[127:13])) | ~(|_GEN_0) | {io_addrIn[127:17], ~(io_addrIn[16])} == 112'h0 | ~(|_GEN_1) | ~(|_GEN_2) | ~(|_GEN_3) | ~(|_GEN_4) | ~(|_GEN_5) | ~(|_GEN_6) | ~(|_GEN_7) | ~(|_GEN_8); // @[Parameters.scala:685:42]
wire io_wrLegal_0 = ~(|(io_addrIn[127:13])) | ~(|_GEN_0) | ~(|_GEN_1) | ~(|_GEN_2) | ~(|_GEN_3) | ~(|_GEN_4) | ~(|_GEN_5) | ~(|_GEN_6) | ~(|_GEN_7) | ~(|_GEN_8); // @[Parameters.scala:685:42]
wire _nodeOut_a_valid_T = sbState == 3'h1; // @[SBA.scala:295:26, :322:18]
wire _nodeOut_a_valid_T_1 = sbState == 3'h2; // @[SBA.scala:295:26, :338:29]
wire rdTxValid = _rdTxValid_T & _d_q_io_deq_valid & d_q_io_deq_ready; // @[Decoupled.scala:362:21]
wire txLast = {4'h0, counter} == (8'h1 << io_sizeIn) - 8'h1; // @[SBA.scala:307:26, :340:{29,39,53}]
wire _GEN_9 = sbState == 3'h0; // @[SBA.scala:295:26, :349:19]
wire nodeOut_a_valid = _nodeOut_a_valid_T | _nodeOut_a_valid_T_1; // @[SBA.scala:322:18, :338:29, :366:52] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
package constellation.channel
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util._
import constellation.noc.{HasNoCParams}
class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams {
val io = IO(new Bundle {
val in = Input(new Channel(cParam))
})
val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B }))
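  // One bit per virtual channel: set when a head flit is accepted and cleared by the matching tail,
  // so the assert below can flag a second head arriving before the previous packet's tail.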
for (i <- 0 until cParam.srcSpeedup) {
val flit = io.in.flit(i)
when (flit.valid) {
when (flit.bits.head) {
in_flight(flit.bits.virt_channel_id) := true.B
assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken")
}
when (flit.bits.tail) {
in_flight(flit.bits.virt_channel_id) := false.B
}
}
val possibleFlows = cParam.possibleFlows
when (flit.valid && flit.bits.head) {
cParam match {
case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) =>
assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
}
}
}
File Types.scala:
package constellation.routing
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import constellation.noc.{HasNoCParams}
import constellation.channel.{Flit}
/** A representation for 1 specific virtual channel in wormhole routing
*
* @param src the source node
* @param vc ID for the virtual channel
* @param dst the destination node
* @param n_vc the number of virtual channels
*/
// BEGIN: ChannelRoutingInfo
case class ChannelRoutingInfo(
src: Int,
dst: Int,
vc: Int,
n_vc: Int
) {
// END: ChannelRoutingInfo
require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this")
require (!(src == -1 && dst == -1), s"Illegal $this")
require (vc < n_vc, s"Illegal $this")
val isIngress = src == -1
val isEgress = dst == -1
}
/** Represents the properties of a packet that are relevant for routing
* ingressId and egressId uniquely identify a flow, but vnet and dst are used here
 * to simplify the implementation of routing relations.
*
* @param ingressId packet's source ingress point
* @param egressId packet's destination egress point
* @param vNet virtual subnetwork identifier
* @param dst packet's destination node ID
*/
// BEGIN: FlowRoutingInfo
case class FlowRoutingInfo(
ingressId: Int,
egressId: Int,
vNetId: Int,
ingressNode: Int,
ingressNodeId: Int,
egressNode: Int,
egressNodeId: Int,
fifo: Boolean
) {
// END: FlowRoutingInfo
def isFlow(f: FlowRoutingBundle): Bool = {
(f.ingress_node === ingressNode.U &&
f.egress_node === egressNode.U &&
f.ingress_node_id === ingressNodeId.U &&
f.egress_node_id === egressNodeId.U)
}
def asLiteral(b: FlowRoutingBundle): BigInt = {
Seq(
(vNetId , b.vnet_id),
(ingressNode , b.ingress_node),
(ingressNodeId , b.ingress_node_id),
(egressNode , b.egress_node),
(egressNodeId , b.egress_node_id)
).foldLeft(0)((l, t) => {
(l << t._2.getWidth) | t._1
})
}
}
class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams {
// Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination
// This simplifies the routing tables
val vnet_id = UInt(log2Ceil(nVirtualNetworks).W)
val ingress_node = UInt(log2Ceil(nNodes).W)
val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W)
val egress_node = UInt(log2Ceil(nNodes).W)
val egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W)
}
| module NoCMonitor_3( // @[Monitor.scala:11:7]
input clock, // @[Monitor.scala:11:7]
input reset, // @[Monitor.scala:11:7]
input io_in_flit_0_valid, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);
reg in_flight_0; // @[Monitor.scala:16:26]
reg in_flight_1; // @[Monitor.scala:16:26]
reg in_flight_2; // @[Monitor.scala:16:26]
reg in_flight_3; // @[Monitor.scala:16:26]
reg in_flight_4; // @[Monitor.scala:16:26]
reg in_flight_5; // @[Monitor.scala:16:26] |
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
      // extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
File MulAddRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
val mainAlignedSigC =
(Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
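// Illustrative usage sketch (not part of the library): a thin wrapper that
// computes (a * b) + c on recoded single-precision operands with round-to-
// nearest-even and tininess detected after rounding. It relies only on the
// MulAddRecFN IO declared above and the imports at the top of this file; the
// wrapper's name and ports are invented for the example.
class MulAddRecF32Sketch extends RawModule
{
    val io = IO(new Bundle {
        val a = Input(Bits(33.W))
        val b = Input(Bits(33.W))
        val c = Input(Bits(33.W))
        val out = Output(Bits(33.W))
        val exceptionFlags = Output(Bits(5.W))
    })
    val fma = Module(new MulAddRecFN(8, 24))
    fma.io.op := 0.U    // plain multiply-add: negate neither the product nor c
    fma.io.a := io.a
    fma.io.b := io.b
    fma.io.c := io.c
    fma.io.roundingMode := round_near_even
    fma.io.detectTininess := tininess_afterRounding
    io.out := fma.io.out
    io.exceptionFlags := fma.io.exceptionFlags
}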
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
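// Illustrative usage sketch (not part of the library): classify a recoded
// single-precision value with the decoder above. The module and port names are
// invented for the example.
class RecF32ClassifierSketch extends RawModule
{
    val io = IO(new Bundle {
        val in = Input(Bits(33.W))
        val isNaN = Output(Bool())
        val isInf = Output(Bool())
        val isZero = Output(Bool())
        val sign = Output(Bool())
    })
    val raw = rawFloatFromRecFN(8, 24, io.in)
    io.isNaN := raw.isNaN
    io.isInf := raw.isInf
    io.isZero := raw.isZero
    io.sign := raw.sign
}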
File common.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of
the University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
object consts {
/*------------------------------------------------------------------------
| For rounding to integer values, rounding mode 'odd' rounds to minimum
| magnitude instead, same as 'minMag'.
*------------------------------------------------------------------------*/
def round_near_even = "b000".U(3.W)
def round_minMag = "b001".U(3.W)
def round_min = "b010".U(3.W)
def round_max = "b011".U(3.W)
def round_near_maxMag = "b100".U(3.W)
def round_odd = "b110".U(3.W)
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
def tininess_beforeRounding = 0.U
def tininess_afterRounding = 1.U
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
def flRoundOpt_sigMSBitAlwaysZero = 1
def flRoundOpt_subnormsAlwaysExact = 2
def flRoundOpt_neverUnderflows = 4
def flRoundOpt_neverOverflows = 8
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
def divSqrtOpt_twoBitsPerCycle = 16
}
class RawFloat(val expWidth: Int, val sigWidth: Int) extends Bundle
{
val isNaN: Bool = Bool() // overrides all other fields
val isInf: Bool = Bool() // overrides 'isZero', 'sExp', and 'sig'
val isZero: Bool = Bool() // overrides 'sExp' and 'sig'
val sign: Bool = Bool()
val sExp: SInt = SInt((expWidth + 2).W)
val sig: UInt = UInt((sigWidth + 1).W) // 2 m.s. bits cannot both be 0
}
//*** CHANGE THIS INTO A '.isSigNaN' METHOD OF THE 'RawFloat' CLASS:
object isSigNaNRawFloat
{
def apply(in: RawFloat): Bool = in.isNaN && !in.sig(in.sigWidth - 2)
}
| module MulAddRecFNToRaw_preMul_e8_s24_20( // @[MulAddRecFN.scala:71:7]
input [32:0] io_a, // @[MulAddRecFN.scala:74:16]
input [32:0] io_c, // @[MulAddRecFN.scala:74:16]
output [23:0] io_mulAddA, // @[MulAddRecFN.scala:74:16]
output [47:0] io_mulAddC, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isSigNaNAny, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isNaNAOrB, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isInfA, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isZeroA, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_signProd, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isNaNC, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isInfC, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_isZeroC, // @[MulAddRecFN.scala:74:16]
output [9:0] io_toPostMul_sExpSum, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_doSubMags, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_CIsDominant, // @[MulAddRecFN.scala:74:16]
output [4:0] io_toPostMul_CDom_CAlignDist, // @[MulAddRecFN.scala:74:16]
output [25:0] io_toPostMul_highAlignedSigC, // @[MulAddRecFN.scala:74:16]
output io_toPostMul_bit0AlignedSigC // @[MulAddRecFN.scala:74:16]
);
wire rawA_sign; // @[rawFloatFromRecFN.scala:55:23]
wire rawA_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire [32:0] io_a_0 = io_a; // @[MulAddRecFN.scala:71:7]
wire [32:0] io_c_0 = io_c; // @[MulAddRecFN.scala:71:7]
wire [8:0] rawB_exp = 9'h100; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawB_isZero_T = 3'h4; // @[rawFloatFromRecFN.scala:52:28]
wire [1:0] _rawB_isSpecial_T = 2'h2; // @[rawFloatFromRecFN.scala:53:28]
wire [9:0] rawB_sExp = 10'h100; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire [9:0] _rawB_out_sExp_T = 10'h100; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire [1:0] _rawB_out_sig_T_1 = 2'h1; // @[rawFloatFromRecFN.scala:61:32]
wire [22:0] _rawB_out_sig_T_2 = 23'h0; // @[rawFloatFromRecFN.scala:61:49]
wire [24:0] rawB_sig = 25'h800000; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire [24:0] _rawB_out_sig_T_3 = 25'h800000; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire _rawB_out_isInf_T_1 = 1'h1; // @[rawFloatFromRecFN.scala:57:36, :61:35]
wire _rawB_out_sig_T = 1'h1; // @[rawFloatFromRecFN.scala:57:36, :61:35]
wire _io_toPostMul_isSigNaNAny_T_4 = 1'h1; // @[rawFloatFromRecFN.scala:57:36, :61:35]
wire io_toPostMul_isInfB = 1'h0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isZeroB = 1'h0; // @[MulAddRecFN.scala:71:7]
wire rawB_isZero = 1'h0; // @[rawFloatFromRecFN.scala:52:53]
wire rawB_isSpecial = 1'h0; // @[rawFloatFromRecFN.scala:53:53]
wire rawB_isNaN = 1'h0; // @[rawFloatFromRecFN.scala:55:23]
wire rawB_isInf = 1'h0; // @[rawFloatFromRecFN.scala:55:23]
wire rawB_isZero_0 = 1'h0; // @[rawFloatFromRecFN.scala:55:23]
wire rawB_sign = 1'h0; // @[rawFloatFromRecFN.scala:55:23]
wire _rawB_out_isNaN_T = 1'h0; // @[rawFloatFromRecFN.scala:56:41]
wire _rawB_out_isNaN_T_1 = 1'h0; // @[rawFloatFromRecFN.scala:56:33]
wire _rawB_out_isInf_T = 1'h0; // @[rawFloatFromRecFN.scala:57:41]
wire _rawB_out_isInf_T_2 = 1'h0; // @[rawFloatFromRecFN.scala:57:33]
wire _rawB_out_sign_T = 1'h0; // @[rawFloatFromRecFN.scala:59:25]
wire _signProd_T_1 = 1'h0; // @[MulAddRecFN.scala:97:49]
wire _doSubMags_T_1 = 1'h0; // @[MulAddRecFN.scala:102:49]
wire _io_toPostMul_isSigNaNAny_T_3 = 1'h0; // @[common.scala:82:56]
wire _io_toPostMul_isSigNaNAny_T_5 = 1'h0; // @[common.scala:82:46]
wire [23:0] io_mulAddB = 24'h800000; // @[MulAddRecFN.scala:71:7, :74:16, :142:16]
wire [32:0] io_b = 33'h80000000; // @[MulAddRecFN.scala:71:7, :74:16]
wire [1:0] io_op = 2'h0; // @[MulAddRecFN.scala:71:7, :74:16]
wire [47:0] _io_mulAddC_T; // @[MulAddRecFN.scala:143:30]
wire _io_toPostMul_isSigNaNAny_T_10; // @[MulAddRecFN.scala:146:58]
wire _io_toPostMul_isNaNAOrB_T; // @[MulAddRecFN.scala:148:42]
wire rawA_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawA_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire signProd; // @[MulAddRecFN.scala:97:42]
wire rawC_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire rawC_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire rawC_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire doSubMags; // @[MulAddRecFN.scala:102:42]
wire CIsDominant; // @[MulAddRecFN.scala:110:23]
wire [4:0] _io_toPostMul_CDom_CAlignDist_T; // @[MulAddRecFN.scala:161:47]
wire [25:0] _io_toPostMul_highAlignedSigC_T; // @[MulAddRecFN.scala:163:20]
wire _io_toPostMul_bit0AlignedSigC_T; // @[MulAddRecFN.scala:164:48]
wire io_toPostMul_isSigNaNAny_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isNaNAOrB_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isInfA_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isZeroA_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_signProd_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isNaNC_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isInfC_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_isZeroC_0; // @[MulAddRecFN.scala:71:7]
wire [9:0] io_toPostMul_sExpSum_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_doSubMags_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_CIsDominant_0; // @[MulAddRecFN.scala:71:7]
wire [4:0] io_toPostMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:71:7]
wire [25:0] io_toPostMul_highAlignedSigC_0; // @[MulAddRecFN.scala:71:7]
wire io_toPostMul_bit0AlignedSigC_0; // @[MulAddRecFN.scala:71:7]
wire [23:0] io_mulAddA_0; // @[MulAddRecFN.scala:71:7]
wire [47:0] io_mulAddC_0; // @[MulAddRecFN.scala:71:7]
wire [8:0] rawA_exp = io_a_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawA_isZero_T = rawA_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawA_isZero_0 = _rawA_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
assign rawA_isZero = rawA_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawA_isSpecial_T = rawA_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawA_isSpecial = &_rawA_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawA_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _rawA_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
assign _io_toPostMul_isNaNAOrB_T = rawA_isNaN; // @[rawFloatFromRecFN.scala:55:23]
assign io_toPostMul_isInfA_0 = rawA_isInf; // @[rawFloatFromRecFN.scala:55:23]
assign io_toPostMul_isZeroA_0 = rawA_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire _rawA_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire _isMinCAlign_T = rawA_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] _rawA_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire _signProd_T = rawA_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] _rawA_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire [9:0] rawA_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawA_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawA_out_isNaN_T = rawA_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawA_out_isInf_T = rawA_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawA_out_isNaN_T_1 = rawA_isSpecial & _rawA_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawA_isNaN = _rawA_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawA_out_isInf_T_1 = ~_rawA_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawA_out_isInf_T_2 = rawA_isSpecial & _rawA_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawA_isInf = _rawA_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawA_out_sign_T = io_a_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawA_sign = _rawA_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawA_out_sExp_T = {1'h0, rawA_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawA_sExp = _rawA_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawA_out_sig_T = ~rawA_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawA_out_sig_T_1 = {1'h0, _rawA_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawA_out_sig_T_2 = io_a_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawA_out_sig_T_3 = {_rawA_out_sig_T_1, _rawA_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawA_sig = _rawA_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire [8:0] rawC_exp = io_c_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _rawC_isZero_T = rawC_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire rawC_isZero_0 = _rawC_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
assign rawC_isZero = rawC_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _rawC_isSpecial_T = rawC_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire rawC_isSpecial = &_rawC_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _rawC_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
assign io_toPostMul_isNaNC_0 = rawC_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire _rawC_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
assign io_toPostMul_isInfC_0 = rawC_isInf; // @[rawFloatFromRecFN.scala:55:23]
assign io_toPostMul_isZeroC_0 = rawC_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire _rawC_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _rawC_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _rawC_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire rawC_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] rawC_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] rawC_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _rawC_out_isNaN_T = rawC_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _rawC_out_isInf_T = rawC_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _rawC_out_isNaN_T_1 = rawC_isSpecial & _rawC_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign rawC_isNaN = _rawC_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _rawC_out_isInf_T_1 = ~_rawC_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _rawC_out_isInf_T_2 = rawC_isSpecial & _rawC_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign rawC_isInf = _rawC_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _rawC_out_sign_T = io_c_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign rawC_sign = _rawC_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _rawC_out_sExp_T = {1'h0, rawC_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign rawC_sExp = _rawC_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _rawC_out_sig_T = ~rawC_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _rawC_out_sig_T_1 = {1'h0, _rawC_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _rawC_out_sig_T_2 = io_c_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _rawC_out_sig_T_3 = {_rawC_out_sig_T_1, _rawC_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign rawC_sig = _rawC_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
assign signProd = _signProd_T; // @[MulAddRecFN.scala:97:{30,42}]
assign io_toPostMul_signProd_0 = signProd; // @[MulAddRecFN.scala:71:7, :97:42]
wire [10:0] _sExpAlignedProd_T = {rawA_sExp[9], rawA_sExp} + 11'h100; // @[rawFloatFromRecFN.scala:55:23]
wire [11:0] _sExpAlignedProd_T_1 = {_sExpAlignedProd_T[10], _sExpAlignedProd_T} - 12'hE5; // @[MulAddRecFN.scala:100:{19,32}]
wire [10:0] _sExpAlignedProd_T_2 = _sExpAlignedProd_T_1[10:0]; // @[MulAddRecFN.scala:100:32]
wire [10:0] sExpAlignedProd = _sExpAlignedProd_T_2; // @[MulAddRecFN.scala:100:32]
wire _doSubMags_T = signProd ^ rawC_sign; // @[rawFloatFromRecFN.scala:55:23]
assign doSubMags = _doSubMags_T; // @[MulAddRecFN.scala:102:{30,42}]
assign io_toPostMul_doSubMags_0 = doSubMags; // @[MulAddRecFN.scala:71:7, :102:42]
wire [11:0] _GEN = {sExpAlignedProd[10], sExpAlignedProd}; // @[MulAddRecFN.scala:100:32, :106:42]
wire [11:0] _sNatCAlignDist_T = _GEN - {{2{rawC_sExp[9]}}, rawC_sExp}; // @[rawFloatFromRecFN.scala:55:23]
wire [10:0] _sNatCAlignDist_T_1 = _sNatCAlignDist_T[10:0]; // @[MulAddRecFN.scala:106:42]
wire [10:0] sNatCAlignDist = _sNatCAlignDist_T_1; // @[MulAddRecFN.scala:106:42]
wire [9:0] posNatCAlignDist = sNatCAlignDist[9:0]; // @[MulAddRecFN.scala:106:42, :107:42]
wire _isMinCAlign_T_1 = $signed(sNatCAlignDist) < 11'sh0; // @[MulAddRecFN.scala:106:42, :108:69]
wire isMinCAlign = _isMinCAlign_T | _isMinCAlign_T_1; // @[MulAddRecFN.scala:108:{35,50,69}]
wire _CIsDominant_T = ~rawC_isZero; // @[rawFloatFromRecFN.scala:55:23]
wire _CIsDominant_T_1 = posNatCAlignDist < 10'h19; // @[MulAddRecFN.scala:107:42, :110:60]
wire _CIsDominant_T_2 = isMinCAlign | _CIsDominant_T_1; // @[MulAddRecFN.scala:108:50, :110:{39,60}]
assign CIsDominant = _CIsDominant_T & _CIsDominant_T_2; // @[MulAddRecFN.scala:110:{9,23,39}]
assign io_toPostMul_CIsDominant_0 = CIsDominant; // @[MulAddRecFN.scala:71:7, :110:23]
wire _CAlignDist_T = posNatCAlignDist < 10'h4A; // @[MulAddRecFN.scala:107:42, :114:34]
wire [6:0] _CAlignDist_T_1 = posNatCAlignDist[6:0]; // @[MulAddRecFN.scala:107:42, :115:33]
wire [6:0] _CAlignDist_T_2 = _CAlignDist_T ? _CAlignDist_T_1 : 7'h4A; // @[MulAddRecFN.scala:114:{16,34}, :115:33]
wire [6:0] CAlignDist = isMinCAlign ? 7'h0 : _CAlignDist_T_2; // @[MulAddRecFN.scala:108:50, :112:12, :114:16]
wire [24:0] _mainAlignedSigC_T = ~rawC_sig; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] _mainAlignedSigC_T_1 = doSubMags ? _mainAlignedSigC_T : rawC_sig; // @[rawFloatFromRecFN.scala:55:23]
wire [52:0] _mainAlignedSigC_T_2 = {53{doSubMags}}; // @[MulAddRecFN.scala:102:42, :120:53]
wire [77:0] _mainAlignedSigC_T_3 = {_mainAlignedSigC_T_1, _mainAlignedSigC_T_2}; // @[MulAddRecFN.scala:120:{13,46,53}]
wire [77:0] _mainAlignedSigC_T_4 = _mainAlignedSigC_T_3; // @[MulAddRecFN.scala:120:{46,94}]
wire [77:0] mainAlignedSigC = $signed($signed(_mainAlignedSigC_T_4) >>> CAlignDist); // @[MulAddRecFN.scala:112:12, :120:{94,100}]
wire [26:0] _reduced4CExtra_T = {rawC_sig, 2'h0}; // @[rawFloatFromRecFN.scala:55:23]
wire _reduced4CExtra_reducedVec_0_T_1; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_1_T_1; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_2_T_1; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_3_T_1; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_4_T_1; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_5_T_1; // @[primitives.scala:120:54]
wire _reduced4CExtra_reducedVec_6_T_1; // @[primitives.scala:123:57]
wire reduced4CExtra_reducedVec_0; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_1; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_2; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_3; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_4; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_5; // @[primitives.scala:118:30]
wire reduced4CExtra_reducedVec_6; // @[primitives.scala:118:30]
wire [3:0] _reduced4CExtra_reducedVec_0_T = _reduced4CExtra_T[3:0]; // @[primitives.scala:120:33]
assign _reduced4CExtra_reducedVec_0_T_1 = |_reduced4CExtra_reducedVec_0_T; // @[primitives.scala:120:{33,54}]
assign reduced4CExtra_reducedVec_0 = _reduced4CExtra_reducedVec_0_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _reduced4CExtra_reducedVec_1_T = _reduced4CExtra_T[7:4]; // @[primitives.scala:120:33]
assign _reduced4CExtra_reducedVec_1_T_1 = |_reduced4CExtra_reducedVec_1_T; // @[primitives.scala:120:{33,54}]
assign reduced4CExtra_reducedVec_1 = _reduced4CExtra_reducedVec_1_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _reduced4CExtra_reducedVec_2_T = _reduced4CExtra_T[11:8]; // @[primitives.scala:120:33]
assign _reduced4CExtra_reducedVec_2_T_1 = |_reduced4CExtra_reducedVec_2_T; // @[primitives.scala:120:{33,54}]
assign reduced4CExtra_reducedVec_2 = _reduced4CExtra_reducedVec_2_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _reduced4CExtra_reducedVec_3_T = _reduced4CExtra_T[15:12]; // @[primitives.scala:120:33]
assign _reduced4CExtra_reducedVec_3_T_1 = |_reduced4CExtra_reducedVec_3_T; // @[primitives.scala:120:{33,54}]
assign reduced4CExtra_reducedVec_3 = _reduced4CExtra_reducedVec_3_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _reduced4CExtra_reducedVec_4_T = _reduced4CExtra_T[19:16]; // @[primitives.scala:120:33]
assign _reduced4CExtra_reducedVec_4_T_1 = |_reduced4CExtra_reducedVec_4_T; // @[primitives.scala:120:{33,54}]
assign reduced4CExtra_reducedVec_4 = _reduced4CExtra_reducedVec_4_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _reduced4CExtra_reducedVec_5_T = _reduced4CExtra_T[23:20]; // @[primitives.scala:120:33]
assign _reduced4CExtra_reducedVec_5_T_1 = |_reduced4CExtra_reducedVec_5_T; // @[primitives.scala:120:{33,54}]
assign reduced4CExtra_reducedVec_5 = _reduced4CExtra_reducedVec_5_T_1; // @[primitives.scala:118:30, :120:54]
wire [2:0] _reduced4CExtra_reducedVec_6_T = _reduced4CExtra_T[26:24]; // @[primitives.scala:123:15]
assign _reduced4CExtra_reducedVec_6_T_1 = |_reduced4CExtra_reducedVec_6_T; // @[primitives.scala:123:{15,57}]
assign reduced4CExtra_reducedVec_6 = _reduced4CExtra_reducedVec_6_T_1; // @[primitives.scala:118:30, :123:57]
wire [1:0] reduced4CExtra_lo_hi = {reduced4CExtra_reducedVec_2, reduced4CExtra_reducedVec_1}; // @[primitives.scala:118:30, :124:20]
wire [2:0] reduced4CExtra_lo = {reduced4CExtra_lo_hi, reduced4CExtra_reducedVec_0}; // @[primitives.scala:118:30, :124:20]
wire [1:0] reduced4CExtra_hi_lo = {reduced4CExtra_reducedVec_4, reduced4CExtra_reducedVec_3}; // @[primitives.scala:118:30, :124:20]
wire [1:0] reduced4CExtra_hi_hi = {reduced4CExtra_reducedVec_6, reduced4CExtra_reducedVec_5}; // @[primitives.scala:118:30, :124:20]
wire [3:0] reduced4CExtra_hi = {reduced4CExtra_hi_hi, reduced4CExtra_hi_lo}; // @[primitives.scala:124:20]
wire [6:0] _reduced4CExtra_T_1 = {reduced4CExtra_hi, reduced4CExtra_lo}; // @[primitives.scala:124:20]
wire [4:0] _reduced4CExtra_T_2 = CAlignDist[6:2]; // @[MulAddRecFN.scala:112:12, :124:28]
wire [32:0] reduced4CExtra_shift = $signed(33'sh100000000 >>> _reduced4CExtra_T_2); // @[primitives.scala:76:56]
wire [5:0] _reduced4CExtra_T_3 = reduced4CExtra_shift[19:14]; // @[primitives.scala:76:56, :78:22]
wire [3:0] _reduced4CExtra_T_4 = _reduced4CExtra_T_3[3:0]; // @[primitives.scala:77:20, :78:22]
wire [1:0] _reduced4CExtra_T_5 = _reduced4CExtra_T_4[1:0]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_6 = _reduced4CExtra_T_5[0]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_7 = _reduced4CExtra_T_5[1]; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_8 = {_reduced4CExtra_T_6, _reduced4CExtra_T_7}; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_9 = _reduced4CExtra_T_4[3:2]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_10 = _reduced4CExtra_T_9[0]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_11 = _reduced4CExtra_T_9[1]; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_12 = {_reduced4CExtra_T_10, _reduced4CExtra_T_11}; // @[primitives.scala:77:20]
wire [3:0] _reduced4CExtra_T_13 = {_reduced4CExtra_T_8, _reduced4CExtra_T_12}; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_14 = _reduced4CExtra_T_3[5:4]; // @[primitives.scala:77:20, :78:22]
wire _reduced4CExtra_T_15 = _reduced4CExtra_T_14[0]; // @[primitives.scala:77:20]
wire _reduced4CExtra_T_16 = _reduced4CExtra_T_14[1]; // @[primitives.scala:77:20]
wire [1:0] _reduced4CExtra_T_17 = {_reduced4CExtra_T_15, _reduced4CExtra_T_16}; // @[primitives.scala:77:20]
wire [5:0] _reduced4CExtra_T_18 = {_reduced4CExtra_T_13, _reduced4CExtra_T_17}; // @[primitives.scala:77:20]
wire [6:0] _reduced4CExtra_T_19 = {1'h0, _reduced4CExtra_T_1[5:0] & _reduced4CExtra_T_18}; // @[primitives.scala:77:20, :124:20]
wire reduced4CExtra = |_reduced4CExtra_T_19; // @[MulAddRecFN.scala:122:68, :130:11]
wire [74:0] _alignedSigC_T = mainAlignedSigC[77:3]; // @[MulAddRecFN.scala:120:100, :132:28]
wire [74:0] alignedSigC_hi = _alignedSigC_T; // @[MulAddRecFN.scala:132:{12,28}]
wire [2:0] _alignedSigC_T_1 = mainAlignedSigC[2:0]; // @[MulAddRecFN.scala:120:100, :134:32]
wire [2:0] _alignedSigC_T_5 = mainAlignedSigC[2:0]; // @[MulAddRecFN.scala:120:100, :134:32, :135:32]
wire _alignedSigC_T_2 = &_alignedSigC_T_1; // @[MulAddRecFN.scala:134:{32,39}]
wire _alignedSigC_T_3 = ~reduced4CExtra; // @[MulAddRecFN.scala:130:11, :134:47]
wire _alignedSigC_T_4 = _alignedSigC_T_2 & _alignedSigC_T_3; // @[MulAddRecFN.scala:134:{39,44,47}]
wire _alignedSigC_T_6 = |_alignedSigC_T_5; // @[MulAddRecFN.scala:135:{32,39}]
wire _alignedSigC_T_7 = _alignedSigC_T_6 | reduced4CExtra; // @[MulAddRecFN.scala:130:11, :135:{39,44}]
wire _alignedSigC_T_8 = doSubMags ? _alignedSigC_T_4 : _alignedSigC_T_7; // @[MulAddRecFN.scala:102:42, :133:16, :134:44, :135:44]
wire [75:0] alignedSigC = {alignedSigC_hi, _alignedSigC_T_8}; // @[MulAddRecFN.scala:132:12, :133:16]
assign io_mulAddA_0 = rawA_sig[23:0]; // @[rawFloatFromRecFN.scala:55:23]
assign _io_mulAddC_T = alignedSigC[48:1]; // @[MulAddRecFN.scala:132:12, :143:30]
assign io_mulAddC_0 = _io_mulAddC_T; // @[MulAddRecFN.scala:71:7, :143:30]
wire _io_toPostMul_isSigNaNAny_T = rawA_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_toPostMul_isSigNaNAny_T_1 = ~_io_toPostMul_isSigNaNAny_T; // @[common.scala:82:{49,56}]
wire _io_toPostMul_isSigNaNAny_T_2 = rawA_isNaN & _io_toPostMul_isSigNaNAny_T_1; // @[rawFloatFromRecFN.scala:55:23]
wire _io_toPostMul_isSigNaNAny_T_6 = _io_toPostMul_isSigNaNAny_T_2; // @[common.scala:82:46]
wire _io_toPostMul_isSigNaNAny_T_7 = rawC_sig[22]; // @[rawFloatFromRecFN.scala:55:23]
wire _io_toPostMul_isSigNaNAny_T_8 = ~_io_toPostMul_isSigNaNAny_T_7; // @[common.scala:82:{49,56}]
wire _io_toPostMul_isSigNaNAny_T_9 = rawC_isNaN & _io_toPostMul_isSigNaNAny_T_8; // @[rawFloatFromRecFN.scala:55:23]
assign _io_toPostMul_isSigNaNAny_T_10 = _io_toPostMul_isSigNaNAny_T_6 | _io_toPostMul_isSigNaNAny_T_9; // @[common.scala:82:46]
assign io_toPostMul_isSigNaNAny_0 = _io_toPostMul_isSigNaNAny_T_10; // @[MulAddRecFN.scala:71:7, :146:58]
assign io_toPostMul_isNaNAOrB_0 = _io_toPostMul_isNaNAOrB_T; // @[MulAddRecFN.scala:71:7, :148:42]
wire [11:0] _io_toPostMul_sExpSum_T = _GEN - 12'h18; // @[MulAddRecFN.scala:106:42, :158:53]
wire [10:0] _io_toPostMul_sExpSum_T_1 = _io_toPostMul_sExpSum_T[10:0]; // @[MulAddRecFN.scala:158:53]
wire [10:0] _io_toPostMul_sExpSum_T_2 = _io_toPostMul_sExpSum_T_1; // @[MulAddRecFN.scala:158:53]
wire [10:0] _io_toPostMul_sExpSum_T_3 = CIsDominant ? {rawC_sExp[9], rawC_sExp} : _io_toPostMul_sExpSum_T_2; // @[rawFloatFromRecFN.scala:55:23]
assign io_toPostMul_sExpSum_0 = _io_toPostMul_sExpSum_T_3[9:0]; // @[MulAddRecFN.scala:71:7, :157:28, :158:12]
assign _io_toPostMul_CDom_CAlignDist_T = CAlignDist[4:0]; // @[MulAddRecFN.scala:112:12, :161:47]
assign io_toPostMul_CDom_CAlignDist_0 = _io_toPostMul_CDom_CAlignDist_T; // @[MulAddRecFN.scala:71:7, :161:47]
assign _io_toPostMul_highAlignedSigC_T = alignedSigC[74:49]; // @[MulAddRecFN.scala:132:12, :163:20]
assign io_toPostMul_highAlignedSigC_0 = _io_toPostMul_highAlignedSigC_T; // @[MulAddRecFN.scala:71:7, :163:20]
assign _io_toPostMul_bit0AlignedSigC_T = alignedSigC[0]; // @[MulAddRecFN.scala:132:12, :164:48]
assign io_toPostMul_bit0AlignedSigC_0 = _io_toPostMul_bit0AlignedSigC_T; // @[MulAddRecFN.scala:71:7, :164:48]
assign io_mulAddA = io_mulAddA_0; // @[MulAddRecFN.scala:71:7]
assign io_mulAddC = io_mulAddC_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isSigNaNAny = io_toPostMul_isSigNaNAny_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isNaNAOrB = io_toPostMul_isNaNAOrB_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isInfA = io_toPostMul_isInfA_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isZeroA = io_toPostMul_isZeroA_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_signProd = io_toPostMul_signProd_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isNaNC = io_toPostMul_isNaNC_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isInfC = io_toPostMul_isInfC_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_isZeroC = io_toPostMul_isZeroC_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_sExpSum = io_toPostMul_sExpSum_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_doSubMags = io_toPostMul_doSubMags_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_CIsDominant = io_toPostMul_CIsDominant_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_CDom_CAlignDist = io_toPostMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_highAlignedSigC = io_toPostMul_highAlignedSigC_0; // @[MulAddRecFN.scala:71:7]
assign io_toPostMul_bit0AlignedSigC = io_toPostMul_bit0AlignedSigC_0; // @[MulAddRecFN.scala:71:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
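// Illustrative sketch (not part of rocket-chip): a hypothetical adapter that
// interposes two buffer stages using TLBuffer.chainNode; the class name and the
// suggested name string are invented for the example.
class DoubleTLBufferSketch(implicit p: Parameters) extends LazyModule
{
  val node: TLNode = TLBuffer.chainNode(2, Some("double_buffer"))
  lazy val module = new LazyModuleImp(this) { }
}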
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and the unconnected [[Dangle]]s from this module
* and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
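// Illustrative sketch (not part of the library): a LazyModule whose
// implementation is a LazyRawModuleImp that derives childClock/childReset from
// explicit ports, so lazily-instantiated children still see a driven clock and
// reset. The class, port names, and override are invented for the example.
class ClockedIslandSketch(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyRawModuleImp(this) {
    override def provideImplicitClockToLazyChildren: Boolean = true
    val clockIn = IO(Input(Clock()))
    val resetIn = IO(Input(Bool()))
    childClock := clockIn
    childReset := resetIn
  }
}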
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
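// Illustrative sketch (not part of diplomacy): constructing a Dangle by hand,
// with invented serial numbers, port indices, and name, just to show how the
// two HalfEdges pair up. Real Dangles come from [[MixedNode.danglesIn]] and
// [[MixedNode.danglesOut]] during [[BaseNode.instantiate]].
object DangleSketch {
  val example: Dangle = Dangle(
    source = HalfEdge(serial = 3, index = 0),
    sink = HalfEdge(serial = 7, index = 1),
    flipped = false,
    name = "out",
    dataOpt = None
  )
}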
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
*   Edge Parameters describing a connection on the outer side of the node. It is usually a group of transfers
*   specified for a source according to the protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
*   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
*   - Line `─`: the source is processed by a function whose result is passed on to others
*   - Arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] whose inward and outward nodes are both this node.
val inward = this
val outward = this
/** Debug info of node bindings. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of port connections. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameter propagation. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward bindings and of inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
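// Illustrative sketch (not the library's actual implementation): a 1:1 adapter-style node could resolve stars by
// letting a star on one side absorb the known count of the other side, e.g.
//   protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) = {
//     require(iStar + oStar <= 1, "a 1:1 adapter may use a star binding on at most one side")
//     if (iStar > 0) (oKnown - iKnown, 0)      // the inward star expands to match the outward side
//     else if (oStar > 0) (0, iKnown - oKnown) // the outward star expands to match the inward side
//     else { require(iKnown == oKnown); (0, 0) }
//   }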
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* An `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uoParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* An `n`-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]]s indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of `:=*` operators where we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For an edge between this node and `n`, select between `l` and `r` depending on which side drives the direction of
* the flex resolution (returning 1 when neither side is flexible).
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice when resolving `:*=` and similar operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved to determine how many actual edges they correspond to. We also need to build
* up the ranges of edges which correspond to each binding operator, so that we can apply the correct edge parameters
* and later build up correct bundle connections.
*
* - [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
*   operator).
* - [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort (binding
*   operator).
* - [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= bar`
* - [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// resolveStar relies on the node subclass to implement the resolution algorithm.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
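// Worked example (hypothetical bindings): suppose this node N was bound as
//   N := a     (inward BIND_ONCE)      N :*= b    (inward BIND_STAR)
//   c := N     (outward BIND_ONCE)     d := N     (outward BIND_ONCE)
// Then iKnown = 1, iStars = 1, oKnown = 2, oStars = 0. If this node's resolveStar resolves the single inward star
// to one edge, i.e. (iStar, oStar) = (1, 0), then
//   iSum = Seq(1, 1).scanLeft(0)(_ + _) = Seq(0, 1, 2)  =>  iPortMapping = Seq((0, 1), (1, 2))
//   oSum = Seq(1, 1).scanLeft(0)(_ + _) = Seq(0, 1, 2)  =>  oPortMapping = Seq((0, 1), (1, 2))
// so each binding operator owns exactly one of the two edges on its side.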
/** Sequence of outward ports, before any forwarding through ephemeral nodes.
*
* This should be called after all star bindings are resolved.
*
* Each element is:
* - `j` Port index of this binding within the connected [[InwardNode]] (drawn from its `iPortMapping`).
* - `n` Instance of the connected inward node.
* - `p` View of [[Parameters]] where this connection was made.
* - `s` Source info where this connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of inward ports, before any forwarding through ephemeral nodes.
*
* This should be called after all star bindings are resolved.
*
* Each element is:
* - `j` Port index of this binding within the connected [[OutwardNode]] (drawn from its `oPortMapping`).
* - `n` Instance of the connected outward node.
* - `p` View of [[Parameters]] where this connection was made.
* - `s` [[SourceInfo]] where this connection was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query the port index range allocated to this binding on the other side of the connection
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree;
// thus, there must exist an Eulerian path and the algorithms below terminate.
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
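// For example (hypothetical forwarding): if outward port i of this node is bound to an ephemeral node E whose
// iForward(i) points at port j of node M, oTrace rewrites the tuple (i, E, p, s) to (j, M, p, s), repeating until a
// node with no further forwarding is reached; iTrace does the same on the inward side via oForward.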
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access the edges of a foreign node, use this method rather than [[in]]/[[out]], which also create
* the hardware bundles.
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Unconnected forwarded diplomatic signals are currently tied to DontCare for compatibility.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Unconnected forwarded diplomatic signals are currently tied to DontCare for compatibility.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node's outputs to other nodes' inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections to this node's inputs from other nodes' outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather the Bundle and edge parameters of the outward ports.
*
* Accessor to the result of negotiation; it should only be used within [[LazyModuleImp]] code or after its
* instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather the Bundle and edge parameters of the inward ports.
*
* Accessor to the result of negotiation; it should only be used within [[LazyModuleImp]] code or after its
* instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
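// For example (hypothetical nodes x and y): writing `x :*= y` calls x.bind(y, BIND_STAR), so x records an inward
// BIND_STAR binding on y while y records the flipped outward BIND_QUERY binding on x, allowing x's later star
// resolution to query y's resolved edge count.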
/** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module TLBuffer_a32d64s8k1z3u_1( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [31:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[Buffer.scala:40:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[Buffer.scala:40:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [7:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire auto_out_d_bits_sink = 1'h0; // @[Decoupled.scala:362:21]
wire auto_out_d_bits_denied = 1'h0; // @[Decoupled.scala:362:21]
wire auto_out_d_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_sink = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_denied = 1'h0; // @[Decoupled.scala:362:21]
wire nodeOut_d_bits_corrupt = 1'h0; // @[Decoupled.scala:362:21]
wire [1:0] auto_out_d_bits_param = 2'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire [1:0] nodeOut_d_bits_param = 2'h0; // @[Decoupled.scala:362:21]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [7:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[Buffer.scala:40:9]
wire [7:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[Buffer.scala:40:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_a_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
wire [7:0] auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_a_valid_0; // @[Buffer.scala:40:9]
wire auto_out_d_ready_0; // @[Buffer.scala:40:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Buffer.scala:40:9]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[Buffer.scala:40:9]
Queue2_TLBundleA_a32d64s8k1z3u nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_a_ready),
.io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_enq_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_deq_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17]
.io_deq_valid (nodeOut_a_valid),
.io_deq_bits_opcode (nodeOut_a_bits_opcode),
.io_deq_bits_param (nodeOut_a_bits_param),
.io_deq_bits_size (nodeOut_a_bits_size),
.io_deq_bits_source (nodeOut_a_bits_source),
.io_deq_bits_address (nodeOut_a_bits_address),
.io_deq_bits_mask (nodeOut_a_bits_mask),
.io_deq_bits_data (nodeOut_a_bits_data),
.io_deq_bits_corrupt (nodeOut_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleD_a32d64s8k1z3u nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeOut_d_ready),
.io_enq_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17]
.io_enq_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17]
.io_enq_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17]
.io_enq_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17]
.io_deq_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeIn_d_valid),
.io_deq_bits_opcode (nodeIn_d_bits_opcode),
.io_deq_bits_param (nodeIn_d_bits_param),
.io_deq_bits_size (nodeIn_d_bits_size),
.io_deq_bits_source (nodeIn_d_bits_source),
.io_deq_bits_sink (nodeIn_d_bits_sink),
.io_deq_bits_denied (nodeIn_d_bits_denied),
.io_deq_bits_data (nodeIn_d_bits_data),
.io_deq_bits_corrupt (nodeIn_d_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = auto_in_a_ready_0; // @[Buffer.scala:40:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[Buffer.scala:40:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File LocalAddr.scala:
package gemmini
import chisel3._
import chisel3.util._
class LocalAddr(sp_banks: Int, sp_bank_entries: Int, acc_banks: Int, acc_bank_entries: Int) extends Bundle {
private val localAddrBits = 32 // TODO magic number
private val spAddrBits = log2Ceil(sp_banks * sp_bank_entries)
private val accAddrBits = log2Ceil(acc_banks * acc_bank_entries)
private val maxAddrBits = spAddrBits max accAddrBits
private val spBankBits = log2Up(sp_banks)
private val spBankRowBits = log2Up(sp_bank_entries)
private val accBankBits = log2Up(acc_banks)
val accBankRowBits = log2Up(acc_bank_entries)
val spRows = sp_banks * sp_bank_entries
val is_acc_addr = Bool()
val accumulate = Bool()
val read_full_acc_row = Bool()
val norm_cmd = NormCmd()
private val metadata_w = is_acc_addr.getWidth + accumulate.getWidth + read_full_acc_row.getWidth + norm_cmd.getWidth
assert(maxAddrBits + metadata_w < 32)
val garbage = UInt(((localAddrBits - maxAddrBits - metadata_w - 1) max 0).W)
val garbage_bit = if (localAddrBits - maxAddrBits >= metadata_w + 1) UInt(1.W) else UInt(0.W)
val data = UInt(maxAddrBits.W)
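// Layout sketch (derived from the widths above; assumes NormCmd() is 3 bits wide, as in the generated modules
// elsewhere in this dump): metadata_w = 1 + 1 + 1 + 3 = 6, so with maxAddrBits = 14 we get garbage = 32 - 14 - 6 - 1
// = 11 bits and garbage_bit = 1 bit. With Chisel's usual Bundle packing (earlier fields in higher bits), the 32-bit
// local address is then:
//   | is_acc_addr | accumulate | read_full_acc_row | norm_cmd | garbage | garbage_bit | data |
//   |      1      |     1      |         1         |    3     |   11    |      1      |  14  |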
def sp_bank(dummy: Int = 0) = if (spAddrBits == spBankRowBits) 0.U else data(spAddrBits - 1, spBankRowBits)
def sp_row(dummy: Int = 0) = data(spBankRowBits - 1, 0)
def acc_bank(dummy: Int = 0) = if (accAddrBits == accBankRowBits) 0.U else data(accAddrBits - 1, accBankRowBits)
def acc_row(dummy: Int = 0) = data(accBankRowBits - 1, 0)
def full_sp_addr(dummy: Int = 0) = data(spAddrBits - 1, 0)
def full_acc_addr(dummy: Int = 0) = data(accAddrBits - 1, 0)
def is_same_address(other: LocalAddr): Bool = is_acc_addr === other.is_acc_addr && data === other.data
def is_same_address(other: UInt): Bool = is_same_address(other.asTypeOf(this))
def is_garbage(dummy: Int = 0) = is_acc_addr && accumulate && read_full_acc_row && data.andR &&
(if (garbage_bit.getWidth > 0) garbage_bit.asBool else true.B)
def +(other: UInt) = {
require(isPow2(sp_bank_entries)) // TODO remove this requirement
require(isPow2(acc_bank_entries)) // TODO remove this requirement
val result = WireInit(this)
result.data := data + other
result
}
def <=(other: LocalAddr) =
is_acc_addr === other.is_acc_addr &&
Mux(is_acc_addr, full_acc_addr() <= other.full_acc_addr(), full_sp_addr() <= other.full_sp_addr())
def <(other: LocalAddr) =
is_acc_addr === other.is_acc_addr &&
Mux(is_acc_addr, full_acc_addr() < other.full_acc_addr(), full_sp_addr() < other.full_sp_addr())
def >(other: LocalAddr) =
is_acc_addr === other.is_acc_addr &&
Mux(is_acc_addr, full_acc_addr() > other.full_acc_addr(), full_sp_addr() > other.full_sp_addr())
def add_with_overflow(other: UInt): Tuple2[LocalAddr, Bool] = {
require(isPow2(sp_bank_entries)) // TODO remove this requirement
require(isPow2(acc_bank_entries)) // TODO remove this requirement
val sum = data +& other
val overflow = Mux(is_acc_addr, sum(accAddrBits), sum(spAddrBits))
val result = WireInit(this)
result.data := sum(maxAddrBits - 1, 0)
(result, overflow)
}
// This function can only be used with non-accumulator addresses. Returns both new address and underflow
def floorSub(other: UInt, floor: UInt): (LocalAddr, Bool) = {
require(isPow2(sp_bank_entries)) // TODO remove this requirement
require(isPow2(acc_bank_entries)) // TODO remove this requirement
val underflow = data < (floor +& other)
val result = WireInit(this)
result.data := Mux(underflow, floor, data - other)
(result, underflow)
}
def make_this_garbage(dummy: Int = 0): Unit = {
is_acc_addr := true.B
accumulate := true.B
read_full_acc_row := true.B
garbage_bit := 1.U
data := ~(0.U(maxAddrBits.W))
}
}
object LocalAddr {
def cast_to_local_addr[T <: Data](local_addr_t: LocalAddr, t: T): LocalAddr = {
// This convenience function is basically the same as calling "asTypeOf(local_addr_t)". However, this convenience
// function will also cast unnecessary garbage bits to 0, which may help reduce multiplier/adder bitwidths
val result = WireInit(t.asTypeOf(local_addr_t))
if (result.garbage_bit.getWidth > 0) result.garbage := 0.U
result
}
def cast_to_sp_addr[T <: Data](local_addr_t: LocalAddr, t: T): LocalAddr = {
// This function is a wrapper around cast_to_local_addr, but it assumes that the input will not be the garbage
// address
val result = WireInit(cast_to_local_addr(local_addr_t, t))
result.is_acc_addr := false.B
result.accumulate := false.B
result.read_full_acc_row := false.B
// assert(!result.garbage_bit, "cast_to_sp_addr doesn't work on garbage addresses")
result
}
def cast_to_acc_addr[T <: Data](local_addr_t: LocalAddr, t: T, accumulate: Bool, read_full: Bool): LocalAddr = {
// This function is a wrapper around cast_to_local_addr, but it assumes that the input will not be the garbage
// address
val result = WireInit(cast_to_local_addr(local_addr_t, t))
result.is_acc_addr := true.B
result.accumulate := accumulate
result.read_full_acc_row := read_full
// assert(!result.garbage_bit, "cast_to_acc_addr doesn't work on garbage addresses")
result
}
def garbage_addr(local_addr_t: LocalAddr): LocalAddr = {
val result = Wire(chiselTypeOf(local_addr_t))
result := DontCare
result.make_this_garbage()
result
}
}
File Util.scala:
package gemmini
import chisel3._
import chisel3.util._
object Util {
def wrappingAdd(u: UInt, n: UInt, max_plus_one: Int): UInt = {
val max = max_plus_one - 1
if (max == 0) {
0.U
} else {
assert(n <= max.U, "cannot wrapAdd when n is larger than max")
Mux(u >= max.U - n + 1.U && n =/= 0.U, n - (max.U - u) - 1.U, u + n)
}
}
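// For example, with max_plus_one = 4 (so max = 3), wrappingAdd(3.U, 2.U, 4) takes the wrap branch and yields
// n - (max.U - u) - 1.U = 2 - 0 - 1 = 1, which is (3 + 2) % 4, while wrappingAdd(1.U, 2.U, 4) simply yields u + n = 3.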
def wrappingAdd(u: UInt, n: UInt, max_plus_one: UInt, en: Bool = true.B): UInt = {
val max = max_plus_one - 1.U
assert(n <= max || max === 0.U, "cannot wrapAdd when n is larger than max, unless max is 0")
/*
Mux(!en, u,
Mux (max === 0.U, 0.U,
Mux(u >= max - n + 1.U && n =/= 0.U, n - (max - u) - 1.U, u + n)))
*/
MuxCase(u + n, Seq(
(!en) -> u,
(max === 0.U) -> 0.U,
(u >= max - n + 1.U && n =/= 0.U) -> (n - (max - u) - 1.U)
))
}
def satAdd(u: UInt, v: UInt, max: UInt): UInt = {
Mux(u +& v > max, max, u + v)
}
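// For example, satAdd(250.U, 10.U, 255.U) saturates to 255, while satAdd(3.U, 4.U, 255.U) yields 7; the widening +&
// keeps the carry bit so the comparison itself cannot overflow.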
def floorAdd(u: UInt, n: UInt, max_plus_one: UInt, en: Bool = true.B): UInt = {
val max = max_plus_one - 1.U
MuxCase(u + n, Seq(
(!en) -> u,
((u +& n) > max) -> 0.U
))
}
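// For example, with max_plus_one = 10.U, floorAdd(8.U, 3.U, 10.U) snaps to 0 because 8 + 3 exceeds max = 9, whereas
// wrappingAdd(8.U, 3.U, 10.U) would wrap to the remainder 1; floorAdd(8.U, 1.U, 10.U) simply yields 9.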
def sFloorAdd(s: SInt, n: UInt, max_plus_one: SInt, min: SInt, en: Bool = true.B): SInt = {
val max = max_plus_one - 1.S
MuxCase(s + n.zext, Seq(
(!en) -> s,
((s +& n.zext) > max) -> min
))
}
def wrappingSub(u: UInt, n: UInt, max_plus_one: Int): UInt = {
val max = max_plus_one - 1
assert(n <= max.U, "cannot wrapSub when n is larger than max")
Mux(u < n, max.U - (n-u) + 1.U, u - n)
}
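// For example, with max_plus_one = 8 (so max = 7), wrappingSub(1.U, 3.U, 8) takes the wrap branch and yields
// max.U - (n - u) + 1.U = 7 - 2 + 1 = 6, i.e. (1 - 3) mod 8.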
def ceilingDivide(numer: Int, denom: Int): Int = {
if (numer % denom == 0) { numer / denom }
else { numer / denom + 1}
}
def closestLowerPowerOf2(u: UInt): UInt = {
// TODO figure out a more efficient way of doing this. Is this many muxes really necessary?
val exp = u.asBools.zipWithIndex.map { case (b, i) =>
Mux(b, i.U, 0.U)
}.reduce((acc, u) => Mux(acc > u, acc, u))
(1.U << exp).asUInt
}
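// For example, closestLowerPowerOf2(12.U): bits 2 and 3 of 0b1100 are set, the mux-reduce keeps the larger index
// (exp = 3), and the result is 1 << 3 = 8.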
def closestAlignedLowerPowerOf2(u: UInt, addr: UInt, stride: UInt, rowBytes: Int): UInt = {
val lgRowBytes = log2Ceil(rowBytes)
// TODO figure out a more efficient way of doing this. Is this many muxes really necessary?
val exp = u.asBools.zipWithIndex.map { case (b, i) =>
Mux(b && addr(i + lgRowBytes - 1, 0) === 0.U && stride(i + lgRowBytes - 1, 0) === 0.U, i.U, 0.U)
}.reduce((acc, u) => Mux(acc > u, acc, u))
(1.U << exp).asUInt
}
// This function will return "next" with a 0-cycle delay when the "enable" signal is high. It's like a queue with
// the "pipe" and "flow" parameters set to "true"
def RegEnableThru[T <: Data](next: T, enable: Bool): T = {
val buf = RegEnable(next, enable)
Mux(enable, next, buf)
}
def RegEnableThru[T <: Data](next: T, init: T, enable: Bool): T = {
val buf = RegEnable(next, init, enable)
Mux(enable, next, buf)
}
def maxOf(u1: UInt, u2: UInt): UInt = {
Mux(u1 > u2, u1, u2)
}
def maxOf[T <: Data](x: T, y: T)(implicit ev: Arithmetic[T]): T = {
import ev._
Mux(x > y, x, y)
}
def minOf(u1: UInt, u2: UInt): UInt = {
Mux(u1 < u2, u1, u2)
}
def accumulateTree[T <: Data](xs: Seq[T])(implicit ev: Arithmetic[T]): T = {
import ev._
assert(xs.nonEmpty, "can't accumulate 0 elements")
if (xs.length == 1) {
xs.head
} else {
val upperRowLen = 1 << log2Ceil(xs.length)
val upperRow = xs.padTo(upperRowLen, xs.head.zero)
val pairs = upperRow.grouped(2)
val lowerRow = pairs.map { case Seq(a, b) => a + b }
accumulateTree(lowerRow.toSeq)
}
}
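// For example, accumulateTree(Seq(a, b, c)) pads the row to the next power of two with zeros (Seq(a, b, c, zero)),
// reduces pairwise to Seq(a + b, c + zero), and recurses to produce (a + b) + (c + zero), i.e. a log2-depth adder
// tree instead of a linear reduction chain.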
// An undirectioned Valid bundle
class UDValid[T <: Data](t: T) extends Bundle {
val valid = Bool()
val bits = t.cloneType
def push(b: T): Unit = {
valid := true.B
bits := b
}
def pop(dummy: Int = 0): T = {
valid := false.B
bits
}
}
object UDValid {
def apply[T <: Data](t: T): UDValid[T] = new UDValid(t)
}
// Creates a Reg and the next-state Wire that drives it, and returns both
def regwire(bits: Int) = {
val wire = Wire(UInt(bits.W))
val reg = RegNext(wire)
wire := reg // default wire to read from reg
(reg, wire)
}
}
File TagQueue.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
trait TagQueueTag {
def make_this_garbage(dummy: Int = 0): Unit
}
class TagQueue[T <: Data with TagQueueTag](t: T, entries: Int) extends Module {
val io = IO(new Bundle {
val enq = Flipped(Decoupled(t.cloneType))
val deq = Decoupled(t.cloneType)
val all = Output(Vec(entries, t.cloneType))
})
val regs = Reg(Vec(entries, t.cloneType))
val raddr = RegInit(0.U(log2Up(entries).W))
val waddr = RegInit(0.U(log2Up(entries).W))
val len = RegInit(0.U(log2Up(entries+1).W))
val empty = len === 0.U
val full = len === entries.U
io.enq.ready := !full
io.deq.valid := !empty
io.deq.bits := regs(raddr)
io.all := regs
when (io.enq.fire) {
regs(waddr) := io.enq.bits
waddr := wrappingAdd(waddr, 1.U, entries)
}
when (io.deq.fire) {
regs(raddr).make_this_garbage()
raddr := wrappingAdd(raddr, 1.U, entries)
}
when (io.enq.fire && !io.deq.fire) {
len := len + 1.U
}.elsewhen(!io.enq.fire && io.deq.fire) {
len := len - 1.U
}
when (reset.asBool) {
regs.foreach(_.make_this_garbage())
}
assert(len <= entries.U)
}
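// Usage sketch (hypothetical tag type and parameters): any Bundle mixing in TagQueueTag can be queued, e.g.
//   class MyTag extends Bundle with TagQueueTag {
//     val addr = new LocalAddr(4, 4096, 2, 1024)
//     def make_this_garbage(dummy: Int = 0): Unit = addr.make_this_garbage()
//   }
//   val q = Module(new TagQueue(new MyTag, entries = 4))
// io.all exposes every stored entry, and entries are marked as garbage (via make_this_garbage) both at reset and as
// they are dequeued.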
| module TagQueue( // @[TagQueue.scala:11:7]
input clock, // @[TagQueue.scala:11:7]
input reset, // @[TagQueue.scala:11:7]
output io_enq_ready, // @[TagQueue.scala:12:14]
input io_enq_valid, // @[TagQueue.scala:12:14]
input io_enq_bits_tag_rob_id_valid, // @[TagQueue.scala:12:14]
input [5:0] io_enq_bits_tag_rob_id_bits, // @[TagQueue.scala:12:14]
input io_enq_bits_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
input io_enq_bits_tag_addr_accumulate, // @[TagQueue.scala:12:14]
input io_enq_bits_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
input [2:0] io_enq_bits_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
input [10:0] io_enq_bits_tag_addr_garbage, // @[TagQueue.scala:12:14]
input io_enq_bits_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
input [13:0] io_enq_bits_tag_addr_data, // @[TagQueue.scala:12:14]
input [2:0] io_enq_bits_tag_rows, // @[TagQueue.scala:12:14]
input [2:0] io_enq_bits_tag_cols, // @[TagQueue.scala:12:14]
input [3:0] io_enq_bits_id, // @[TagQueue.scala:12:14]
input io_deq_ready, // @[TagQueue.scala:12:14]
output io_deq_valid, // @[TagQueue.scala:12:14]
output io_deq_bits_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_deq_bits_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_deq_bits_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_deq_bits_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_deq_bits_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_deq_bits_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_deq_bits_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_deq_bits_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_deq_bits_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_deq_bits_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_deq_bits_tag_cols, // @[TagQueue.scala:12:14]
output [3:0] io_deq_bits_id, // @[TagQueue.scala:12:14]
output io_all_0_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_0_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_0_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_0_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_0_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_0_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_0_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_0_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_0_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_0_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_0_tag_cols, // @[TagQueue.scala:12:14]
output io_all_1_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_1_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_1_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_1_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_1_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_1_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_1_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_1_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_1_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_1_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_1_tag_cols, // @[TagQueue.scala:12:14]
output io_all_2_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_2_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_2_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_2_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_2_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_2_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_2_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_2_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_2_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_2_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_2_tag_cols, // @[TagQueue.scala:12:14]
output io_all_3_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_3_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_3_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_3_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_3_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_3_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_3_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_3_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_3_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_3_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_3_tag_cols, // @[TagQueue.scala:12:14]
output io_all_4_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_4_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_4_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_4_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_4_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_4_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_4_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_4_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_4_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_4_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_4_tag_cols, // @[TagQueue.scala:12:14]
output io_all_5_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_5_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_5_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_5_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_5_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_5_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_5_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_5_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_5_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_5_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_5_tag_cols, // @[TagQueue.scala:12:14]
output io_all_6_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_6_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_6_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_6_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_6_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_6_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_6_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_6_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_6_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_6_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_6_tag_cols, // @[TagQueue.scala:12:14]
output io_all_7_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_7_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_7_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_7_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_7_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_7_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_7_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_7_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_7_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_7_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_7_tag_cols, // @[TagQueue.scala:12:14]
output io_all_8_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_8_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_8_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_8_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_8_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_8_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_8_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_8_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_8_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_8_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_8_tag_cols, // @[TagQueue.scala:12:14]
output io_all_9_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_9_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_9_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_9_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_9_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_9_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_9_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_9_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_9_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_9_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_9_tag_cols, // @[TagQueue.scala:12:14]
output io_all_10_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_10_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_10_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_10_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_10_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_10_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_10_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_10_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_10_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_10_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_10_tag_cols, // @[TagQueue.scala:12:14]
output io_all_11_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_11_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_11_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_11_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_11_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_11_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_11_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_11_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_11_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_11_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_11_tag_cols, // @[TagQueue.scala:12:14]
output io_all_12_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_12_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_12_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_12_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_12_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_12_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_12_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_12_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_12_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_12_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_12_tag_cols, // @[TagQueue.scala:12:14]
output io_all_13_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_13_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_13_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_13_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_13_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_13_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_13_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_13_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_13_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_13_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_13_tag_cols, // @[TagQueue.scala:12:14]
output io_all_14_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_14_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_14_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_14_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_14_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_14_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_14_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_14_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_14_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_14_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_14_tag_cols, // @[TagQueue.scala:12:14]
output io_all_15_tag_rob_id_valid, // @[TagQueue.scala:12:14]
output [5:0] io_all_15_tag_rob_id_bits, // @[TagQueue.scala:12:14]
output io_all_15_tag_addr_is_acc_addr, // @[TagQueue.scala:12:14]
output io_all_15_tag_addr_accumulate, // @[TagQueue.scala:12:14]
output io_all_15_tag_addr_read_full_acc_row, // @[TagQueue.scala:12:14]
output [2:0] io_all_15_tag_addr_norm_cmd, // @[TagQueue.scala:12:14]
output [10:0] io_all_15_tag_addr_garbage, // @[TagQueue.scala:12:14]
output io_all_15_tag_addr_garbage_bit, // @[TagQueue.scala:12:14]
output [13:0] io_all_15_tag_addr_data, // @[TagQueue.scala:12:14]
output [2:0] io_all_15_tag_rows, // @[TagQueue.scala:12:14]
output [2:0] io_all_15_tag_cols // @[TagQueue.scala:12:14]
);
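// Module body. The "_0"-suffixed wires immediately below simply sample the
// module inputs into local nets; this is naming boilerplate from the
// Chisel/FIRRTL emitter rather than additional logic.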
wire io_enq_valid_0 = io_enq_valid; // @[TagQueue.scala:11:7]
wire io_enq_bits_tag_rob_id_valid_0 = io_enq_bits_tag_rob_id_valid; // @[TagQueue.scala:11:7]
wire [5:0] io_enq_bits_tag_rob_id_bits_0 = io_enq_bits_tag_rob_id_bits; // @[TagQueue.scala:11:7]
wire io_enq_bits_tag_addr_is_acc_addr_0 = io_enq_bits_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7]
wire io_enq_bits_tag_addr_accumulate_0 = io_enq_bits_tag_addr_accumulate; // @[TagQueue.scala:11:7]
wire io_enq_bits_tag_addr_read_full_acc_row_0 = io_enq_bits_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7]
wire [2:0] io_enq_bits_tag_addr_norm_cmd_0 = io_enq_bits_tag_addr_norm_cmd; // @[TagQueue.scala:11:7]
wire [10:0] io_enq_bits_tag_addr_garbage_0 = io_enq_bits_tag_addr_garbage; // @[TagQueue.scala:11:7]
wire io_enq_bits_tag_addr_garbage_bit_0 = io_enq_bits_tag_addr_garbage_bit; // @[TagQueue.scala:11:7]
wire [13:0] io_enq_bits_tag_addr_data_0 = io_enq_bits_tag_addr_data; // @[TagQueue.scala:11:7]
wire [2:0] io_enq_bits_tag_rows_0 = io_enq_bits_tag_rows; // @[TagQueue.scala:11:7]
wire [2:0] io_enq_bits_tag_cols_0 = io_enq_bits_tag_cols; // @[TagQueue.scala:11:7]
wire [3:0] io_enq_bits_id_0 = io_enq_bits_id; // @[TagQueue.scala:11:7]
wire io_deq_ready_0 = io_deq_ready; // @[TagQueue.scala:11:7]
wire _waddr_T_1 = reset; // @[Util.scala:12:13]
wire _raddr_T_1 = reset; // @[Util.scala:12:13]
wire [13:0] _regs_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_0_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_1_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_2_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_3_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_4_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_5_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_6_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_7_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_8_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_9_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_10_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_11_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_12_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_13_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_14_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
wire [13:0] _regs_15_tag_addr_data_T = 14'h3FFF; // @[LocalAddr.scala:99:13]
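// The 14'h3FFF constants above are the all-ones local-address value from
// LocalAddr.scala:99, presumably the "garbage" address used to invalidate
// queue entries.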
wire [2:0] io_enq_bits_total_rows = 3'h0; // @[TagQueue.scala:11:7]
wire _waddr_T = 1'h1; // @[Util.scala:12:16]
wire _waddr_T_9 = 1'h1; // @[Util.scala:13:37]
wire _raddr_T = 1'h1; // @[Util.scala:12:16]
wire _raddr_T_9 = 1'h1; // @[Util.scala:13:37]
wire [3:0] _waddr_T_7 = 4'hF; // @[Util.scala:13:26]
wire [3:0] _raddr_T_7 = 4'hF; // @[Util.scala:13:26]
wire [4:0] _waddr_T_6 = 5'hF; // @[Util.scala:13:26]
wire [4:0] _raddr_T_6 = 5'hF; // @[Util.scala:13:26]
wire [3:0] _waddr_T_5 = 4'hE; // @[Util.scala:13:22]
wire [3:0] _raddr_T_5 = 4'hE; // @[Util.scala:13:22]
wire _waddr_T_3 = 1'h0; // @[Util.scala:12:13]
wire _raddr_T_3 = 1'h0; // @[Util.scala:12:13]
wire [4:0] _waddr_T_4 = 5'hE; // @[Util.scala:13:22]
wire [4:0] _raddr_T_4 = 5'hE; // @[Util.scala:13:22]
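// The wires above are constant-folded intermediates of the pointer-wrap
// arithmetic from Util.scala:12-13 (plus the tied-off io_enq_bits_total_rows
// field); they are compile-time constants and carry no runtime logic.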
wire _io_enq_ready_T; // @[TagQueue.scala:26:19]
wire _io_deq_valid_T; // @[TagQueue.scala:27:19]
wire io_enq_ready_0; // @[TagQueue.scala:11:7]
wire io_deq_bits_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_deq_bits_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_deq_bits_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_deq_bits_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_deq_bits_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_deq_bits_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_deq_bits_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_deq_bits_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_deq_bits_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_deq_bits_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_deq_bits_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_deq_bits_id_0; // @[TagQueue.scala:11:7]
wire [2:0] io_deq_bits_total_rows; // @[TagQueue.scala:11:7]
wire io_deq_valid_0; // @[TagQueue.scala:11:7]
wire io_all_0_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_0_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_0_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_0_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_0_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_0_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_0_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_0_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_0_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_0_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_0_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_0_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_0_total_rows; // @[TagQueue.scala:11:7]
wire io_all_1_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_1_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_1_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_1_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_1_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_1_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_1_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_1_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_1_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_1_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_1_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_1_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_1_total_rows; // @[TagQueue.scala:11:7]
wire io_all_2_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_2_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_2_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_2_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_2_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_2_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_2_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_2_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_2_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_2_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_2_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_2_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_2_total_rows; // @[TagQueue.scala:11:7]
wire io_all_3_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_3_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_3_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_3_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_3_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_3_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_3_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_3_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_3_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_3_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_3_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_3_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_3_total_rows; // @[TagQueue.scala:11:7]
wire io_all_4_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_4_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_4_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_4_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_4_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_4_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_4_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_4_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_4_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_4_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_4_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_4_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_4_total_rows; // @[TagQueue.scala:11:7]
wire io_all_5_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_5_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_5_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_5_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_5_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_5_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_5_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_5_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_5_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_5_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_5_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_5_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_5_total_rows; // @[TagQueue.scala:11:7]
wire io_all_6_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_6_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_6_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_6_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_6_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_6_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_6_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_6_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_6_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_6_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_6_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_6_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_6_total_rows; // @[TagQueue.scala:11:7]
wire io_all_7_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_7_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_7_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_7_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_7_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_7_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_7_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_7_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_7_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_7_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_7_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_7_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_7_total_rows; // @[TagQueue.scala:11:7]
wire io_all_8_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_8_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_8_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_8_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_8_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_8_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_8_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_8_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_8_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_8_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_8_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_8_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_8_total_rows; // @[TagQueue.scala:11:7]
wire io_all_9_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_9_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_9_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_9_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_9_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_9_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_9_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_9_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_9_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_9_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_9_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_9_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_9_total_rows; // @[TagQueue.scala:11:7]
wire io_all_10_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_10_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_10_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_10_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_10_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_10_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_10_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_10_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_10_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_10_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_10_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_10_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_10_total_rows; // @[TagQueue.scala:11:7]
wire io_all_11_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_11_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_11_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_11_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_11_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_11_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_11_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_11_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_11_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_11_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_11_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_11_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_11_total_rows; // @[TagQueue.scala:11:7]
wire io_all_12_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_12_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_12_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_12_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_12_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_12_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_12_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_12_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_12_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_12_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_12_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_12_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_12_total_rows; // @[TagQueue.scala:11:7]
wire io_all_13_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_13_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_13_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_13_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_13_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_13_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_13_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_13_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_13_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_13_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_13_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_13_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_13_total_rows; // @[TagQueue.scala:11:7]
wire io_all_14_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_14_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_14_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_14_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_14_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_14_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_14_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_14_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_14_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_14_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_14_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_14_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_14_total_rows; // @[TagQueue.scala:11:7]
wire io_all_15_tag_rob_id_valid_0; // @[TagQueue.scala:11:7]
wire [5:0] io_all_15_tag_rob_id_bits_0; // @[TagQueue.scala:11:7]
wire io_all_15_tag_addr_is_acc_addr_0; // @[TagQueue.scala:11:7]
wire io_all_15_tag_addr_accumulate_0; // @[TagQueue.scala:11:7]
wire io_all_15_tag_addr_read_full_acc_row_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_15_tag_addr_norm_cmd_0; // @[TagQueue.scala:11:7]
wire [10:0] io_all_15_tag_addr_garbage_0; // @[TagQueue.scala:11:7]
wire io_all_15_tag_addr_garbage_bit_0; // @[TagQueue.scala:11:7]
wire [13:0] io_all_15_tag_addr_data_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_15_tag_rows_0; // @[TagQueue.scala:11:7]
wire [2:0] io_all_15_tag_cols_0; // @[TagQueue.scala:11:7]
wire [3:0] io_all_15_id; // @[TagQueue.scala:11:7]
wire [2:0] io_all_15_total_rows; // @[TagQueue.scala:11:7]
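// Queue storage: one register per field for each of the 16 entries. Every
// entry is also exported combinationally on the matching io_all_* port via
// the assigns interleaved below.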
reg regs_0_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_0_tag_rob_id_valid_0 = regs_0_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_0_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_0_tag_rob_id_bits_0 = regs_0_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_0_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_0_tag_addr_is_acc_addr_0 = regs_0_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_0_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_0_tag_addr_accumulate_0 = regs_0_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_0_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_0_tag_addr_read_full_acc_row_0 = regs_0_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_0_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_0_tag_addr_norm_cmd_0 = regs_0_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_0_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_0_tag_addr_garbage_0 = regs_0_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_0_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_0_tag_addr_garbage_bit_0 = regs_0_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_0_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_0_tag_addr_data_0 = regs_0_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_0_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_0_tag_rows_0 = regs_0_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_0_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_0_tag_cols_0 = regs_0_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_0_id; // @[TagQueue.scala:18:17]
assign io_all_0_id = regs_0_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_0_total_rows; // @[TagQueue.scala:18:17]
assign io_all_0_total_rows = regs_0_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_1_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_1_tag_rob_id_valid_0 = regs_1_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_1_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_1_tag_rob_id_bits_0 = regs_1_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_1_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_1_tag_addr_is_acc_addr_0 = regs_1_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_1_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_1_tag_addr_accumulate_0 = regs_1_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_1_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_1_tag_addr_read_full_acc_row_0 = regs_1_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_1_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_1_tag_addr_norm_cmd_0 = regs_1_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_1_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_1_tag_addr_garbage_0 = regs_1_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_1_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_1_tag_addr_garbage_bit_0 = regs_1_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_1_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_1_tag_addr_data_0 = regs_1_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_1_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_1_tag_rows_0 = regs_1_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_1_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_1_tag_cols_0 = regs_1_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_1_id; // @[TagQueue.scala:18:17]
assign io_all_1_id = regs_1_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_1_total_rows; // @[TagQueue.scala:18:17]
assign io_all_1_total_rows = regs_1_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_2_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_2_tag_rob_id_valid_0 = regs_2_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_2_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_2_tag_rob_id_bits_0 = regs_2_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_2_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_2_tag_addr_is_acc_addr_0 = regs_2_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_2_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_2_tag_addr_accumulate_0 = regs_2_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_2_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_2_tag_addr_read_full_acc_row_0 = regs_2_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_2_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_2_tag_addr_norm_cmd_0 = regs_2_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_2_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_2_tag_addr_garbage_0 = regs_2_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_2_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_2_tag_addr_garbage_bit_0 = regs_2_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_2_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_2_tag_addr_data_0 = regs_2_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_2_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_2_tag_rows_0 = regs_2_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_2_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_2_tag_cols_0 = regs_2_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_2_id; // @[TagQueue.scala:18:17]
assign io_all_2_id = regs_2_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_2_total_rows; // @[TagQueue.scala:18:17]
assign io_all_2_total_rows = regs_2_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_3_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_3_tag_rob_id_valid_0 = regs_3_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_3_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_3_tag_rob_id_bits_0 = regs_3_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_3_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_3_tag_addr_is_acc_addr_0 = regs_3_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_3_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_3_tag_addr_accumulate_0 = regs_3_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_3_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_3_tag_addr_read_full_acc_row_0 = regs_3_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_3_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_3_tag_addr_norm_cmd_0 = regs_3_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_3_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_3_tag_addr_garbage_0 = regs_3_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_3_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_3_tag_addr_garbage_bit_0 = regs_3_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_3_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_3_tag_addr_data_0 = regs_3_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_3_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_3_tag_rows_0 = regs_3_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_3_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_3_tag_cols_0 = regs_3_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_3_id; // @[TagQueue.scala:18:17]
assign io_all_3_id = regs_3_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_3_total_rows; // @[TagQueue.scala:18:17]
assign io_all_3_total_rows = regs_3_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_4_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_4_tag_rob_id_valid_0 = regs_4_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_4_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_4_tag_rob_id_bits_0 = regs_4_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_4_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_4_tag_addr_is_acc_addr_0 = regs_4_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_4_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_4_tag_addr_accumulate_0 = regs_4_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_4_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_4_tag_addr_read_full_acc_row_0 = regs_4_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_4_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_4_tag_addr_norm_cmd_0 = regs_4_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_4_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_4_tag_addr_garbage_0 = regs_4_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_4_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_4_tag_addr_garbage_bit_0 = regs_4_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_4_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_4_tag_addr_data_0 = regs_4_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_4_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_4_tag_rows_0 = regs_4_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_4_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_4_tag_cols_0 = regs_4_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_4_id; // @[TagQueue.scala:18:17]
assign io_all_4_id = regs_4_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_4_total_rows; // @[TagQueue.scala:18:17]
assign io_all_4_total_rows = regs_4_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_5_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_5_tag_rob_id_valid_0 = regs_5_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_5_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_5_tag_rob_id_bits_0 = regs_5_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_5_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_5_tag_addr_is_acc_addr_0 = regs_5_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_5_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_5_tag_addr_accumulate_0 = regs_5_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_5_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_5_tag_addr_read_full_acc_row_0 = regs_5_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_5_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_5_tag_addr_norm_cmd_0 = regs_5_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_5_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_5_tag_addr_garbage_0 = regs_5_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_5_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_5_tag_addr_garbage_bit_0 = regs_5_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_5_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_5_tag_addr_data_0 = regs_5_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_5_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_5_tag_rows_0 = regs_5_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_5_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_5_tag_cols_0 = regs_5_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_5_id; // @[TagQueue.scala:18:17]
assign io_all_5_id = regs_5_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_5_total_rows; // @[TagQueue.scala:18:17]
assign io_all_5_total_rows = regs_5_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_6_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_6_tag_rob_id_valid_0 = regs_6_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_6_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_6_tag_rob_id_bits_0 = regs_6_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_6_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_6_tag_addr_is_acc_addr_0 = regs_6_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_6_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_6_tag_addr_accumulate_0 = regs_6_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_6_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_6_tag_addr_read_full_acc_row_0 = regs_6_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_6_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_6_tag_addr_norm_cmd_0 = regs_6_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_6_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_6_tag_addr_garbage_0 = regs_6_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_6_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_6_tag_addr_garbage_bit_0 = regs_6_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_6_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_6_tag_addr_data_0 = regs_6_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_6_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_6_tag_rows_0 = regs_6_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_6_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_6_tag_cols_0 = regs_6_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_6_id; // @[TagQueue.scala:18:17]
assign io_all_6_id = regs_6_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_6_total_rows; // @[TagQueue.scala:18:17]
assign io_all_6_total_rows = regs_6_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_7_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_7_tag_rob_id_valid_0 = regs_7_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_7_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_7_tag_rob_id_bits_0 = regs_7_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_7_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_7_tag_addr_is_acc_addr_0 = regs_7_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_7_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_7_tag_addr_accumulate_0 = regs_7_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_7_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_7_tag_addr_read_full_acc_row_0 = regs_7_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_7_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_7_tag_addr_norm_cmd_0 = regs_7_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_7_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_7_tag_addr_garbage_0 = regs_7_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_7_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_7_tag_addr_garbage_bit_0 = regs_7_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_7_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_7_tag_addr_data_0 = regs_7_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_7_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_7_tag_rows_0 = regs_7_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_7_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_7_tag_cols_0 = regs_7_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_7_id; // @[TagQueue.scala:18:17]
assign io_all_7_id = regs_7_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_7_total_rows; // @[TagQueue.scala:18:17]
assign io_all_7_total_rows = regs_7_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_8_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_8_tag_rob_id_valid_0 = regs_8_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_8_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_8_tag_rob_id_bits_0 = regs_8_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_8_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_8_tag_addr_is_acc_addr_0 = regs_8_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_8_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_8_tag_addr_accumulate_0 = regs_8_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_8_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_8_tag_addr_read_full_acc_row_0 = regs_8_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_8_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_8_tag_addr_norm_cmd_0 = regs_8_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_8_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_8_tag_addr_garbage_0 = regs_8_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_8_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_8_tag_addr_garbage_bit_0 = regs_8_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_8_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_8_tag_addr_data_0 = regs_8_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_8_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_8_tag_rows_0 = regs_8_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_8_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_8_tag_cols_0 = regs_8_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_8_id; // @[TagQueue.scala:18:17]
assign io_all_8_id = regs_8_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_8_total_rows; // @[TagQueue.scala:18:17]
assign io_all_8_total_rows = regs_8_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_9_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_9_tag_rob_id_valid_0 = regs_9_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_9_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_9_tag_rob_id_bits_0 = regs_9_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_9_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_9_tag_addr_is_acc_addr_0 = regs_9_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_9_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_9_tag_addr_accumulate_0 = regs_9_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_9_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_9_tag_addr_read_full_acc_row_0 = regs_9_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_9_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_9_tag_addr_norm_cmd_0 = regs_9_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_9_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_9_tag_addr_garbage_0 = regs_9_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_9_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_9_tag_addr_garbage_bit_0 = regs_9_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_9_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_9_tag_addr_data_0 = regs_9_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_9_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_9_tag_rows_0 = regs_9_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_9_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_9_tag_cols_0 = regs_9_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_9_id; // @[TagQueue.scala:18:17]
assign io_all_9_id = regs_9_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_9_total_rows; // @[TagQueue.scala:18:17]
assign io_all_9_total_rows = regs_9_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_10_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_10_tag_rob_id_valid_0 = regs_10_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_10_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_10_tag_rob_id_bits_0 = regs_10_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_10_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_10_tag_addr_is_acc_addr_0 = regs_10_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_10_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_10_tag_addr_accumulate_0 = regs_10_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_10_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_10_tag_addr_read_full_acc_row_0 = regs_10_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_10_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_10_tag_addr_norm_cmd_0 = regs_10_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_10_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_10_tag_addr_garbage_0 = regs_10_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_10_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_10_tag_addr_garbage_bit_0 = regs_10_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_10_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_10_tag_addr_data_0 = regs_10_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_10_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_10_tag_rows_0 = regs_10_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_10_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_10_tag_cols_0 = regs_10_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_10_id; // @[TagQueue.scala:18:17]
assign io_all_10_id = regs_10_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_10_total_rows; // @[TagQueue.scala:18:17]
assign io_all_10_total_rows = regs_10_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_11_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_11_tag_rob_id_valid_0 = regs_11_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_11_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_11_tag_rob_id_bits_0 = regs_11_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_11_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_11_tag_addr_is_acc_addr_0 = regs_11_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_11_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_11_tag_addr_accumulate_0 = regs_11_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_11_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_11_tag_addr_read_full_acc_row_0 = regs_11_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_11_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_11_tag_addr_norm_cmd_0 = regs_11_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_11_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_11_tag_addr_garbage_0 = regs_11_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_11_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_11_tag_addr_garbage_bit_0 = regs_11_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_11_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_11_tag_addr_data_0 = regs_11_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_11_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_11_tag_rows_0 = regs_11_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_11_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_11_tag_cols_0 = regs_11_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_11_id; // @[TagQueue.scala:18:17]
assign io_all_11_id = regs_11_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_11_total_rows; // @[TagQueue.scala:18:17]
assign io_all_11_total_rows = regs_11_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_12_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_12_tag_rob_id_valid_0 = regs_12_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_12_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_12_tag_rob_id_bits_0 = regs_12_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_12_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_12_tag_addr_is_acc_addr_0 = regs_12_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_12_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_12_tag_addr_accumulate_0 = regs_12_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_12_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_12_tag_addr_read_full_acc_row_0 = regs_12_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_12_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_12_tag_addr_norm_cmd_0 = regs_12_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_12_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_12_tag_addr_garbage_0 = regs_12_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_12_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_12_tag_addr_garbage_bit_0 = regs_12_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_12_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_12_tag_addr_data_0 = regs_12_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_12_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_12_tag_rows_0 = regs_12_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_12_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_12_tag_cols_0 = regs_12_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_12_id; // @[TagQueue.scala:18:17]
assign io_all_12_id = regs_12_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_12_total_rows; // @[TagQueue.scala:18:17]
assign io_all_12_total_rows = regs_12_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_13_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_13_tag_rob_id_valid_0 = regs_13_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_13_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_13_tag_rob_id_bits_0 = regs_13_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_13_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_13_tag_addr_is_acc_addr_0 = regs_13_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_13_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_13_tag_addr_accumulate_0 = regs_13_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_13_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_13_tag_addr_read_full_acc_row_0 = regs_13_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_13_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_13_tag_addr_norm_cmd_0 = regs_13_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_13_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_13_tag_addr_garbage_0 = regs_13_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_13_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_13_tag_addr_garbage_bit_0 = regs_13_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_13_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_13_tag_addr_data_0 = regs_13_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_13_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_13_tag_rows_0 = regs_13_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_13_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_13_tag_cols_0 = regs_13_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_13_id; // @[TagQueue.scala:18:17]
assign io_all_13_id = regs_13_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_13_total_rows; // @[TagQueue.scala:18:17]
assign io_all_13_total_rows = regs_13_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_14_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_14_tag_rob_id_valid_0 = regs_14_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_14_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_14_tag_rob_id_bits_0 = regs_14_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_14_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_14_tag_addr_is_acc_addr_0 = regs_14_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_14_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_14_tag_addr_accumulate_0 = regs_14_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_14_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_14_tag_addr_read_full_acc_row_0 = regs_14_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_14_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_14_tag_addr_norm_cmd_0 = regs_14_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_14_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_14_tag_addr_garbage_0 = regs_14_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_14_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_14_tag_addr_garbage_bit_0 = regs_14_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_14_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_14_tag_addr_data_0 = regs_14_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_14_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_14_tag_rows_0 = regs_14_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_14_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_14_tag_cols_0 = regs_14_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_14_id; // @[TagQueue.scala:18:17]
assign io_all_14_id = regs_14_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_14_total_rows; // @[TagQueue.scala:18:17]
assign io_all_14_total_rows = regs_14_total_rows; // @[TagQueue.scala:11:7, :18:17]
reg regs_15_tag_rob_id_valid; // @[TagQueue.scala:18:17]
assign io_all_15_tag_rob_id_valid_0 = regs_15_tag_rob_id_valid; // @[TagQueue.scala:11:7, :18:17]
reg [5:0] regs_15_tag_rob_id_bits; // @[TagQueue.scala:18:17]
assign io_all_15_tag_rob_id_bits_0 = regs_15_tag_rob_id_bits; // @[TagQueue.scala:11:7, :18:17]
reg regs_15_tag_addr_is_acc_addr; // @[TagQueue.scala:18:17]
assign io_all_15_tag_addr_is_acc_addr_0 = regs_15_tag_addr_is_acc_addr; // @[TagQueue.scala:11:7, :18:17]
reg regs_15_tag_addr_accumulate; // @[TagQueue.scala:18:17]
assign io_all_15_tag_addr_accumulate_0 = regs_15_tag_addr_accumulate; // @[TagQueue.scala:11:7, :18:17]
reg regs_15_tag_addr_read_full_acc_row; // @[TagQueue.scala:18:17]
assign io_all_15_tag_addr_read_full_acc_row_0 = regs_15_tag_addr_read_full_acc_row; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_15_tag_addr_norm_cmd; // @[TagQueue.scala:18:17]
assign io_all_15_tag_addr_norm_cmd_0 = regs_15_tag_addr_norm_cmd; // @[TagQueue.scala:11:7, :18:17]
reg [10:0] regs_15_tag_addr_garbage; // @[TagQueue.scala:18:17]
assign io_all_15_tag_addr_garbage_0 = regs_15_tag_addr_garbage; // @[TagQueue.scala:11:7, :18:17]
reg regs_15_tag_addr_garbage_bit; // @[TagQueue.scala:18:17]
assign io_all_15_tag_addr_garbage_bit_0 = regs_15_tag_addr_garbage_bit; // @[TagQueue.scala:11:7, :18:17]
reg [13:0] regs_15_tag_addr_data; // @[TagQueue.scala:18:17]
assign io_all_15_tag_addr_data_0 = regs_15_tag_addr_data; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_15_tag_rows; // @[TagQueue.scala:18:17]
assign io_all_15_tag_rows_0 = regs_15_tag_rows; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_15_tag_cols; // @[TagQueue.scala:18:17]
assign io_all_15_tag_cols_0 = regs_15_tag_cols; // @[TagQueue.scala:11:7, :18:17]
reg [3:0] regs_15_id; // @[TagQueue.scala:18:17]
assign io_all_15_id = regs_15_id; // @[TagQueue.scala:11:7, :18:17]
reg [2:0] regs_15_total_rows; // @[TagQueue.scala:18:17]
assign io_all_15_total_rows = regs_15_total_rows; // @[TagQueue.scala:11:7, :18:17]
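// FIFO bookkeeping: read pointer, write pointer, and occupancy count for the
// 16-entry queue. empty/full and the io_enq_ready/io_deq_valid handshakes
// below are derived from len.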
reg [3:0] raddr; // @[TagQueue.scala:19:22]
reg [3:0] waddr; // @[TagQueue.scala:20:22]
reg [4:0] len; // @[TagQueue.scala:21:20]
wire empty = len == 5'h0; // @[TagQueue.scala:21:20, :23:19]
wire full = len == 5'h10; // @[TagQueue.scala:21:20, :24:18]
assign _io_enq_ready_T = ~full; // @[TagQueue.scala:24:18, :26:19]
assign io_enq_ready_0 = _io_enq_ready_T; // @[TagQueue.scala:11:7, :26:19]
assign _io_deq_valid_T = ~empty; // @[TagQueue.scala:23:19, :27:19]
assign io_deq_valid_0 = _io_deq_valid_T; // @[TagQueue.scala:11:7, :27:19]
wire [15:0] _GEN = {{regs_15_tag_rob_id_valid}, {regs_14_tag_rob_id_valid}, {regs_13_tag_rob_id_valid}, {regs_12_tag_rob_id_valid}, {regs_11_tag_rob_id_valid}, {regs_10_tag_rob_id_valid}, {regs_9_tag_rob_id_valid}, {regs_8_tag_rob_id_valid}, {regs_7_tag_rob_id_valid}, {regs_6_tag_rob_id_valid}, {regs_5_tag_rob_id_valid}, {regs_4_tag_rob_id_valid}, {regs_3_tag_rob_id_valid}, {regs_2_tag_rob_id_valid}, {regs_1_tag_rob_id_valid}, {regs_0_tag_rob_id_valid}}; // @[TagQueue.scala:18:17, :28:15]
assign io_deq_bits_tag_rob_id_valid_0 = _GEN[raddr]; // @[TagQueue.scala:11:7, :19:22, :28:15]
wire [15:0][5:0] _GEN_0 = {{regs_15_tag_rob_id_bits}, {regs_14_tag_rob_id_bits}, {regs_13_tag_rob_id_bits}, {regs_12_tag_rob_id_bits}, {regs_11_tag_rob_id_bits}, {regs_10_tag_rob_id_bits}, {regs_9_tag_rob_id_bits}, {regs_8_tag_rob_id_bits}, {regs_7_tag_rob_id_bits}, {regs_6_tag_rob_id_bits}, {regs_5_tag_rob_id_bits}, {regs_4_tag_rob_id_bits}, {regs_3_tag_rob_id_bits}, {regs_2_tag_rob_id_bits}, {regs_1_tag_rob_id_bits}, {regs_0_tag_rob_id_bits}}; // @[TagQueue.scala:18:17, :28:15]
assign io_deq_bits_tag_rob_id_bits_0 = _GEN_0[raddr]; // @[TagQueue.scala:11:7, :19:22, :28:15]
wire [15:0] _GEN_1 = {{regs_15_tag_addr_is_acc_addr}, {regs_14_tag_addr_is_acc_addr}, {regs_13_tag_addr_is_acc_addr}, {regs_12_tag_addr_is_acc_addr}, {regs_11_tag_addr_is_acc_addr}, {regs_10_tag_addr_is_acc_addr}, {regs_9_tag_addr_is_acc_addr}, {regs_8_tag_addr_is_acc_addr}, {regs_7_tag_addr_is_acc_addr}, {regs_6_tag_addr_is_acc_addr}, {regs_5_tag_addr_is_acc_addr}, {regs_4_tag_addr_is_acc_addr}, {regs_3_tag_addr_is_acc_addr}, {regs_2_tag_addr_is_acc_addr}, {regs_1_tag_addr_is_acc_addr}, {regs_0_tag_addr_is_acc_addr}}; // @[TagQueue.scala:18:17, :28:15]
assign io_deq_bits_tag_addr_is_acc_addr_0 = _GEN_1[raddr]; // @[TagQueue.scala:11:7, :19:22, :28:15]
wire [15:0] _GEN_2 = {{regs_15_tag_addr_accumulate}, {regs_14_tag_addr_accumulate}, {regs_13_tag_addr_accumulate}, {regs_12_tag_addr_accumulate}, {regs_11_tag_addr_accumulate}, {regs_10_tag_addr_accumulate}, {regs_9_tag_addr_accumulate}, {regs_8_tag_addr_accumulate}, {regs_7_tag_addr_accumulate}, {regs_6_tag_addr_accumulate}, {regs_5_tag_addr_accumulate}, {regs_4_tag_addr_accumulate}, {regs_3_tag_addr_accumulate}, {regs_2_tag_addr_accumulate}, {regs_1_tag_addr_accumulate}, {regs_0_tag_addr_accumulate}}; // @[TagQueue.scala:18:17, :28:15]
assign io_deq_bits_tag_addr_accumulate_0 = _GEN_2[raddr]; // @[TagQueue.scala:11:7, :19:22, :28:15]
wire [15:0] _GEN_3 = {{regs_15_tag_addr_read_full_acc_row}, {regs_14_tag_addr_read_full_acc_row}, {regs_13_tag_addr_read_full_acc_row}, {regs_12_tag_addr_read_full_acc_row}, {regs_11_tag_addr_read_full_acc_row}, {regs_10_tag_addr_read_full_acc_row}, {regs_9_tag_addr_read_full_acc_row}, {regs_8_tag_addr_read_full_acc_row}, {regs_7_tag_addr_read_full_acc_row}, {regs_6_tag_addr_read_full_acc_row}, {regs_5_tag_addr_read_full_acc_row}, {regs_4_tag_addr_read_full_acc_row}, {regs_3_tag_addr_read_full_acc_row}, {regs_2_tag_addr_read_full_acc_row}, {regs_1_tag_addr_read_full_acc_row}, {regs_0_tag_addr_read_full_acc_row}}; // @[TagQueue.scala:18:17, :28:15]
assign io_deq_bits_tag_addr_read_full_acc_row_0 = _GEN_3[raddr]; // @[TagQueue.scala:11:7, :19:22, :28:15]
wire [15:0][2:0] _GEN_4 = {{regs_15_tag_addr_norm_cmd}, {regs_14_tag_addr_norm_cmd}, {regs_13_tag_addr_norm_cmd}, {regs_12_tag_addr_norm_cmd}, {regs_11_tag_addr_norm_cmd}, {regs_10_tag_addr_norm_cmd}, {regs_9_tag_addr_norm_cmd}, {regs_8_tag_addr_norm_cmd}, {regs_7_tag_addr_norm_cmd}, {regs_6_tag_addr_norm_cmd}, {regs_5_tag_addr_norm_cmd}, {regs_4_tag_addr_norm_cmd}, {regs_3_tag_addr_norm_cmd}, {regs_2_tag_addr_norm_cmd}, {regs_1_tag_addr_norm_cmd}, {regs_0_tag_addr_norm_cmd}}; // @[TagQueue.scala:18:17, :28:15]
assign io_deq_bits_tag_addr_norm_cmd_0 = _GEN_4[raddr]; // @[TagQueue.scala:11:7, :19:22, :28:15]
wire [15:0][10:0] _GEN_5 = {{regs_15_tag_addr_garbage}, {regs_14_tag_addr_garbage}, {regs_13_tag_addr_garbage}, {regs_12_tag_addr_garbage}, {regs_11_tag_addr_garbage}, {regs_10_tag_addr_garbage}, {regs_9_tag_addr_garbage}, {regs_8_tag_addr_garbage}, {regs_7_tag_addr_garbage}, {regs_6_tag_addr_garbage}, {regs_5_tag_addr_garbage}, {regs_4_tag_addr_garbage}, {regs_3_tag_addr_garbage}, {regs_2_tag_addr_garbage}, {regs_1_tag_addr_garbage}, {regs_0_tag_addr_garbage}}; // @[TagQueue.scala:18:17, :28:15]
assign io_deq_bits_tag_addr_garbage_0 = _GEN_5[raddr]; // @[TagQueue.scala:11:7, :19:22, :28:15]
wire [15:0] _GEN_6 = {{regs_15_tag_addr_garbage_bit}, {regs_14_tag_addr_garbage_bit}, {regs_13_tag_addr_garbage_bit}, {regs_12_tag_addr_garbage_bit}, {regs_11_tag_addr_garbage_bit}, {regs_10_tag_addr_garbage_bit}, {regs_9_tag_addr_garbage_bit}, {regs_8_tag_addr_garbage_bit}, {regs_7_tag_addr_garbage_bit}, {regs_6_tag_addr_garbage_bit}, {regs_5_tag_addr_garbage_bit}, {regs_4_tag_addr_garbage_bit}, {regs_3_tag_addr_garbage_bit}, {regs_2_tag_addr_garbage_bit}, {regs_1_tag_addr_garbage_bit}, {regs_0_tag_addr_garbage_bit}}; // @[TagQueue.scala:18:17, :28:15]
assign io_deq_bits_tag_addr_garbage_bit_0 = _GEN_6[raddr]; // @[TagQueue.scala:11:7, :19:22, :28:15]
wire [15:0][13:0] _GEN_7 = {{regs_15_tag_addr_data}, {regs_14_tag_addr_data}, {regs_13_tag_addr_data}, {regs_12_tag_addr_data}, {regs_11_tag_addr_data}, {regs_10_tag_addr_data}, {regs_9_tag_addr_data}, {regs_8_tag_addr_data}, {regs_7_tag_addr_data}, {regs_6_tag_addr_data}, {regs_5_tag_addr_data}, {regs_4_tag_addr_data}, {regs_3_tag_addr_data}, {regs_2_tag_addr_data}, {regs_1_tag_addr_data}, {regs_0_tag_addr_data}}; // @[TagQueue.scala:18:17, :28:15]
assign io_deq_bits_tag_addr_data_0 = _GEN_7[raddr]; // @[TagQueue.scala:11:7, :19:22, :28:15]
wire [15:0][2:0] _GEN_8 = {{regs_15_tag_rows}, {regs_14_tag_rows}, {regs_13_tag_rows}, {regs_12_tag_rows}, {regs_11_tag_rows}, {regs_10_tag_rows}, {regs_9_tag_rows}, {regs_8_tag_rows}, {regs_7_tag_rows}, {regs_6_tag_rows}, {regs_5_tag_rows}, {regs_4_tag_rows}, {regs_3_tag_rows}, {regs_2_tag_rows}, {regs_1_tag_rows}, {regs_0_tag_rows}}; // @[TagQueue.scala:18:17, :28:15]
assign io_deq_bits_tag_rows_0 = _GEN_8[raddr]; // @[TagQueue.scala:11:7, :19:22, :28:15]
wire [15:0][2:0] _GEN_9 = {{regs_15_tag_cols}, {regs_14_tag_cols}, {regs_13_tag_cols}, {regs_12_tag_cols}, {regs_11_tag_cols}, {regs_10_tag_cols}, {regs_9_tag_cols}, {regs_8_tag_cols}, {regs_7_tag_cols}, {regs_6_tag_cols}, {regs_5_tag_cols}, {regs_4_tag_cols}, {regs_3_tag_cols}, {regs_2_tag_cols}, {regs_1_tag_cols}, {regs_0_tag_cols}}; // @[TagQueue.scala:18:17, :28:15]
assign io_deq_bits_tag_cols_0 = _GEN_9[raddr]; // @[TagQueue.scala:11:7, :19:22, :28:15]
wire [15:0][3:0] _GEN_10 = {{regs_15_id}, {regs_14_id}, {regs_13_id}, {regs_12_id}, {regs_11_id}, {regs_10_id}, {regs_9_id}, {regs_8_id}, {regs_7_id}, {regs_6_id}, {regs_5_id}, {regs_4_id}, {regs_3_id}, {regs_2_id}, {regs_1_id}, {regs_0_id}}; // @[TagQueue.scala:18:17, :28:15]
assign io_deq_bits_id_0 = _GEN_10[raddr]; // @[TagQueue.scala:11:7, :19:22, :28:15]
wire [15:0][2:0] _GEN_11 = {{regs_15_total_rows}, {regs_14_total_rows}, {regs_13_total_rows}, {regs_12_total_rows}, {regs_11_total_rows}, {regs_10_total_rows}, {regs_9_total_rows}, {regs_8_total_rows}, {regs_7_total_rows}, {regs_6_total_rows}, {regs_5_total_rows}, {regs_4_total_rows}, {regs_3_total_rows}, {regs_2_total_rows}, {regs_1_total_rows}, {regs_0_total_rows}}; // @[TagQueue.scala:18:17, :28:15]
assign io_deq_bits_total_rows = _GEN_11[raddr]; // @[TagQueue.scala:11:7, :19:22, :28:15]
wire _waddr_T_2 = ~_waddr_T_1; // @[Util.scala:12:13]
wire _waddr_T_8 = &waddr; // @[Util.scala:13:13]
wire _waddr_T_10 = _waddr_T_8; // @[Util.scala:13:{13,32}]
wire [4:0] _GEN_12 = {1'h0, waddr}; // @[Util.scala:13:57]
wire [4:0] _waddr_T_11 = 5'hF - _GEN_12; // @[Util.scala:13:57]
wire [3:0] _waddr_T_12 = _waddr_T_11[3:0]; // @[Util.scala:13:57]
wire [4:0] _waddr_T_13 = 5'h1 - {1'h0, _waddr_T_12}; // @[Util.scala:13:{48,57}]
wire [3:0] _waddr_T_14 = _waddr_T_13[3:0]; // @[Util.scala:13:48]
wire [4:0] _waddr_T_15 = {1'h0, _waddr_T_14} - 5'h1; // @[Util.scala:13:{48,62}]
wire [3:0] _waddr_T_16 = _waddr_T_15[3:0]; // @[Util.scala:13:62]
wire [4:0] _waddr_T_17 = _GEN_12 + 5'h1; // @[Util.scala:13:{57,71}]
wire [3:0] _waddr_T_18 = _waddr_T_17[3:0]; // @[Util.scala:13:71]
wire [3:0] _waddr_T_19 = _waddr_T_10 ? _waddr_T_16 : _waddr_T_18; // @[Util.scala:13:{10,32,62,71}]
wire _raddr_T_2 = ~_raddr_T_1; // @[Util.scala:12:13]
wire _raddr_T_8 = &raddr; // @[Util.scala:13:13]
wire _raddr_T_10 = _raddr_T_8; // @[Util.scala:13:{13,32}]
wire [4:0] _GEN_13 = {1'h0, raddr}; // @[Util.scala:13:57]
wire [4:0] _raddr_T_11 = 5'hF - _GEN_13; // @[Util.scala:13:57]
wire [3:0] _raddr_T_12 = _raddr_T_11[3:0]; // @[Util.scala:13:57]
wire [4:0] _raddr_T_13 = 5'h1 - {1'h0, _raddr_T_12}; // @[Util.scala:13:{48,57}]
wire [3:0] _raddr_T_14 = _raddr_T_13[3:0]; // @[Util.scala:13:48]
wire [4:0] _raddr_T_15 = {1'h0, _raddr_T_14} - 5'h1; // @[Util.scala:13:{48,62}]
wire [3:0] _raddr_T_16 = _raddr_T_15[3:0]; // @[Util.scala:13:62]
wire [4:0] _raddr_T_17 = _GEN_13 + 5'h1; // @[Util.scala:13:{57,71}]
wire [3:0] _raddr_T_18 = _raddr_T_17[3:0]; // @[Util.scala:13:71]
wire [3:0] _raddr_T_19 = _raddr_T_10 ? _raddr_T_16 : _raddr_T_18; // @[Util.scala:13:{10,32,62,71}]
wire [5:0] _GEN_14 = {1'h0, len}; // @[TagQueue.scala:21:20, :42:16]
wire [5:0] _len_T = _GEN_14 + 6'h1; // @[TagQueue.scala:42:16]
wire [4:0] _len_T_1 = _len_T[4:0]; // @[TagQueue.scala:42:16]
wire [5:0] _len_T_2 = _GEN_14 - 6'h1; // @[TagQueue.scala:42:16, :44:16]
wire [4:0] _len_T_3 = _len_T_2[4:0]; // @[TagQueue.scala:44:16] |
Generate the Verilog code corresponding to the following Chisel files.
File Decode.scala:
// See LICENSE.Berkeley for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.BitPat
import chisel3.util.experimental.decode._
object DecodeLogic
{
// TODO This should be a method on BitPat
private def hasDontCare(bp: BitPat): Boolean = bp.mask.bitCount != bp.width
// Pads BitPats that are safe to pad (no don't cares), errors otherwise
private def padBP(bp: BitPat, width: Int): BitPat = {
if (bp.width == width) bp
else {
require(!hasDontCare(bp), s"Cannot pad '$bp' to '$width' bits because it has don't cares")
val diff = width - bp.width
require(diff > 0, s"Cannot pad '$bp' to '$width' because it is already '${bp.width}' bits wide!")
BitPat(0.U(diff.W)) ## bp
}
}
def apply(addr: UInt, default: BitPat, mapping: Iterable[(BitPat, BitPat)]): UInt =
chisel3.util.experimental.decode.decoder(QMCMinimizer, addr, TruthTable(mapping, default))
def apply(addr: UInt, default: Seq[BitPat], mappingIn: Iterable[(BitPat, Seq[BitPat])]): Seq[UInt] = {
val nElts = default.size
require(mappingIn.forall(_._2.size == nElts),
s"All Seq[BitPat] must be of the same length, got $nElts vs. ${mappingIn.find(_._2.size != nElts).get}"
)
val elementsGrouped = mappingIn.map(_._2).transpose
val elementWidths = elementsGrouped.zip(default).map { case (elts, default) =>
(default :: elts.toList).map(_.getWidth).max
}
val resultWidth = elementWidths.sum
val elementIndices = elementWidths.scan(resultWidth - 1) { case (l, r) => l - r }
// All BitPats that correspond to a given element in the result must have the same width in the
    // chisel3 decoder. We will zero pad any BitPats that are too small so long as they don't have
// any don't cares. If there are don't cares, it is an error and the user needs to pad the
// BitPat themselves
val defaultsPadded = default.zip(elementWidths).map { case (bp, w) => padBP(bp, w) }
val mappingInPadded = mappingIn.map { case (in, elts) =>
in -> elts.zip(elementWidths).map { case (bp, w) => padBP(bp, w) }
}
val decoded = apply(addr, defaultsPadded.reduce(_ ## _), mappingInPadded.map { case (in, out) => (in, out.reduce(_ ## _)) })
elementIndices.zip(elementIndices.tail).map { case (msb, lsb) => decoded(msb, lsb + 1) }.toList
}
def apply(addr: UInt, default: Seq[BitPat], mappingIn: List[(UInt, Seq[BitPat])]): Seq[UInt] =
apply(addr, default, mappingIn.map(m => (BitPat(m._1), m._2)).asInstanceOf[Iterable[(BitPat, Seq[BitPat])]])
def apply(addr: UInt, trues: Iterable[UInt], falses: Iterable[UInt]): Bool =
apply(addr, BitPat.dontCare(1), trues.map(BitPat(_) -> BitPat("b1")) ++ falses.map(BitPat(_) -> BitPat("b0"))).asBool
}
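// Illustrative usage sketch of the single-BitPat overload above (hypothetical, not
// part of the original Decode.scala): a made-up 5-bit opcode decoded into a 3-bit
// control word. Don't-cares are allowed in the keys, and QMCMinimizer reduces the
// truth table before it is emitted as decode logic.
class ExampleDecoder extends Module {
  val io = IO(new Bundle {
    val opcode = Input(UInt(5.W))
    val ctrl = Output(UInt(3.W))
  })
  val mapping = Seq(
    BitPat("b00001") -> BitPat("b001"),
    BitPat("b00010") -> BitPat("b010"),
    BitPat("b001??") -> BitPat("b100")) // any opcode of the form 001?? yields b100
  io.ctrl := DecodeLogic(io.opcode, BitPat("b000"), mapping)
}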
File fpu.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
package boom.v3.exu
import chisel3._
import chisel3.util._
import chisel3.experimental.dataview._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile.FPConstants._
import freechips.rocketchip.tile.{FPUCtrlSigs, HasFPUParameters}
import freechips.rocketchip.tile
import freechips.rocketchip.rocket
import freechips.rocketchip.util.uintToBitPat
import boom.v3.common._
import boom.v3.util.{ImmGenRm, ImmGenTyp}
/**
* FP Decoder for the FPU
*
* TODO get rid of this decoder and move into the Decode stage? Or the RRd stage?
* most of these signals are already created, just need to be translated
* to the Rocket FPU-speak
*/
class UOPCodeFPUDecoder(implicit p: Parameters) extends BoomModule with HasFPUParameters
{
val io = IO(new Bundle {
val uopc = Input(Bits(UOPC_SZ.W))
val sigs = Output(new FPUCtrlSigs())
})
// TODO change N,Y,X to BitPat("b1"), BitPat("b0"), and BitPat("b?")
val N = false.B
val Y = true.B
val X = false.B
val default: List[BitPat] = List(X,X,X,X,X, X,X,X,X,X,X,X, X,X,X,X)
val f_table: Array[(BitPat, List[BitPat])] =
// Note: not all of these signals are used or necessary, but we're
// constrained by the need to fit the rocket.FPU units' ctrl signals.
// swap12 fma
// | swap32 | div
// | | typeTagIn | | sqrt
// ldst | | | typeTagOut | | wflags
// | wen | | | | from_int | | |
// | | ren1 | | | | | to_int | | |
// | | | ren2 | | | | | | fastpipe |
// | | | | ren3 | | | | | | | | | |
// | | | | | | | | | | | | | | | |
Array(
BitPat(uopFCLASS_S) -> List(X,X,Y,N,N, N,X,S,S,N,Y,N, N,N,N,N),
BitPat(uopFMV_W_X) -> List(X,X,N,N,N, X,X,S,D,Y,N,N, N,N,N,N),
BitPat(uopFMV_X_W) -> List(X,X,Y,N,N, N,X,D,S,N,Y,N, N,N,N,N),
BitPat(uopFCVT_S_X) -> List(X,X,N,N,N, X,X,S,S,Y,N,N, N,N,N,Y),
BitPat(uopFCVT_X_S) -> List(X,X,Y,N,N, N,X,S,S,N,Y,N, N,N,N,Y),
BitPat(uopCMPR_S) -> List(X,X,Y,Y,N, N,N,S,S,N,Y,N, N,N,N,Y),
BitPat(uopFSGNJ_S) -> List(X,X,Y,Y,N, N,N,S,S,N,N,Y, N,N,N,N),
BitPat(uopFMINMAX_S)-> List(X,X,Y,Y,N, N,N,S,S,N,N,Y, N,N,N,Y),
BitPat(uopFADD_S) -> List(X,X,Y,Y,N, N,Y,S,S,N,N,N, Y,N,N,Y),
BitPat(uopFSUB_S) -> List(X,X,Y,Y,N, N,Y,S,S,N,N,N, Y,N,N,Y),
BitPat(uopFMUL_S) -> List(X,X,Y,Y,N, N,N,S,S,N,N,N, Y,N,N,Y),
BitPat(uopFMADD_S) -> List(X,X,Y,Y,Y, N,N,S,S,N,N,N, Y,N,N,Y),
BitPat(uopFMSUB_S) -> List(X,X,Y,Y,Y, N,N,S,S,N,N,N, Y,N,N,Y),
BitPat(uopFNMADD_S) -> List(X,X,Y,Y,Y, N,N,S,S,N,N,N, Y,N,N,Y),
BitPat(uopFNMSUB_S) -> List(X,X,Y,Y,Y, N,N,S,S,N,N,N, Y,N,N,Y)
)
val d_table: Array[(BitPat, List[BitPat])] =
Array(
BitPat(uopFCLASS_D) -> List(X,X,Y,N,N, N,X,D,D,N,Y,N, N,N,N,N),
BitPat(uopFMV_D_X) -> List(X,X,N,N,N, X,X,D,D,Y,N,N, N,N,N,N),
BitPat(uopFMV_X_D) -> List(X,X,Y,N,N, N,X,D,D,N,Y,N, N,N,N,N),
BitPat(uopFCVT_S_D) -> List(X,X,Y,N,N, N,X,D,S,N,N,Y, N,N,N,Y),
BitPat(uopFCVT_D_S) -> List(X,X,Y,N,N, N,X,S,D,N,N,Y, N,N,N,Y),
BitPat(uopFCVT_D_X) -> List(X,X,N,N,N, X,X,D,D,Y,N,N, N,N,N,Y),
BitPat(uopFCVT_X_D) -> List(X,X,Y,N,N, N,X,D,D,N,Y,N, N,N,N,Y),
BitPat(uopCMPR_D) -> List(X,X,Y,Y,N, N,N,D,D,N,Y,N, N,N,N,Y),
BitPat(uopFSGNJ_D) -> List(X,X,Y,Y,N, N,N,D,D,N,N,Y, N,N,N,N),
BitPat(uopFMINMAX_D)-> List(X,X,Y,Y,N, N,N,D,D,N,N,Y, N,N,N,Y),
BitPat(uopFADD_D) -> List(X,X,Y,Y,N, N,Y,D,D,N,N,N, Y,N,N,Y),
BitPat(uopFSUB_D) -> List(X,X,Y,Y,N, N,Y,D,D,N,N,N, Y,N,N,Y),
BitPat(uopFMUL_D) -> List(X,X,Y,Y,N, N,N,D,D,N,N,N, Y,N,N,Y),
BitPat(uopFMADD_D) -> List(X,X,Y,Y,Y, N,N,D,D,N,N,N, Y,N,N,Y),
BitPat(uopFMSUB_D) -> List(X,X,Y,Y,Y, N,N,D,D,N,N,N, Y,N,N,Y),
BitPat(uopFNMADD_D) -> List(X,X,Y,Y,Y, N,N,D,D,N,N,N, Y,N,N,Y),
BitPat(uopFNMSUB_D) -> List(X,X,Y,Y,Y, N,N,D,D,N,N,N, Y,N,N,Y)
)
// val insns = fLen match {
// case 32 => f_table
// case 64 => f_table ++ d_table
// }
val insns = f_table ++ d_table
val decoder = rocket.DecodeLogic(io.uopc, default, insns)
val s = io.sigs
val sigs = Seq(s.ldst, s.wen, s.ren1, s.ren2, s.ren3, s.swap12,
s.swap23, s.typeTagIn, s.typeTagOut, s.fromint, s.toint, s.fastpipe, s.fma,
s.div, s.sqrt, s.wflags)
sigs zip decoder map {case(s,d) => s := d}
s.vec := false.B
}
/**
 * FP fused multiply-add decoder for the FPU
*/
class FMADecoder extends Module
{
val io = IO(new Bundle {
val uopc = Input(UInt(UOPC_SZ.W))
val cmd = Output(UInt(2.W))
})
val default: List[BitPat] = List(BitPat("b??"))
val table: Array[(BitPat, List[BitPat])] =
Array(
BitPat(uopFADD_S) -> List(BitPat("b00")),
BitPat(uopFSUB_S) -> List(BitPat("b01")),
BitPat(uopFMUL_S) -> List(BitPat("b00")),
BitPat(uopFMADD_S) -> List(BitPat("b00")),
BitPat(uopFMSUB_S) -> List(BitPat("b01")),
BitPat(uopFNMADD_S) -> List(BitPat("b11")),
BitPat(uopFNMSUB_S) -> List(BitPat("b10")),
BitPat(uopFADD_D) -> List(BitPat("b00")),
BitPat(uopFSUB_D) -> List(BitPat("b01")),
BitPat(uopFMUL_D) -> List(BitPat("b00")),
BitPat(uopFMADD_D) -> List(BitPat("b00")),
BitPat(uopFMSUB_D) -> List(BitPat("b01")),
BitPat(uopFNMADD_D) -> List(BitPat("b11")),
BitPat(uopFNMSUB_D) -> List(BitPat("b10"))
)
val decoder = rocket.DecodeLogic(io.uopc, default, table)
val (cmd: UInt) :: Nil = decoder
io.cmd := cmd
}
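// Illustrative note (an inference from the table above and hardfloat's MulAddRecFN
// op convention, not stated in the original file): cmd feeds req.fmaCmd below, with
// cmd(1) negating the product and cmd(0) negating the addend, i.e.
//   uopFMADD_*  -> b00 :  (a*b) + c
//   uopFMSUB_*  -> b01 :  (a*b) - c
//   uopFNMSUB_* -> b10 : -(a*b) + c
//   uopFNMADD_* -> b11 : -(a*b) - c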
/**
* Bundle representing data to be sent to the FPU
*/
class FpuReq()(implicit p: Parameters) extends BoomBundle
{
val uop = new MicroOp()
val rs1_data = Bits(65.W)
val rs2_data = Bits(65.W)
val rs3_data = Bits(65.W)
val fcsr_rm = Bits(tile.FPConstants.RM_SZ.W)
}
/**
* FPU unit that wraps the RocketChip FPU units (which in turn wrap hardfloat)
*/
class FPU(implicit p: Parameters) extends BoomModule with tile.HasFPUParameters
{
val io = IO(new Bundle {
val req = Flipped(new ValidIO(new FpuReq))
val resp = new ValidIO(new ExeUnitResp(65))
})
io.resp.bits := DontCare
// all FP units are padded out to the same latency for easy scheduling of the write port
val fpu_latency = dfmaLatency
val io_req = io.req.bits
val fp_decoder = Module(new UOPCodeFPUDecoder)
fp_decoder.io.uopc := io_req.uop.uopc
val fp_ctrl = fp_decoder.io.sigs
val fp_rm = Mux(ImmGenRm(io_req.uop.imm_packed) === 7.U, io_req.fcsr_rm, ImmGenRm(io_req.uop.imm_packed))
def fuInput(minT: Option[tile.FType]): tile.FPInput = {
val req = Wire(new tile.FPInput)
val tag = fp_ctrl.typeTagIn
req.viewAsSupertype(new tile.FPUCtrlSigs) := fp_ctrl
req.rm := fp_rm
req.in1 := unbox(io_req.rs1_data, tag, minT)
req.in2 := unbox(io_req.rs2_data, tag, minT)
req.in3 := unbox(io_req.rs3_data, tag, minT)
when (fp_ctrl.swap23) { req.in3 := req.in2 }
req.typ := ImmGenTyp(io_req.uop.imm_packed)
req.fmt := Mux(tag === S, 0.U, 1.U) // TODO support Zfh and avoid special-case below
when (io_req.uop.uopc === uopFMV_X_W) {
req.fmt := 0.U
}
val fma_decoder = Module(new FMADecoder)
fma_decoder.io.uopc := io_req.uop.uopc
req.fmaCmd := fma_decoder.io.cmd // ex_reg_inst(3,2) | (!fp_ctrl.ren3 && ex_reg_inst(27))
req
}
val dfma = Module(new tile.FPUFMAPipe(latency = fpu_latency, t = tile.FType.D))
dfma.io.in.valid := io.req.valid && fp_ctrl.fma && (fp_ctrl.typeTagOut === D)
dfma.io.in.bits := fuInput(Some(dfma.t))
val sfma = Module(new tile.FPUFMAPipe(latency = fpu_latency, t = tile.FType.S))
sfma.io.in.valid := io.req.valid && fp_ctrl.fma && (fp_ctrl.typeTagOut === S)
sfma.io.in.bits := fuInput(Some(sfma.t))
val fpiu = Module(new tile.FPToInt)
fpiu.io.in.valid := io.req.valid && (fp_ctrl.toint || (fp_ctrl.fastpipe && fp_ctrl.wflags))
fpiu.io.in.bits := fuInput(None)
val fpiu_out = Pipe(RegNext(fpiu.io.in.valid && !fp_ctrl.fastpipe),
fpiu.io.out.bits, fpu_latency-1)
val fpiu_result = Wire(new tile.FPResult)
fpiu_result.data := fpiu_out.bits.toint
fpiu_result.exc := fpiu_out.bits.exc
val fpmu = Module(new tile.FPToFP(fpu_latency)) // latency 2 for rocket
fpmu.io.in.valid := io.req.valid && fp_ctrl.fastpipe
fpmu.io.in.bits := fpiu.io.in.bits
fpmu.io.lt := fpiu.io.out.bits.lt
val fpmu_double = Pipe(io.req.valid && fp_ctrl.fastpipe, fp_ctrl.typeTagOut === D, fpu_latency).bits
// Response (all FP units have been padded out to the same latency)
io.resp.valid := fpiu_out.valid ||
fpmu.io.out.valid ||
sfma.io.out.valid ||
dfma.io.out.valid
val fpu_out_data =
Mux(dfma.io.out.valid, box(dfma.io.out.bits.data, true.B),
Mux(sfma.io.out.valid, box(sfma.io.out.bits.data, false.B),
Mux(fpiu_out.valid, fpiu_result.data,
box(fpmu.io.out.bits.data, fpmu_double))))
val fpu_out_exc =
Mux(dfma.io.out.valid, dfma.io.out.bits.exc,
Mux(sfma.io.out.valid, sfma.io.out.bits.exc,
Mux(fpiu_out.valid, fpiu_result.exc,
fpmu.io.out.bits.exc)))
io.resp.bits.data := fpu_out_data
io.resp.bits.fflags.valid := io.resp.valid
io.resp.bits.fflags.bits.flags := fpu_out_exc
}
| module FMADecoder_4( // @[fpu.scala:123:7]
input clock, // @[fpu.scala:123:7]
input reset, // @[fpu.scala:123:7]
input [6:0] io_uopc, // @[fpu.scala:125:14]
output [1:0] io_cmd // @[fpu.scala:125:14]
);
wire [6:0] io_uopc_0 = io_uopc; // @[fpu.scala:123:7]
wire [6:0] decoder_decoded_plaInput = io_uopc_0; // @[pla.scala:77:22]
wire [1:0] decoder_0; // @[Decode.scala:50:77]
wire [1:0] io_cmd_0; // @[fpu.scala:123:7]
wire [6:0] decoder_decoded_invInputs = ~decoder_decoded_plaInput; // @[pla.scala:77:22, :78:21]
wire [1:0] decoder_decoded_invMatrixOutputs; // @[pla.scala:120:37]
wire [1:0] decoder_decoded; // @[pla.scala:81:23]
assign decoder_0 = decoder_decoded; // @[pla.scala:81:23]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_0 = decoder_decoded_invInputs[0]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_4 = decoder_decoded_invInputs[0]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_1 = decoder_decoded_invInputs[1]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_4 = decoder_decoded_invInputs[1]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_2 = decoder_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_5 = decoder_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_6 = decoder_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_3 = decoder_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_1 = decoder_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_2 = decoder_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_3 = decoder_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_4 = decoder_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_1 = decoder_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_2 = decoder_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_3 = decoder_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_5 = decoder_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_1 = decoder_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_2 = decoder_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_5_3 = decoder_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_6 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_5_1 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_5_2 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_6_1 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_5_4 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_5_5 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_6_2 = decoder_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi = {decoder_decoded_andMatrixOutputs_andMatrixInput_4, decoder_decoded_andMatrixOutputs_andMatrixInput_5}; // @[pla.scala:90:45, :91:29, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_lo = {decoder_decoded_andMatrixOutputs_lo_hi, decoder_decoded_andMatrixOutputs_andMatrixInput_6}; // @[pla.scala:90:45, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_lo = {decoder_decoded_andMatrixOutputs_andMatrixInput_2, decoder_decoded_andMatrixOutputs_andMatrixInput_3}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi = {decoder_decoded_andMatrixOutputs_andMatrixInput_0, decoder_decoded_andMatrixOutputs_andMatrixInput_1}; // @[pla.scala:91:29, :98:53]
wire [3:0] decoder_decoded_andMatrixOutputs_hi = {decoder_decoded_andMatrixOutputs_hi_hi, decoder_decoded_andMatrixOutputs_hi_lo}; // @[pla.scala:98:53]
wire [6:0] _decoder_decoded_andMatrixOutputs_T = {decoder_decoded_andMatrixOutputs_hi, decoder_decoded_andMatrixOutputs_lo}; // @[pla.scala:98:53]
wire decoder_decoded_andMatrixOutputs_6_2 = &_decoder_decoded_andMatrixOutputs_T; // @[pla.scala:98:{53,70}]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_1 = decoder_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_3 = decoder_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_6 = decoder_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_1 = decoder_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_2 = decoder_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_3 = decoder_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_0_5 = decoder_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_6 = decoder_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_1 = {decoder_decoded_andMatrixOutputs_andMatrixInput_3_1, decoder_decoded_andMatrixOutputs_andMatrixInput_4_1}; // @[pla.scala:90:45, :91:29, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_lo_1 = {decoder_decoded_andMatrixOutputs_lo_hi_1, decoder_decoded_andMatrixOutputs_andMatrixInput_5_1}; // @[pla.scala:90:45, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_1 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_1, decoder_decoded_andMatrixOutputs_andMatrixInput_1_1}; // @[pla.scala:90:45, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_hi_1 = {decoder_decoded_andMatrixOutputs_hi_hi_1, decoder_decoded_andMatrixOutputs_andMatrixInput_2_1}; // @[pla.scala:90:45, :98:53]
wire [5:0] _decoder_decoded_andMatrixOutputs_T_1 = {decoder_decoded_andMatrixOutputs_hi_1, decoder_decoded_andMatrixOutputs_lo_1}; // @[pla.scala:98:53]
wire decoder_decoded_andMatrixOutputs_0_2 = &_decoder_decoded_andMatrixOutputs_T_1; // @[pla.scala:98:{53,70}]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_1_2 = decoder_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_3 = decoder_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45]
wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_2 = {decoder_decoded_andMatrixOutputs_andMatrixInput_3_2, decoder_decoded_andMatrixOutputs_andMatrixInput_4_2}; // @[pla.scala:90:45, :91:29, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_lo_2 = {decoder_decoded_andMatrixOutputs_lo_hi_2, decoder_decoded_andMatrixOutputs_andMatrixInput_5_2}; // @[pla.scala:90:45, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_2 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_2, decoder_decoded_andMatrixOutputs_andMatrixInput_1_2}; // @[pla.scala:90:45, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_hi_2 = {decoder_decoded_andMatrixOutputs_hi_hi_2, decoder_decoded_andMatrixOutputs_andMatrixInput_2_2}; // @[pla.scala:90:45, :98:53]
wire [5:0] _decoder_decoded_andMatrixOutputs_T_2 = {decoder_decoded_andMatrixOutputs_hi_2, decoder_decoded_andMatrixOutputs_lo_2}; // @[pla.scala:98:53]
wire decoder_decoded_andMatrixOutputs_1_2 = &_decoder_decoded_andMatrixOutputs_T_2; // @[pla.scala:98:{53,70}]
wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_3 = {decoder_decoded_andMatrixOutputs_andMatrixInput_4_3, decoder_decoded_andMatrixOutputs_andMatrixInput_5_3}; // @[pla.scala:90:45, :91:29, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_lo_3 = {decoder_decoded_andMatrixOutputs_lo_hi_3, decoder_decoded_andMatrixOutputs_andMatrixInput_6_1}; // @[pla.scala:90:45, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_lo_1 = {decoder_decoded_andMatrixOutputs_andMatrixInput_2_3, decoder_decoded_andMatrixOutputs_andMatrixInput_3_3}; // @[pla.scala:90:45, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_3 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_3, decoder_decoded_andMatrixOutputs_andMatrixInput_1_3}; // @[pla.scala:90:45, :98:53]
wire [3:0] decoder_decoded_andMatrixOutputs_hi_3 = {decoder_decoded_andMatrixOutputs_hi_hi_3, decoder_decoded_andMatrixOutputs_hi_lo_1}; // @[pla.scala:98:53]
wire [6:0] _decoder_decoded_andMatrixOutputs_T_3 = {decoder_decoded_andMatrixOutputs_hi_3, decoder_decoded_andMatrixOutputs_lo_3}; // @[pla.scala:98:53]
wire decoder_decoded_andMatrixOutputs_2_2 = &_decoder_decoded_andMatrixOutputs_T_3; // @[pla.scala:98:{53,70}]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_4 = decoder_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_2_5 = decoder_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_6 = decoder_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_4 = decoder_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_3_5 = decoder_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_6 = decoder_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_4 = decoder_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_4_5 = decoder_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire decoder_decoded_andMatrixOutputs_andMatrixInput_5_6 = decoder_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_4 = {decoder_decoded_andMatrixOutputs_andMatrixInput_3_4, decoder_decoded_andMatrixOutputs_andMatrixInput_4_4}; // @[pla.scala:90:45, :91:29, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_lo_4 = {decoder_decoded_andMatrixOutputs_lo_hi_4, decoder_decoded_andMatrixOutputs_andMatrixInput_5_4}; // @[pla.scala:90:45, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_4 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_4, decoder_decoded_andMatrixOutputs_andMatrixInput_1_4}; // @[pla.scala:91:29, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_hi_4 = {decoder_decoded_andMatrixOutputs_hi_hi_4, decoder_decoded_andMatrixOutputs_andMatrixInput_2_4}; // @[pla.scala:91:29, :98:53]
wire [5:0] _decoder_decoded_andMatrixOutputs_T_4 = {decoder_decoded_andMatrixOutputs_hi_4, decoder_decoded_andMatrixOutputs_lo_4}; // @[pla.scala:98:53]
wire decoder_decoded_andMatrixOutputs_4_2 = &_decoder_decoded_andMatrixOutputs_T_4; // @[pla.scala:98:{53,70}]
wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_5 = {decoder_decoded_andMatrixOutputs_andMatrixInput_3_5, decoder_decoded_andMatrixOutputs_andMatrixInput_4_5}; // @[pla.scala:90:45, :91:29, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_lo_5 = {decoder_decoded_andMatrixOutputs_lo_hi_5, decoder_decoded_andMatrixOutputs_andMatrixInput_5_5}; // @[pla.scala:90:45, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_5 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_5, decoder_decoded_andMatrixOutputs_andMatrixInput_1_5}; // @[pla.scala:90:45, :91:29, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_hi_5 = {decoder_decoded_andMatrixOutputs_hi_hi_5, decoder_decoded_andMatrixOutputs_andMatrixInput_2_5}; // @[pla.scala:91:29, :98:53]
wire [5:0] _decoder_decoded_andMatrixOutputs_T_5 = {decoder_decoded_andMatrixOutputs_hi_5, decoder_decoded_andMatrixOutputs_lo_5}; // @[pla.scala:98:53]
wire decoder_decoded_andMatrixOutputs_3_2 = &_decoder_decoded_andMatrixOutputs_T_5; // @[pla.scala:98:{53,70}]
wire [1:0] decoder_decoded_andMatrixOutputs_lo_hi_6 = {decoder_decoded_andMatrixOutputs_andMatrixInput_4_6, decoder_decoded_andMatrixOutputs_andMatrixInput_5_6}; // @[pla.scala:90:45, :91:29, :98:53]
wire [2:0] decoder_decoded_andMatrixOutputs_lo_6 = {decoder_decoded_andMatrixOutputs_lo_hi_6, decoder_decoded_andMatrixOutputs_andMatrixInput_6_2}; // @[pla.scala:90:45, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_lo_2 = {decoder_decoded_andMatrixOutputs_andMatrixInput_2_6, decoder_decoded_andMatrixOutputs_andMatrixInput_3_6}; // @[pla.scala:91:29, :98:53]
wire [1:0] decoder_decoded_andMatrixOutputs_hi_hi_6 = {decoder_decoded_andMatrixOutputs_andMatrixInput_0_6, decoder_decoded_andMatrixOutputs_andMatrixInput_1_6}; // @[pla.scala:90:45, :98:53]
wire [3:0] decoder_decoded_andMatrixOutputs_hi_6 = {decoder_decoded_andMatrixOutputs_hi_hi_6, decoder_decoded_andMatrixOutputs_hi_lo_2}; // @[pla.scala:98:53]
wire [6:0] _decoder_decoded_andMatrixOutputs_T_6 = {decoder_decoded_andMatrixOutputs_hi_6, decoder_decoded_andMatrixOutputs_lo_6}; // @[pla.scala:98:53]
wire decoder_decoded_andMatrixOutputs_5_2 = &_decoder_decoded_andMatrixOutputs_T_6; // @[pla.scala:98:{53,70}]
wire [1:0] decoder_decoded_orMatrixOutputs_lo = {decoder_decoded_andMatrixOutputs_1_2, decoder_decoded_andMatrixOutputs_3_2}; // @[pla.scala:98:70, :114:19]
wire [1:0] decoder_decoded_orMatrixOutputs_hi = {decoder_decoded_andMatrixOutputs_6_2, decoder_decoded_andMatrixOutputs_0_2}; // @[pla.scala:98:70, :114:19]
wire [3:0] _decoder_decoded_orMatrixOutputs_T = {decoder_decoded_orMatrixOutputs_hi, decoder_decoded_orMatrixOutputs_lo}; // @[pla.scala:114:19]
wire _decoder_decoded_orMatrixOutputs_T_1 = |_decoder_decoded_orMatrixOutputs_T; // @[pla.scala:114:{19,36}]
wire [1:0] decoder_decoded_orMatrixOutputs_hi_1 = {decoder_decoded_andMatrixOutputs_2_2, decoder_decoded_andMatrixOutputs_4_2}; // @[pla.scala:98:70, :114:19]
wire [2:0] _decoder_decoded_orMatrixOutputs_T_2 = {decoder_decoded_orMatrixOutputs_hi_1, decoder_decoded_andMatrixOutputs_5_2}; // @[pla.scala:98:70, :114:19]
wire _decoder_decoded_orMatrixOutputs_T_3 = |_decoder_decoded_orMatrixOutputs_T_2; // @[pla.scala:114:{19,36}]
wire [1:0] decoder_decoded_orMatrixOutputs = {_decoder_decoded_orMatrixOutputs_T_3, _decoder_decoded_orMatrixOutputs_T_1}; // @[pla.scala:102:36, :114:36]
wire _decoder_decoded_invMatrixOutputs_T = decoder_decoded_orMatrixOutputs[0]; // @[pla.scala:102:36, :124:31]
wire _decoder_decoded_invMatrixOutputs_T_1 = decoder_decoded_orMatrixOutputs[1]; // @[pla.scala:102:36, :124:31]
assign decoder_decoded_invMatrixOutputs = {_decoder_decoded_invMatrixOutputs_T_1, _decoder_decoded_invMatrixOutputs_T}; // @[pla.scala:120:37, :124:31]
assign decoder_decoded = decoder_decoded_invMatrixOutputs; // @[pla.scala:81:23, :120:37]
assign io_cmd_0 = decoder_0; // @[Decode.scala:50:77]
assign io_cmd = io_cmd_0; // @[fpu.scala:123:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RegisterFile.scala:
package saturn.backend
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.tile.{CoreModule}
import freechips.rocketchip.util._
import saturn.common._
class OldestRRArbiter(val n: Int)(implicit p: Parameters) extends Module {
val io = IO(new ArbiterIO(new VectorReadReq, n))
val arb = Module(new RRArbiter(new VectorReadReq, n))
io <> arb.io
val oldest_oh = io.in.map(i => i.valid && i.bits.oldest)
//assert(PopCount(oldest_oh) <= 1.U)
when (oldest_oh.orR) {
io.chosen := VecInit(oldest_oh).asUInt
io.out.valid := true.B
io.out.bits := Mux1H(oldest_oh, io.in.map(_.bits))
for (i <- 0 until n) {
io.in(i).ready := oldest_oh(i) && io.out.ready
}
}
}
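// Illustrative note (not part of the original file): the overrides above take effect
// only while some requester asserts bits.oldest; otherwise the wrapped RRArbiter's
// round-robin grant passes through unchanged. For example, if io.in(1) is valid with
// bits.oldest set, io.out carries io.in(1).bits and only io.in(1).ready can assert,
// regardless of the round-robin state.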
class RegisterReadXbar(n: Int, banks: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val io = IO(new Bundle {
val in = Vec(n, Flipped(new VectorReadIO))
val out = Vec(banks, new VectorReadIO)
})
val arbs = Seq.fill(banks) { Module(new OldestRRArbiter(n)) }
for (i <- 0 until banks) {
io.out(i).req <> arbs(i).io.out
}
val bankOffset = log2Ceil(banks)
for (i <- 0 until n) {
val bank_sel = if (bankOffset == 0) true.B else UIntToOH(io.in(i).req.bits.eg(bankOffset-1,0))
for (j <- 0 until banks) {
arbs(j).io.in(i).valid := io.in(i).req.valid && bank_sel(j)
arbs(j).io.in(i).bits.eg := io.in(i).req.bits.eg >> bankOffset
arbs(j).io.in(i).bits.oldest := io.in(i).req.bits.oldest
}
io.in(i).req.ready := Mux1H(bank_sel, arbs.map(_.io.in(i).ready))
io.in(i).resp := Mux1H(bank_sel, io.out.map(_.resp))
}
}
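// Worked example (illustrative, not part of the original file): the low-order
// bankOffset bits of the element-group index select the bank and the remaining bits
// select the row within it. With banks = 4 (bankOffset = 2), a request for
// eg = 0b1011 drives bank_sel = UIntToOH(0b11) = b1000, so only arbs(3) sees the
// request, and it receives eg >> 2 = 0b10 as the row address.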
class RegisterFileBank(reads: Int, maskReads: Int, rows: Int, maskRows: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val io = IO(new Bundle {
val read = Vec(reads, Flipped(new VectorReadIO))
val mask_read = Vec(maskReads, Flipped(new VectorReadIO))
val write = Input(Valid(new VectorWrite(dLen)))
val ll_write = Flipped(Decoupled(new VectorWrite(dLen)))
})
val ll_write_valid = RegInit(false.B)
val ll_write_bits = Reg(new VectorWrite(dLen))
val vrf = Mem(rows, Vec(dLen, Bool()))
val v0_mask = Mem(maskRows, Vec(dLen, Bool()))
for (read <- io.read) {
read.req.ready := !(ll_write_valid && read.req.bits.eg === ll_write_bits.eg)
read.resp := DontCare
when (read.req.valid) {
read.resp := vrf.read(read.req.bits.eg).asUInt
}
}
for (mask_read <- io.mask_read) {
mask_read.req.ready := !(ll_write_valid && mask_read.req.bits.eg === ll_write_bits.eg)
mask_read.resp := DontCare
when (mask_read.req.valid) {
mask_read.resp := v0_mask.read(mask_read.req.bits.eg).asUInt
}
}
val write = WireInit(io.write)
io.ll_write.ready := false.B
if (vParams.vrfHiccupBuffer) {
when (!io.write.valid) { // drain hiccup buffer
write.valid := ll_write_valid || io.ll_write.valid
write.bits := Mux(ll_write_valid, ll_write_bits, io.ll_write.bits)
ll_write_valid := false.B
when (io.ll_write.valid && ll_write_valid) {
ll_write_valid := true.B
ll_write_bits := io.ll_write.bits
}
io.ll_write.ready := true.B
} .elsewhen (!ll_write_valid) { // fill hiccup buffer
when (io.ll_write.valid) {
ll_write_valid := true.B
ll_write_bits := io.ll_write.bits
}
io.ll_write.ready := true.B
}
} else {
when (!io.write.valid) {
io.ll_write.ready := true.B
write.valid := io.ll_write.valid
write.bits := io.ll_write.bits
}
}
when (write.valid) {
vrf.write(
write.bits.eg,
VecInit(write.bits.data.asBools),
write.bits.mask.asBools)
when (write.bits.eg < maskRows.U) {
v0_mask.write(
write.bits.eg,
VecInit(write.bits.data.asBools),
write.bits.mask.asBools)
}
}
}
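// Illustrative note (not part of the original file): with vParams.vrfHiccupBuffer
// set, a load ("ll") write that collides with a pipe write is parked in
// ll_write_bits and drained on the next cycle in which io.write is idle; while an
// entry is parked, reads to that element group deassert req.ready until it drains.
// Writes landing in the first maskRows element groups are also mirrored into the
// v0_mask copy that backs the dedicated mask-read ports.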
class RegisterFile(reads: Seq[Int], maskReads: Seq[Int], pipeWrites: Int, llWrites: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams {
val nBanks = vParams.vrfBanking
// Support 1, 2, and 4 banks for the VRF
require(nBanks == 1 || nBanks == 2 || nBanks == 4)
val io = IO(new Bundle {
val read = MixedVec(reads.map(rc => Vec(rc, Flipped(new VectorReadIO))))
val mask_read = MixedVec(maskReads.map(rc => Vec(rc, Flipped(new VectorReadIO))))
val pipe_writes = Vec(pipeWrites, Input(Valid(new VectorWrite(dLen))))
val ll_writes = Vec(llWrites, Flipped(Decoupled(new VectorWrite(dLen))))
})
val vrf = Seq.fill(nBanks) { Module(new RegisterFileBank(reads.size, maskReads.size, egsTotal/nBanks, if (egsPerVReg < nBanks) 1 else egsPerVReg / nBanks)) }
reads.zipWithIndex.foreach { case (rc, i) =>
val xbar = Module(new RegisterReadXbar(rc, nBanks))
vrf.zipWithIndex.foreach { case (bank, j) =>
bank.io.read(i) <> xbar.io.out(j)
}
xbar.io.in <> io.read(i)
}
maskReads.zipWithIndex.foreach { case (rc, i) =>
val mask_xbar = Module(new RegisterReadXbar(rc, nBanks))
vrf.zipWithIndex.foreach { case (bank, j) =>
bank.io.mask_read(i) <> mask_xbar.io.out(j)
}
mask_xbar.io.in <> io.mask_read(i)
}
io.ll_writes.foreach(_.ready := false.B)
vrf.zipWithIndex.foreach { case (rf, i) =>
val bank_match = io.pipe_writes.map { w => (w.bits.bankId === i.U) && w.valid }
val bank_write_data = Mux1H(bank_match, io.pipe_writes.map(_.bits.data))
val bank_write_mask = Mux1H(bank_match, io.pipe_writes.map(_.bits.mask))
val bank_write_eg = Mux1H(bank_match, io.pipe_writes.map(_.bits.eg))
val bank_write_valid = bank_match.orR
rf.io.write.valid := bank_write_valid
rf.io.write.bits.data := bank_write_data
rf.io.write.bits.mask := bank_write_mask
rf.io.write.bits.eg := bank_write_eg >> vrfBankBits
    when (bank_write_valid) { assert(PopCount(bank_match) === 1.U) }
val ll_arb = Module(new Arbiter(new VectorWrite(dLen), llWrites))
rf.io.ll_write <> ll_arb.io.out
io.ll_writes.zipWithIndex.foreach { case (w, j) =>
ll_arb.io.in(j).valid := w.valid && w.bits.bankId === i.U
ll_arb.io.in(j).bits.eg := w.bits.eg >> vrfBankBits
ll_arb.io.in(j).bits.data := w.bits.data
ll_arb.io.in(j).bits.mask := w.bits.mask
when (ll_arb.io.in(j).ready && w.bits.bankId === i.U) {
w.ready := true.B
}
}
}
}
| module OldestRRArbiter( // @[RegisterFile.scala:10:7]
input clock, // @[RegisterFile.scala:10:7]
output io_in_0_ready, // @[RegisterFile.scala:11:14]
input io_in_0_valid, // @[RegisterFile.scala:11:14]
input [5:0] io_in_0_bits_eg, // @[RegisterFile.scala:11:14]
input io_in_0_bits_oldest, // @[RegisterFile.scala:11:14]
output io_in_1_ready, // @[RegisterFile.scala:11:14]
input io_in_1_valid, // @[RegisterFile.scala:11:14]
input [5:0] io_in_1_bits_eg, // @[RegisterFile.scala:11:14]
input io_in_1_bits_oldest, // @[RegisterFile.scala:11:14]
output io_in_2_ready, // @[RegisterFile.scala:11:14]
input io_in_2_valid, // @[RegisterFile.scala:11:14]
input [5:0] io_in_2_bits_eg, // @[RegisterFile.scala:11:14]
input io_out_ready, // @[RegisterFile.scala:11:14]
output io_out_valid, // @[RegisterFile.scala:11:14]
output [5:0] io_out_bits_eg // @[RegisterFile.scala:11:14]
);
wire _arb_io_in_0_ready; // @[RegisterFile.scala:13:19]
wire _arb_io_in_1_ready; // @[RegisterFile.scala:13:19]
wire _arb_io_in_2_ready; // @[RegisterFile.scala:13:19]
wire _arb_io_out_valid; // @[RegisterFile.scala:13:19]
wire [5:0] _arb_io_out_bits_eg; // @[RegisterFile.scala:13:19]
wire oldest_oh_0 = io_in_0_valid & io_in_0_bits_oldest; // @[RegisterFile.scala:15:42]
wire oldest_oh_1 = io_in_1_valid & io_in_1_bits_oldest; // @[RegisterFile.scala:15:42]
wire _GEN = oldest_oh_0 | oldest_oh_1; // @[RegisterFile.scala:15:42]
RRArbiter arb ( // @[RegisterFile.scala:13:19]
.clock (clock),
.io_in_0_ready (_arb_io_in_0_ready),
.io_in_0_valid (io_in_0_valid),
.io_in_0_bits_eg (io_in_0_bits_eg),
.io_in_1_ready (_arb_io_in_1_ready),
.io_in_1_valid (io_in_1_valid),
.io_in_1_bits_eg (io_in_1_bits_eg),
.io_in_2_ready (_arb_io_in_2_ready),
.io_in_2_valid (io_in_2_valid),
.io_in_2_bits_eg (io_in_2_bits_eg),
.io_out_ready (io_out_ready),
.io_out_valid (_arb_io_out_valid),
.io_out_bits_eg (_arb_io_out_bits_eg)
); // @[RegisterFile.scala:13:19]
assign io_in_0_ready = _GEN ? oldest_oh_0 & io_out_ready : _arb_io_in_0_ready; // @[RegisterFile.scala:10:7, :13:19, :14:6, :15:42, :17:24, :22:{22,38}]
assign io_in_1_ready = _GEN ? oldest_oh_1 & io_out_ready : _arb_io_in_1_ready; // @[RegisterFile.scala:10:7, :13:19, :14:6, :15:42, :17:24, :22:{22,38}]
assign io_in_2_ready = ~_GEN & _arb_io_in_2_ready; // @[RegisterFile.scala:10:7, :13:19, :14:6, :17:24, :22:22]
assign io_out_valid = _GEN | _arb_io_out_valid; // @[RegisterFile.scala:10:7, :13:19, :14:6, :17:24, :19:18]
assign io_out_bits_eg = _GEN ? (oldest_oh_0 ? io_in_0_bits_eg : 6'h0) | (oldest_oh_1 ? io_in_1_bits_eg : 6'h0) : _arb_io_out_bits_eg; // @[Mux.scala:30:73]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
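// Hypothetical construction sketch (not part of the original file), assuming the
// usual rocket-chip parameter classes and the imports shown: a master node that
// advertises four outstanding source IDs on a single TileLink port.
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.diplomacy.IdRange
class ExampleMaster(implicit p: Parameters) extends LazyModule {
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "example-master", sourceId = IdRange(0, 4))))))
  lazy val module = new LazyModuleImp(this) { /* drive node.out.head._1 here */ }
}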
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
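// Minimal usage sketch (hypothetical, not part of the original file): hardware goes
// inside the lazy module member so it elaborates only after diplomatic negotiation
// has fixed all node parameters; InclusiveCache.scala below follows the same idiom
// with a named Impl class and a TLAdapterNode.
class ExampleLeaf(implicit p: Parameters) extends LazyModule {
  lazy val module = new LazyModuleImp(this) {
    val counter = RegInit(0.U(8.W)) // ordinary Chisel, built during the second phase
  }
}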
File InclusiveCache.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.subsystem.{SubsystemBankedCoherenceKey}
import freechips.rocketchip.regmapper._
import freechips.rocketchip.tilelink._
class InclusiveCache(
val cache: CacheParameters,
val micro: InclusiveCacheMicroParameters,
control: Option[InclusiveCacheControlParameters] = None
)(implicit p: Parameters)
extends LazyModule
{
val access = TransferSizes(1, cache.blockBytes)
val xfer = TransferSizes(cache.blockBytes, cache.blockBytes)
val atom = TransferSizes(1, cache.beatBytes)
var resourcesOpt: Option[ResourceBindings] = None
val device: SimpleDevice = new SimpleDevice("cache-controller", Seq("sifive,inclusivecache0", "cache")) {
def ofInt(x: Int) = Seq(ResourceInt(BigInt(x)))
override def describe(resources: ResourceBindings): Description = {
resourcesOpt = Some(resources)
val Description(name, mapping) = super.describe(resources)
// Find the outer caches
val outer = node.edges.out
.flatMap(_.manager.managers)
.filter(_.supportsAcquireB)
.flatMap(_.resources.headOption)
.map(_.owner.label)
.distinct
val nextlevel: Option[(String, Seq[ResourceValue])] =
if (outer.isEmpty) {
None
} else {
Some("next-level-cache" -> outer.map(l => ResourceReference(l)).toList)
}
val extra = Map(
"cache-level" -> ofInt(2),
"cache-unified" -> Nil,
"cache-size" -> ofInt(cache.sizeBytes * node.edges.in.size),
"cache-sets" -> ofInt(cache.sets * node.edges.in.size),
"cache-block-size" -> ofInt(cache.blockBytes),
"sifive,mshr-count" -> ofInt(InclusiveCacheParameters.all_mshrs(cache, micro)))
Description(name, mapping ++ extra ++ nextlevel)
}
}
val node: TLAdapterNode = TLAdapterNode(
clientFn = { _ => TLClientPortParameters(Seq(TLClientParameters(
name = s"L${cache.level} InclusiveCache",
sourceId = IdRange(0, InclusiveCacheParameters.out_mshrs(cache, micro)),
supportsProbe = xfer)))
},
managerFn = { m => TLManagerPortParameters(
managers = m.managers.map { m => m.copy(
regionType = if (m.regionType >= RegionType.UNCACHED) RegionType.CACHED else m.regionType,
resources = Resource(device, "caches") +: m.resources,
supportsAcquireB = xfer,
supportsAcquireT = if (m.supportsAcquireT) xfer else TransferSizes.none,
supportsArithmetic = if (m.supportsAcquireT) atom else TransferSizes.none,
supportsLogical = if (m.supportsAcquireT) atom else TransferSizes.none,
supportsGet = access,
supportsPutFull = if (m.supportsAcquireT) access else TransferSizes.none,
supportsPutPartial = if (m.supportsAcquireT) access else TransferSizes.none,
supportsHint = access,
alwaysGrantsT = false,
fifoId = None)
},
beatBytes = cache.beatBytes,
endSinkId = InclusiveCacheParameters.all_mshrs(cache, micro),
minLatency = 2)
})
val ctrls = control.map { c =>
val nCtrls = if (c.bankedControl) p(SubsystemBankedCoherenceKey).nBanks else 1
Seq.tabulate(nCtrls) { i => LazyModule(new InclusiveCacheControl(this,
c.copy(address = c.address + i * InclusiveCacheParameters.L2ControlSize))) }
}.getOrElse(Nil)
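  // When bankedControl is set, one control endpoint is created per coherence bank, with
  // each bank's registers placed InclusiveCacheParameters.L2ControlSize bytes apart.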
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
// If you have a control port, you must have at least one cache port
require (ctrls.isEmpty || !node.edges.in.isEmpty)
// Extract the client IdRanges; must be the same on all ports!
val clientIds = node.edges.in.headOption.map(_.client.clients.map(_.sourceId).sortBy(_.start))
node.edges.in.foreach { e => require(e.client.clients.map(_.sourceId).sortBy(_.start) == clientIds.get) }
// Use the natural ordering of clients (just like in Directory)
node.edges.in.headOption.foreach { n =>
println(s"L${cache.level} InclusiveCache Client Map:")
n.client.clients.zipWithIndex.foreach { case (c,i) =>
println(s"\t${i} <= ${c.name}")
}
println("")
}
// Create the L2 Banks
val mods = (node.in zip node.out) map { case ((in, edgeIn), (out, edgeOut)) =>
edgeOut.manager.managers.foreach { m =>
require (m.supportsAcquireB.contains(xfer),
s"All managers behind the L2 must support acquireB($xfer) " +
s"but ${m.name} only supports (${m.supportsAcquireB})!")
if (m.supportsAcquireT) require (m.supportsAcquireT.contains(xfer),
s"Any probing managers behind the L2 must support acquireT($xfer) " +
s"but ${m.name} only supports (${m.supportsAcquireT})!")
}
val params = InclusiveCacheParameters(cache, micro, !ctrls.isEmpty, edgeIn, edgeOut)
val scheduler = Module(new InclusiveCacheBankScheduler(params)).suggestName("inclusive_cache_bank_sched")
scheduler.io.in <> in
out <> scheduler.io.out
scheduler.io.ways := DontCare
scheduler.io.divs := DontCare
// Tie down default values in case there is no controller
scheduler.io.req.valid := false.B
scheduler.io.req.bits.address := 0.U
scheduler.io.resp.ready := true.B
// Fix-up the missing addresses. We do this here so that the Scheduler can be
// deduplicated by Firrtl to make hierarchical place-and-route easier.
out.a.bits.address := params.restoreAddress(scheduler.io.out.a.bits.address)
in .b.bits.address := params.restoreAddress(scheduler.io.in .b.bits.address)
out.c.bits.address := params.restoreAddress(scheduler.io.out.c.bits.address)
scheduler
}
ctrls.foreach { ctrl =>
ctrl.module.io.flush_req.ready := false.B
ctrl.module.io.flush_resp := false.B
ctrl.module.io.flush_match := false.B
}
mods.zip(node.edges.in).zipWithIndex.foreach { case ((sched, edgeIn), i) =>
val ctrl = if (ctrls.size > 1) Some(ctrls(i)) else ctrls.headOption
ctrl.foreach { ctrl => {
val contained = edgeIn.manager.managers.flatMap(_.address)
.map(_.contains(ctrl.module.io.flush_req.bits)).reduce(_||_)
when (contained) { ctrl.module.io.flush_match := true.B }
sched.io.req.valid := contained && ctrl.module.io.flush_req.valid
sched.io.req.bits.address := ctrl.module.io.flush_req.bits
when (contained && sched.io.req.ready) { ctrl.module.io.flush_req.ready := true.B }
when (sched.io.resp.valid) { ctrl.module.io.flush_resp := true.B }
sched.io.resp.ready := true.B
}}
}
def json = s"""{"banks":[${mods.map(_.json).mkString(",")}]}"""
}
}
module InclusiveCache( // @[InclusiveCache.scala:108:9]
input clock, // @[InclusiveCache.scala:108:9]
input reset, // @[InclusiveCache.scala:108:9]
output auto_ctrls_ctrl_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_ctrls_ctrl_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrls_ctrl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_ctrls_ctrl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_ctrls_ctrl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [10:0] auto_ctrls_ctrl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [25:0] auto_ctrls_ctrl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_ctrls_ctrl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_ctrls_ctrl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_ctrls_ctrl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_ctrls_ctrl_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_ctrls_ctrl_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_ctrls_ctrl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_ctrls_ctrl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [10:0] auto_ctrls_ctrl_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_ctrls_ctrl_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_b_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_b_valid, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_b_bits_param, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_in_b_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_in_c_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_c_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_c_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_c_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_c_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_c_bits_address, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_c_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_e_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_e_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_c_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_c_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_size, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_c_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_c_bits_address, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_c_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_c_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_e_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_e_bits_sink // @[LazyModuleImp.scala:107:25]
);
wire _inclusive_cache_bank_sched_io_in_a_ready; // @[InclusiveCache.scala:137:29]
wire _inclusive_cache_bank_sched_io_in_b_valid; // @[InclusiveCache.scala:137:29]
wire [1:0] _inclusive_cache_bank_sched_io_in_b_bits_param; // @[InclusiveCache.scala:137:29]
wire [31:0] _inclusive_cache_bank_sched_io_in_b_bits_address; // @[InclusiveCache.scala:137:29]
wire _inclusive_cache_bank_sched_io_in_c_ready; // @[InclusiveCache.scala:137:29]
wire _inclusive_cache_bank_sched_io_in_d_valid; // @[InclusiveCache.scala:137:29]
wire [2:0] _inclusive_cache_bank_sched_io_in_d_bits_opcode; // @[InclusiveCache.scala:137:29]
wire [1:0] _inclusive_cache_bank_sched_io_in_d_bits_param; // @[InclusiveCache.scala:137:29]
wire [2:0] _inclusive_cache_bank_sched_io_in_d_bits_size; // @[InclusiveCache.scala:137:29]
wire [5:0] _inclusive_cache_bank_sched_io_in_d_bits_source; // @[InclusiveCache.scala:137:29]
wire [2:0] _inclusive_cache_bank_sched_io_in_d_bits_sink; // @[InclusiveCache.scala:137:29]
wire _inclusive_cache_bank_sched_io_in_d_bits_denied; // @[InclusiveCache.scala:137:29]
wire _inclusive_cache_bank_sched_io_in_d_bits_corrupt; // @[InclusiveCache.scala:137:29]
wire _inclusive_cache_bank_sched_io_req_ready; // @[InclusiveCache.scala:137:29]
wire _inclusive_cache_bank_sched_io_resp_valid; // @[InclusiveCache.scala:137:29]
wire _ctrls_io_flush_req_valid; // @[InclusiveCache.scala:103:43]
wire [63:0] _ctrls_io_flush_req_bits; // @[InclusiveCache.scala:103:43]
wire contained = {_ctrls_io_flush_req_bits[63:32], _ctrls_io_flush_req_bits[31:28] ^ 4'h8} == 36'h0 | {_ctrls_io_flush_req_bits[63:28], _ctrls_io_flush_req_bits[27:16] ^ 12'h800} == 48'h0; // @[Parameters.scala:137:{31,41,46,59}]
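  // Derived from the expression above: the flush address is "contained" when its upper bits
  // are zero and it lies in 0x8000_0000-0x8FFF_FFFF (addr[31:28] == 4'h8) or in
  // 0x0800_0000-0x0800_FFFF (addr[27:16] == 12'h800).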
InclusiveCacheControl ctrls ( // @[InclusiveCache.scala:103:43]
.clock (clock),
.reset (reset),
.auto_ctrl_in_a_ready (auto_ctrls_ctrl_in_a_ready),
.auto_ctrl_in_a_valid (auto_ctrls_ctrl_in_a_valid),
.auto_ctrl_in_a_bits_opcode (auto_ctrls_ctrl_in_a_bits_opcode),
.auto_ctrl_in_a_bits_param (auto_ctrls_ctrl_in_a_bits_param),
.auto_ctrl_in_a_bits_size (auto_ctrls_ctrl_in_a_bits_size),
.auto_ctrl_in_a_bits_source (auto_ctrls_ctrl_in_a_bits_source),
.auto_ctrl_in_a_bits_address (auto_ctrls_ctrl_in_a_bits_address),
.auto_ctrl_in_a_bits_mask (auto_ctrls_ctrl_in_a_bits_mask),
.auto_ctrl_in_a_bits_data (auto_ctrls_ctrl_in_a_bits_data),
.auto_ctrl_in_a_bits_corrupt (auto_ctrls_ctrl_in_a_bits_corrupt),
.auto_ctrl_in_d_ready (auto_ctrls_ctrl_in_d_ready),
.auto_ctrl_in_d_valid (auto_ctrls_ctrl_in_d_valid),
.auto_ctrl_in_d_bits_opcode (auto_ctrls_ctrl_in_d_bits_opcode),
.auto_ctrl_in_d_bits_size (auto_ctrls_ctrl_in_d_bits_size),
.auto_ctrl_in_d_bits_source (auto_ctrls_ctrl_in_d_bits_source),
.auto_ctrl_in_d_bits_data (auto_ctrls_ctrl_in_d_bits_data),
.io_flush_match (contained), // @[InclusiveCache.scala:169:67]
.io_flush_req_ready (contained & _inclusive_cache_bank_sched_io_req_ready), // @[InclusiveCache.scala:137:29, :169:67, :174:25]
.io_flush_req_valid (_ctrls_io_flush_req_valid),
.io_flush_req_bits (_ctrls_io_flush_req_bits),
.io_flush_resp (_inclusive_cache_bank_sched_io_resp_valid) // @[InclusiveCache.scala:137:29]
); // @[InclusiveCache.scala:103:43]
TLMonitor_34 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (_inclusive_cache_bank_sched_io_in_a_ready), // @[InclusiveCache.scala:137:29]
.io_in_a_valid (auto_in_a_valid),
.io_in_a_bits_opcode (auto_in_a_bits_opcode),
.io_in_a_bits_param (auto_in_a_bits_param),
.io_in_a_bits_size (auto_in_a_bits_size),
.io_in_a_bits_source (auto_in_a_bits_source),
.io_in_a_bits_address (auto_in_a_bits_address),
.io_in_a_bits_mask (auto_in_a_bits_mask),
.io_in_a_bits_corrupt (auto_in_a_bits_corrupt),
.io_in_b_ready (auto_in_b_ready),
.io_in_b_valid (_inclusive_cache_bank_sched_io_in_b_valid), // @[InclusiveCache.scala:137:29]
.io_in_b_bits_param (_inclusive_cache_bank_sched_io_in_b_bits_param), // @[InclusiveCache.scala:137:29]
.io_in_b_bits_address (_inclusive_cache_bank_sched_io_in_b_bits_address), // @[InclusiveCache.scala:137:29]
.io_in_c_ready (_inclusive_cache_bank_sched_io_in_c_ready), // @[InclusiveCache.scala:137:29]
.io_in_c_valid (auto_in_c_valid),
.io_in_c_bits_opcode (auto_in_c_bits_opcode),
.io_in_c_bits_param (auto_in_c_bits_param),
.io_in_c_bits_size (auto_in_c_bits_size),
.io_in_c_bits_source (auto_in_c_bits_source),
.io_in_c_bits_address (auto_in_c_bits_address),
.io_in_c_bits_corrupt (auto_in_c_bits_corrupt),
.io_in_d_ready (auto_in_d_ready),
.io_in_d_valid (_inclusive_cache_bank_sched_io_in_d_valid), // @[InclusiveCache.scala:137:29]
.io_in_d_bits_opcode (_inclusive_cache_bank_sched_io_in_d_bits_opcode), // @[InclusiveCache.scala:137:29]
.io_in_d_bits_param (_inclusive_cache_bank_sched_io_in_d_bits_param), // @[InclusiveCache.scala:137:29]
.io_in_d_bits_size (_inclusive_cache_bank_sched_io_in_d_bits_size), // @[InclusiveCache.scala:137:29]
.io_in_d_bits_source (_inclusive_cache_bank_sched_io_in_d_bits_source), // @[InclusiveCache.scala:137:29]
.io_in_d_bits_sink (_inclusive_cache_bank_sched_io_in_d_bits_sink), // @[InclusiveCache.scala:137:29]
.io_in_d_bits_denied (_inclusive_cache_bank_sched_io_in_d_bits_denied), // @[InclusiveCache.scala:137:29]
.io_in_d_bits_corrupt (_inclusive_cache_bank_sched_io_in_d_bits_corrupt), // @[InclusiveCache.scala:137:29]
.io_in_e_valid (auto_in_e_valid),
.io_in_e_bits_sink (auto_in_e_bits_sink)
); // @[Nodes.scala:27:25]
InclusiveCacheBankScheduler inclusive_cache_bank_sched ( // @[InclusiveCache.scala:137:29]
.clock (clock),
.reset (reset),
.io_in_a_ready (_inclusive_cache_bank_sched_io_in_a_ready),
.io_in_a_valid (auto_in_a_valid),
.io_in_a_bits_opcode (auto_in_a_bits_opcode),
.io_in_a_bits_param (auto_in_a_bits_param),
.io_in_a_bits_size (auto_in_a_bits_size),
.io_in_a_bits_source (auto_in_a_bits_source),
.io_in_a_bits_address (auto_in_a_bits_address),
.io_in_a_bits_mask (auto_in_a_bits_mask),
.io_in_a_bits_data (auto_in_a_bits_data),
.io_in_a_bits_corrupt (auto_in_a_bits_corrupt),
.io_in_b_ready (auto_in_b_ready),
.io_in_b_valid (_inclusive_cache_bank_sched_io_in_b_valid),
.io_in_b_bits_param (_inclusive_cache_bank_sched_io_in_b_bits_param),
.io_in_b_bits_address (_inclusive_cache_bank_sched_io_in_b_bits_address),
.io_in_c_ready (_inclusive_cache_bank_sched_io_in_c_ready),
.io_in_c_valid (auto_in_c_valid),
.io_in_c_bits_opcode (auto_in_c_bits_opcode),
.io_in_c_bits_param (auto_in_c_bits_param),
.io_in_c_bits_size (auto_in_c_bits_size),
.io_in_c_bits_source (auto_in_c_bits_source),
.io_in_c_bits_address (auto_in_c_bits_address),
.io_in_c_bits_data (auto_in_c_bits_data),
.io_in_c_bits_corrupt (auto_in_c_bits_corrupt),
.io_in_d_ready (auto_in_d_ready),
.io_in_d_valid (_inclusive_cache_bank_sched_io_in_d_valid),
.io_in_d_bits_opcode (_inclusive_cache_bank_sched_io_in_d_bits_opcode),
.io_in_d_bits_param (_inclusive_cache_bank_sched_io_in_d_bits_param),
.io_in_d_bits_size (_inclusive_cache_bank_sched_io_in_d_bits_size),
.io_in_d_bits_source (_inclusive_cache_bank_sched_io_in_d_bits_source),
.io_in_d_bits_sink (_inclusive_cache_bank_sched_io_in_d_bits_sink),
.io_in_d_bits_denied (_inclusive_cache_bank_sched_io_in_d_bits_denied),
.io_in_d_bits_data (auto_in_d_bits_data),
.io_in_d_bits_corrupt (_inclusive_cache_bank_sched_io_in_d_bits_corrupt),
.io_in_e_valid (auto_in_e_valid),
.io_in_e_bits_sink (auto_in_e_bits_sink),
.io_out_a_ready (auto_out_a_ready),
.io_out_a_valid (auto_out_a_valid),
.io_out_a_bits_opcode (auto_out_a_bits_opcode),
.io_out_a_bits_param (auto_out_a_bits_param),
.io_out_a_bits_size (auto_out_a_bits_size),
.io_out_a_bits_source (auto_out_a_bits_source),
.io_out_a_bits_address (auto_out_a_bits_address),
.io_out_a_bits_mask (auto_out_a_bits_mask),
.io_out_a_bits_data (auto_out_a_bits_data),
.io_out_a_bits_corrupt (auto_out_a_bits_corrupt),
.io_out_c_ready (auto_out_c_ready),
.io_out_c_valid (auto_out_c_valid),
.io_out_c_bits_opcode (auto_out_c_bits_opcode),
.io_out_c_bits_param (auto_out_c_bits_param),
.io_out_c_bits_size (auto_out_c_bits_size),
.io_out_c_bits_source (auto_out_c_bits_source),
.io_out_c_bits_address (auto_out_c_bits_address),
.io_out_c_bits_data (auto_out_c_bits_data),
.io_out_c_bits_corrupt (auto_out_c_bits_corrupt),
.io_out_d_ready (auto_out_d_ready),
.io_out_d_valid (auto_out_d_valid),
.io_out_d_bits_opcode (auto_out_d_bits_opcode),
.io_out_d_bits_param (auto_out_d_bits_param),
.io_out_d_bits_size (auto_out_d_bits_size),
.io_out_d_bits_source (auto_out_d_bits_source),
.io_out_d_bits_sink (auto_out_d_bits_sink),
.io_out_d_bits_denied (auto_out_d_bits_denied),
.io_out_d_bits_data (auto_out_d_bits_data),
.io_out_d_bits_corrupt (auto_out_d_bits_corrupt),
.io_out_e_valid (auto_out_e_valid),
.io_out_e_bits_sink (auto_out_e_bits_sink),
.io_req_ready (_inclusive_cache_bank_sched_io_req_ready),
.io_req_valid (contained & _ctrls_io_flush_req_valid), // @[InclusiveCache.scala:103:43, :169:67, :172:41]
.io_req_bits_address (_ctrls_io_flush_req_bits[31:0]), // @[Parameters.scala:137:31]
.io_resp_valid (_inclusive_cache_bank_sched_io_resp_valid)
); // @[InclusiveCache.scala:137:29]
assign auto_in_a_ready = _inclusive_cache_bank_sched_io_in_a_ready; // @[InclusiveCache.scala:108:9, :137:29]
assign auto_in_b_valid = _inclusive_cache_bank_sched_io_in_b_valid; // @[InclusiveCache.scala:108:9, :137:29]
assign auto_in_b_bits_param = _inclusive_cache_bank_sched_io_in_b_bits_param; // @[InclusiveCache.scala:108:9, :137:29]
assign auto_in_b_bits_address = _inclusive_cache_bank_sched_io_in_b_bits_address; // @[InclusiveCache.scala:108:9, :137:29]
assign auto_in_c_ready = _inclusive_cache_bank_sched_io_in_c_ready; // @[InclusiveCache.scala:108:9, :137:29]
assign auto_in_d_valid = _inclusive_cache_bank_sched_io_in_d_valid; // @[InclusiveCache.scala:108:9, :137:29]
assign auto_in_d_bits_opcode = _inclusive_cache_bank_sched_io_in_d_bits_opcode; // @[InclusiveCache.scala:108:9, :137:29]
assign auto_in_d_bits_param = _inclusive_cache_bank_sched_io_in_d_bits_param; // @[InclusiveCache.scala:108:9, :137:29]
assign auto_in_d_bits_size = _inclusive_cache_bank_sched_io_in_d_bits_size; // @[InclusiveCache.scala:108:9, :137:29]
assign auto_in_d_bits_source = _inclusive_cache_bank_sched_io_in_d_bits_source; // @[InclusiveCache.scala:108:9, :137:29]
assign auto_in_d_bits_sink = _inclusive_cache_bank_sched_io_in_d_bits_sink; // @[InclusiveCache.scala:108:9, :137:29]
assign auto_in_d_bits_denied = _inclusive_cache_bank_sched_io_in_d_bits_denied; // @[InclusiveCache.scala:108:9, :137:29]
assign auto_in_d_bits_corrupt = _inclusive_cache_bank_sched_io_in_d_bits_corrupt; // @[InclusiveCache.scala:108:9, :137:29]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
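  // In other words: the client that owns this source ID must list the address in at least
  // one of its visibility regions; clients that do not own the source ID satisfy the check
  // vacuously through the first disjunct.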
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
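    // Together these constraints pin sym_source to a single in-range source ID, so the
    // response-pending tracking below reasons about one arbitrary but fixed source.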
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
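    // Worked example of the packing below (illustrative numbers): with 3-bit opcodes,
    // a_opcode_bus_size = 4 and log_a_opcode_bus_size = 2. A Get (opcode 4) accepted from
    // source 3 is recorded in inflight_opcodes as (4 << 1) | 1 = 9 at bit offset 3 << 2 = 12;
    // the a_opcode_lookup below computes ((inflight_opcodes >> 12) & 15) >> 1 = 4 for that
    // source, i.e. the extra low bit only marks the slot as in use and is shifted away.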
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
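// Illustrative sketch (not part of the original monitor): the bookkeeping above packs each
// in-flight opcode/size as ((value << 1) | 1) into a fixed-width slot per source ID, so an
// all-zero slot means "no transaction outstanding". A standalone model of the decode step,
// with the 4-bit opcode slot width assumed for illustration only:
//   def slotLookup(table: UInt, source: UInt): UInt = {
//     val slot = (table >> (source << 2)) & 0xf.U // select this source's 4-bit slot
//     slot >> 1                                   // drop the LSB "valid" marker
//   }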
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything (opcode 0 in the map means unset)
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
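// Illustrative usage sketch (not part of the original file): DecoupledHelper gates a
// multi-way handshake so that each side's ready/valid excludes its own term. The module
// and port names below are assumptions for the example only.
class DecoupledHelperExample extends Module {
  val io = IO(new Bundle {
    val a = Flipped(Decoupled(UInt(8.W)))
    val b = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
  })
  val helper = DecoupledHelper(io.a.valid, io.b.valid, io.out.ready)
  // fire(x) is "all terms except x", so no signal depends combinationally on itself.
  io.a.ready := helper.fire(io.a.valid)
  io.b.ready := helper.fire(io.b.valid)
  io.out.valid := helper.fire(io.out.ready)
  io.out.bits := io.a.bits + io.b.bits
}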
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
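// Example (illustrative, not from the original file): MuxTLookup(sel, (0.U, false.B),
//   Seq(1.U -> (10.U, true.B), 2.U -> (20.U, false.B)))
// returns (10.U, true.B) when sel === 1.U, (20.U, false.B) when sel === 2.U, and the
// default pair otherwise, built as a cascade of two MuxTs.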
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
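// Examples (illustrative): Str("OK") === 0x4f4b.U (two ASCII bytes, 16 bits wide), and
// Str(65.U(7.W), 16) renders the hex digits "41" as an ASCII-encoded UInt, suitable for printf.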
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block, and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
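// Illustrative usage sketch (not part of the original file): compare a free-running counter
// against a runtime +max_count=N plusarg. The module, port, and plusarg names here are
// assumptions for the example only.
class PlusArgExample extends Module {
  val io = IO(new Bundle {
    val enable = Input(Bool())
    val hit = Output(Bool())
  })
  val count = RegInit(0.U(32.W))
  when (io.enable) { count := count + 1.U }
  // Reads the value of +max_count=N at simulation time; 0 (the default) disables the check.
  val max = PlusArg("max_count", default = 0, docstring = "Example threshold for io.hit")
  io.hit := (max =/= 0.U) && (count >= max)
}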
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
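// Examples (illustrative): "b0011".U(4.W).rotateLeft(1) === "b0110".U and
// "b0011".U(4.W).rotateRight(1) === "b1001".U, while 5.U(3.W).sextTo(6) === "b111101".U
// (the MSB is replicated) and 5.U(3.W).padTo(6) === "b000101".U (zero-extended).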
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
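// Example (illustrative): leftOR("b00100".U(5.W)) === "b11100".U and
// rightOR("b00100".U(5.W)) === "b00111".U; each set bit is smeared toward the high end
// (leftOR) or the low end (rightOR), which is handy for building priority masks.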
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
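// Example (illustrative): groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns
// Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4)), with keys in first-appearance order, whereas
// Seq.groupBy would return a Map with unspecified iteration order.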
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
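// Worked example (illustrative): for IdRange(4, 7), start = 0b100 and end-1 = 0b110, so the
// largest differing bit is bit 1 and the common prefix is x >> 2. contains(x: UInt) then
// checks (x >> 2) === 1.U together with 0.U <= x(1, 0) <= 2.U, i.e. x in {4, 5, 6}.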
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
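// Examples (illustrative): TransferSizes(4, 64).contains(16) is true (a power of two in range),
// TransferSizes(4, 64).intersect(TransferSizes(1, 8)) == TransferSizes(4, 8), and
// TransferSizes(4, 8).mincover(TransferSizes(32, 64)) == TransferSizes(4, 64), which also
// admits size 16 even though neither operand supports it.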
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
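// Examples (illustrative): AddressSet(0x1000, 0xfff).contains(0x1abc) is true,
// AddressSet.misaligned(0x1000, 0x3000) == Seq(AddressSet(0x1000, 0xfff), AddressSet(0x2000, 0x1fff)),
// and AddressSet(0x0, 0xfff).subtract(AddressSet(0x0, 0xff)) carves the range into the
// power-of-two pieces that remain after removing 0x0-0xff.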
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
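// Worked example (illustrative): with beatBytes = 8, a 64-byte (lgSize = 6) message that
// carries data has beats1 = 7, so `first` is asserted on the initial beat, the counter then
// runs 7, 6, ..., 1, and `last`/`done` fire on the eighth accepted beat.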
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
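// Example sketch of client-side usage: inside a LazyModuleImp, the A-channel constructors
// above are typically combined with the returned `legal` flag to drive the output bundle.
// The names `node`, `addr` and `wantGet` below are illustrative placeholders only.
//
//   val (out, edge) = node.out(0)
//   val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   out.a.valid := wantGet && legal
//   out.a.bits := getBits
//   out.d.ready := true.B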
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
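// Example sketch of manager-side usage: a TLEdgeIn is what `node.in` yields on a manager
// node, and the D-channel constructors above build its responses. `node` and `regData`
// below are illustrative placeholders; a real manager would choose AccessAck with or
// without data based on `in.a.bits.opcode` (Put vs. Get).
//
//   val (in, edge) = node.in(0)
//   in.a.ready := in.d.ready
//   in.d.valid := in.a.valid
//   in.d.bits := edge.AccessAck(in.a.bits, regData) // AccessAckData response to a Get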
| module TLMonitor_24( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [27:0] address; // @[Monitor.scala:391:22]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire [127:0] _GEN_0 = {121'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [127:0] _GEN_3 = {121'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Repeater.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{Decoupled, DecoupledIO}
// A Repeater passes its input to its output, unless repeat is asserted.
// When repeat is asserted, the Repeater copies the input and repeats it next cycle.
class Repeater[T <: Data](gen: T) extends Module
{
override def desiredName = s"Repeater_${gen.typeName}"
val io = IO( new Bundle {
val repeat = Input(Bool())
val full = Output(Bool())
val enq = Flipped(Decoupled(gen.cloneType))
val deq = Decoupled(gen.cloneType)
} )
val full = RegInit(false.B)
val saved = Reg(gen.cloneType)
// When !full, a repeater is pass-through
io.deq.valid := io.enq.valid || full
io.enq.ready := io.deq.ready && !full
io.deq.bits := Mux(full, saved, io.enq.bits)
io.full := full
when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits }
when (io.deq.fire && !io.repeat) { full := false.B }
}
object Repeater
{
def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = {
val repeater = Module(new Repeater(chiselTypeOf(enq.bits)))
repeater.io.repeat := repeat
repeater.io.enq <> enq
repeater.io.deq
}
}
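// Example sketch: each time the enqueue side fires with `repeat` asserted, the beat is
// captured and replayed on the dequeue side until it is finally accepted with `repeat`
// deasserted. `io.in`, `io.out` and `needAnotherCopy` below are illustrative placeholders.
//
//   io.out <> Repeater(io.in, repeat = needAnotherCopy)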
| module Repeater_TLBundleA_a21d64s8k1z3u( // @[Repeater.scala:10:7]
input clock, // @[Repeater.scala:10:7]
input reset, // @[Repeater.scala:10:7]
input io_repeat, // @[Repeater.scala:13:14]
output io_full, // @[Repeater.scala:13:14]
output io_enq_ready, // @[Repeater.scala:13:14]
input io_enq_valid, // @[Repeater.scala:13:14]
input [2:0] io_enq_bits_opcode, // @[Repeater.scala:13:14]
input [2:0] io_enq_bits_param, // @[Repeater.scala:13:14]
input [2:0] io_enq_bits_size, // @[Repeater.scala:13:14]
input [7:0] io_enq_bits_source, // @[Repeater.scala:13:14]
input [20:0] io_enq_bits_address, // @[Repeater.scala:13:14]
input [7:0] io_enq_bits_mask, // @[Repeater.scala:13:14]
input [63:0] io_enq_bits_data, // @[Repeater.scala:13:14]
input io_enq_bits_corrupt, // @[Repeater.scala:13:14]
input io_deq_ready, // @[Repeater.scala:13:14]
output io_deq_valid, // @[Repeater.scala:13:14]
output [2:0] io_deq_bits_opcode, // @[Repeater.scala:13:14]
output [2:0] io_deq_bits_param, // @[Repeater.scala:13:14]
output [2:0] io_deq_bits_size, // @[Repeater.scala:13:14]
output [7:0] io_deq_bits_source, // @[Repeater.scala:13:14]
output [20:0] io_deq_bits_address, // @[Repeater.scala:13:14]
output [7:0] io_deq_bits_mask, // @[Repeater.scala:13:14]
output io_deq_bits_corrupt // @[Repeater.scala:13:14]
);
wire io_repeat_0 = io_repeat; // @[Repeater.scala:10:7]
wire io_enq_valid_0 = io_enq_valid; // @[Repeater.scala:10:7]
wire [2:0] io_enq_bits_opcode_0 = io_enq_bits_opcode; // @[Repeater.scala:10:7]
wire [2:0] io_enq_bits_param_0 = io_enq_bits_param; // @[Repeater.scala:10:7]
wire [2:0] io_enq_bits_size_0 = io_enq_bits_size; // @[Repeater.scala:10:7]
wire [7:0] io_enq_bits_source_0 = io_enq_bits_source; // @[Repeater.scala:10:7]
wire [20:0] io_enq_bits_address_0 = io_enq_bits_address; // @[Repeater.scala:10:7]
wire [7:0] io_enq_bits_mask_0 = io_enq_bits_mask; // @[Repeater.scala:10:7]
wire [63:0] io_enq_bits_data_0 = io_enq_bits_data; // @[Repeater.scala:10:7]
wire io_enq_bits_corrupt_0 = io_enq_bits_corrupt; // @[Repeater.scala:10:7]
wire io_deq_ready_0 = io_deq_ready; // @[Repeater.scala:10:7]
wire _io_enq_ready_T_1; // @[Repeater.scala:25:32]
wire _io_deq_valid_T; // @[Repeater.scala:24:32]
wire [2:0] _io_deq_bits_T_opcode; // @[Repeater.scala:26:21]
wire [2:0] _io_deq_bits_T_param; // @[Repeater.scala:26:21]
wire [2:0] _io_deq_bits_T_size; // @[Repeater.scala:26:21]
wire [7:0] _io_deq_bits_T_source; // @[Repeater.scala:26:21]
wire [20:0] _io_deq_bits_T_address; // @[Repeater.scala:26:21]
wire [7:0] _io_deq_bits_T_mask; // @[Repeater.scala:26:21]
wire [63:0] _io_deq_bits_T_data; // @[Repeater.scala:26:21]
wire _io_deq_bits_T_corrupt; // @[Repeater.scala:26:21]
wire io_enq_ready_0; // @[Repeater.scala:10:7]
wire [2:0] io_deq_bits_opcode_0; // @[Repeater.scala:10:7]
wire [2:0] io_deq_bits_param_0; // @[Repeater.scala:10:7]
wire [2:0] io_deq_bits_size_0; // @[Repeater.scala:10:7]
wire [7:0] io_deq_bits_source_0; // @[Repeater.scala:10:7]
wire [20:0] io_deq_bits_address_0; // @[Repeater.scala:10:7]
wire [7:0] io_deq_bits_mask_0; // @[Repeater.scala:10:7]
wire [63:0] io_deq_bits_data; // @[Repeater.scala:10:7]
wire io_deq_bits_corrupt_0; // @[Repeater.scala:10:7]
wire io_deq_valid_0; // @[Repeater.scala:10:7]
wire io_full_0; // @[Repeater.scala:10:7]
reg full; // @[Repeater.scala:20:21]
assign io_full_0 = full; // @[Repeater.scala:10:7, :20:21]
reg [2:0] saved_opcode; // @[Repeater.scala:21:18]
reg [2:0] saved_param; // @[Repeater.scala:21:18]
reg [2:0] saved_size; // @[Repeater.scala:21:18]
reg [7:0] saved_source; // @[Repeater.scala:21:18]
reg [20:0] saved_address; // @[Repeater.scala:21:18]
reg [7:0] saved_mask; // @[Repeater.scala:21:18]
reg [63:0] saved_data; // @[Repeater.scala:21:18]
reg saved_corrupt; // @[Repeater.scala:21:18]
assign _io_deq_valid_T = io_enq_valid_0 | full; // @[Repeater.scala:10:7, :20:21, :24:32]
assign io_deq_valid_0 = _io_deq_valid_T; // @[Repeater.scala:10:7, :24:32]
wire _io_enq_ready_T = ~full; // @[Repeater.scala:20:21, :25:35]
assign _io_enq_ready_T_1 = io_deq_ready_0 & _io_enq_ready_T; // @[Repeater.scala:10:7, :25:{32,35}]
assign io_enq_ready_0 = _io_enq_ready_T_1; // @[Repeater.scala:10:7, :25:32]
assign _io_deq_bits_T_opcode = full ? saved_opcode : io_enq_bits_opcode_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_param = full ? saved_param : io_enq_bits_param_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_size = full ? saved_size : io_enq_bits_size_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_source = full ? saved_source : io_enq_bits_source_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_address = full ? saved_address : io_enq_bits_address_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_mask = full ? saved_mask : io_enq_bits_mask_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_data = full ? saved_data : io_enq_bits_data_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign _io_deq_bits_T_corrupt = full ? saved_corrupt : io_enq_bits_corrupt_0; // @[Repeater.scala:10:7, :20:21, :21:18, :26:21]
assign io_deq_bits_opcode_0 = _io_deq_bits_T_opcode; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_param_0 = _io_deq_bits_T_param; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_size_0 = _io_deq_bits_T_size; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_source_0 = _io_deq_bits_T_source; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_address_0 = _io_deq_bits_T_address; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_mask_0 = _io_deq_bits_T_mask; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_data = _io_deq_bits_T_data; // @[Repeater.scala:10:7, :26:21]
assign io_deq_bits_corrupt_0 = _io_deq_bits_T_corrupt; // @[Repeater.scala:10:7, :26:21]
wire _T_1 = io_enq_ready_0 & io_enq_valid_0 & io_repeat_0; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[Repeater.scala:10:7]
if (reset) // @[Repeater.scala:10:7]
full <= 1'h0; // @[Repeater.scala:20:21]
else // @[Repeater.scala:10:7]
full <= ~(io_deq_ready_0 & io_deq_valid_0 & ~io_repeat_0) & (_T_1 | full); // @[Decoupled.scala:51:35]
if (_T_1) begin // @[Decoupled.scala:51:35]
saved_opcode <= io_enq_bits_opcode_0; // @[Repeater.scala:10:7, :21:18]
saved_param <= io_enq_bits_param_0; // @[Repeater.scala:10:7, :21:18]
saved_size <= io_enq_bits_size_0; // @[Repeater.scala:10:7, :21:18]
saved_source <= io_enq_bits_source_0; // @[Repeater.scala:10:7, :21:18]
saved_address <= io_enq_bits_address_0; // @[Repeater.scala:10:7, :21:18]
saved_mask <= io_enq_bits_mask_0; // @[Repeater.scala:10:7, :21:18]
saved_data <= io_enq_bits_data_0; // @[Repeater.scala:10:7, :21:18]
saved_corrupt <= io_enq_bits_corrupt_0; // @[Repeater.scala:10:7, :21:18]
end
  end // always @(posedge)
assign io_full = io_full_0; // @[Repeater.scala:10:7]
assign io_enq_ready = io_enq_ready_0; // @[Repeater.scala:10:7]
assign io_deq_valid = io_deq_valid_0; // @[Repeater.scala:10:7]
assign io_deq_bits_opcode = io_deq_bits_opcode_0; // @[Repeater.scala:10:7]
assign io_deq_bits_param = io_deq_bits_param_0; // @[Repeater.scala:10:7]
assign io_deq_bits_size = io_deq_bits_size_0; // @[Repeater.scala:10:7]
assign io_deq_bits_source = io_deq_bits_source_0; // @[Repeater.scala:10:7]
assign io_deq_bits_address = io_deq_bits_address_0; // @[Repeater.scala:10:7]
assign io_deq_bits_mask = io_deq_bits_mask_0; // @[Repeater.scala:10:7]
assign io_deq_bits_corrupt = io_deq_bits_corrupt_0; // @[Repeater.scala:10:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File EgressUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
class EgressUnit(coupleSAVA: Boolean, combineSAST: Boolean, inParams: Seq[ChannelParams], ingressParams: Seq[IngressChannelParams], cParam: EgressChannelParams)
(implicit p: Parameters) extends AbstractOutputUnit(inParams, ingressParams, cParam)(p) {
class EgressUnitIO extends AbstractOutputUnitIO(inParams, ingressParams, cParam) {
val out = Decoupled(new EgressFlit(cParam.payloadBits))
}
val io = IO(new EgressUnitIO)
val channel_empty = RegInit(true.B)
val flow = Reg(new FlowRoutingBundle)
val q = Module(new Queue(new EgressFlit(cParam.payloadBits), 3 - (if (combineSAST) 1 else 0), flow=true))
q.io.enq.valid := io.in(0).valid
q.io.enq.bits.head := io.in(0).bits.head
q.io.enq.bits.tail := io.in(0).bits.tail
val flows = cParam.possibleFlows.toSeq
if (flows.size == 0) {
q.io.enq.bits.ingress_id := 0.U(1.W)
} else {
q.io.enq.bits.ingress_id := Mux1H(
flows.map(f => (f.ingressNode.U === io.in(0).bits.flow.ingress_node &&
f.ingressNodeId.U === io.in(0).bits.flow.ingress_node_id)),
flows.map(f => f.ingressId.U(ingressIdBits.W))
)
}
q.io.enq.bits.payload := io.in(0).bits.payload
io.out <> q.io.deq
assert(!(q.io.enq.valid && !q.io.enq.ready))
io.credit_available(0) := q.io.count === 0.U
io.channel_status(0).occupied := !channel_empty
io.channel_status(0).flow := flow
when (io.credit_alloc(0).alloc && io.credit_alloc(0).tail) {
channel_empty := true.B
if (coupleSAVA) io.channel_status(0).occupied := false.B
}
when (io.allocs(0).alloc) {
channel_empty := false.B
flow := io.allocs(0).flow
}
}
| module EgressUnit_11( // @[EgressUnit.scala:12:7]
input clock, // @[EgressUnit.scala:12:7]
input reset, // @[EgressUnit.scala:12:7]
input io_in_0_valid, // @[EgressUnit.scala:18:14]
input io_in_0_bits_head, // @[EgressUnit.scala:18:14]
input io_in_0_bits_tail, // @[EgressUnit.scala:18:14]
input [72:0] io_in_0_bits_payload, // @[EgressUnit.scala:18:14]
input [4:0] io_in_0_bits_flow_ingress_node, // @[EgressUnit.scala:18:14]
input [1:0] io_in_0_bits_flow_ingress_node_id, // @[EgressUnit.scala:18:14]
output io_credit_available_0, // @[EgressUnit.scala:18:14]
output io_channel_status_0_occupied, // @[EgressUnit.scala:18:14]
input io_allocs_0_alloc, // @[EgressUnit.scala:18:14]
input io_credit_alloc_0_alloc, // @[EgressUnit.scala:18:14]
input io_credit_alloc_0_tail, // @[EgressUnit.scala:18:14]
input io_out_ready, // @[EgressUnit.scala:18:14]
output io_out_valid, // @[EgressUnit.scala:18:14]
output io_out_bits_head, // @[EgressUnit.scala:18:14]
output io_out_bits_tail, // @[EgressUnit.scala:18:14]
output [72:0] io_out_bits_payload // @[EgressUnit.scala:18:14]
);
wire _q_io_enq_ready; // @[EgressUnit.scala:22:17]
wire [1:0] _q_io_count; // @[EgressUnit.scala:22:17]
reg channel_empty; // @[EgressUnit.scala:20:30]
wire _q_io_enq_bits_ingress_id_T_25 = io_in_0_bits_flow_ingress_node_id == 2'h0; // @[EgressUnit.scala:32:27] |
Generate the Verilog code corresponding to the following Chisel files.
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
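// Worked example for the binary-to-Gray conversion above: a 3-bit counter stepping through
// 0,1,2,3,4 yields `incremented ^ (incremented >> 1)` = 000,001,011,010,110, so successive
// values differ in exactly one bit. That single-bit change is what makes the widx/ridx
// pointers safe to synchronize bit-by-bit across the clock crossing below.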
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
    //  Impossible to write because a dequeue can occur on the receiving side,
    //  after which reset is allowed to happen, but the write side cannot know
    //  that the dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
    // The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
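// Example sketch: crossing a Decoupled stream between two clock domains with the wrapper
// above. Every name on a right-hand side below is an illustrative placeholder.
//
//   val crossing = Module(new AsyncQueue(UInt(32.W)))
//   crossing.io.enq_clock := producerClock
//   crossing.io.enq_reset := producerReset
//   crossing.io.enq <> producerStream // Decoupled source in the producer domain
//   crossing.io.deq_clock := consumerClock
//   crossing.io.deq_reset := consumerReset
//   consumerStream <> crossing.io.deq // Decoupled sink in the consumer domain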
| module AsyncQueue_12( // @[AsyncQueue.scala:226:7]
input io_enq_clock, // @[AsyncQueue.scala:227:14]
input io_enq_reset, // @[AsyncQueue.scala:227:14]
output io_enq_ready, // @[AsyncQueue.scala:227:14]
input io_enq_valid, // @[AsyncQueue.scala:227:14]
input [31:0] io_enq_bits_phit, // @[AsyncQueue.scala:227:14]
input io_deq_clock, // @[AsyncQueue.scala:227:14]
input io_deq_reset, // @[AsyncQueue.scala:227:14]
input io_deq_ready, // @[AsyncQueue.scala:227:14]
output io_deq_valid, // @[AsyncQueue.scala:227:14]
output [31:0] io_deq_bits_phit // @[AsyncQueue.scala:227:14]
);
wire [3:0] _sink_io_async_ridx; // @[AsyncQueue.scala:229:70]
wire _sink_io_async_safe_ridx_valid; // @[AsyncQueue.scala:229:70]
wire _sink_io_async_safe_sink_reset_n; // @[AsyncQueue.scala:229:70]
wire [31:0] _source_io_async_mem_0_phit; // @[AsyncQueue.scala:228:70]
wire [31:0] _source_io_async_mem_1_phit; // @[AsyncQueue.scala:228:70]
wire [31:0] _source_io_async_mem_2_phit; // @[AsyncQueue.scala:228:70]
wire [31:0] _source_io_async_mem_3_phit; // @[AsyncQueue.scala:228:70]
wire [31:0] _source_io_async_mem_4_phit; // @[AsyncQueue.scala:228:70]
wire [31:0] _source_io_async_mem_5_phit; // @[AsyncQueue.scala:228:70]
wire [31:0] _source_io_async_mem_6_phit; // @[AsyncQueue.scala:228:70]
wire [31:0] _source_io_async_mem_7_phit; // @[AsyncQueue.scala:228:70]
wire [3:0] _source_io_async_widx; // @[AsyncQueue.scala:228:70]
wire _source_io_async_safe_widx_valid; // @[AsyncQueue.scala:228:70]
wire _source_io_async_safe_source_reset_n; // @[AsyncQueue.scala:228:70]
wire io_enq_clock_0 = io_enq_clock; // @[AsyncQueue.scala:226:7]
wire io_enq_reset_0 = io_enq_reset; // @[AsyncQueue.scala:226:7]
wire io_enq_valid_0 = io_enq_valid; // @[AsyncQueue.scala:226:7]
wire [31:0] io_enq_bits_phit_0 = io_enq_bits_phit; // @[AsyncQueue.scala:226:7]
wire io_deq_clock_0 = io_deq_clock; // @[AsyncQueue.scala:226:7]
wire io_deq_reset_0 = io_deq_reset; // @[AsyncQueue.scala:226:7]
wire io_deq_ready_0 = io_deq_ready; // @[AsyncQueue.scala:226:7]
wire io_enq_ready_0; // @[AsyncQueue.scala:226:7]
wire [31:0] io_deq_bits_phit_0; // @[AsyncQueue.scala:226:7]
wire io_deq_valid_0; // @[AsyncQueue.scala:226:7]
AsyncQueueSource_Phit_12 source ( // @[AsyncQueue.scala:228:70]
.clock (io_enq_clock_0), // @[AsyncQueue.scala:226:7]
.reset (io_enq_reset_0), // @[AsyncQueue.scala:226:7]
.io_enq_ready (io_enq_ready_0),
.io_enq_valid (io_enq_valid_0), // @[AsyncQueue.scala:226:7]
.io_enq_bits_phit (io_enq_bits_phit_0), // @[AsyncQueue.scala:226:7]
.io_async_mem_0_phit (_source_io_async_mem_0_phit),
.io_async_mem_1_phit (_source_io_async_mem_1_phit),
.io_async_mem_2_phit (_source_io_async_mem_2_phit),
.io_async_mem_3_phit (_source_io_async_mem_3_phit),
.io_async_mem_4_phit (_source_io_async_mem_4_phit),
.io_async_mem_5_phit (_source_io_async_mem_5_phit),
.io_async_mem_6_phit (_source_io_async_mem_6_phit),
.io_async_mem_7_phit (_source_io_async_mem_7_phit),
.io_async_ridx (_sink_io_async_ridx), // @[AsyncQueue.scala:229:70]
.io_async_widx (_source_io_async_widx),
.io_async_safe_ridx_valid (_sink_io_async_safe_ridx_valid), // @[AsyncQueue.scala:229:70]
.io_async_safe_widx_valid (_source_io_async_safe_widx_valid),
.io_async_safe_source_reset_n (_source_io_async_safe_source_reset_n),
.io_async_safe_sink_reset_n (_sink_io_async_safe_sink_reset_n) // @[AsyncQueue.scala:229:70]
); // @[AsyncQueue.scala:228:70]
AsyncQueueSink_Phit_12 sink ( // @[AsyncQueue.scala:229:70]
.clock (io_deq_clock_0), // @[AsyncQueue.scala:226:7]
.reset (io_deq_reset_0), // @[AsyncQueue.scala:226:7]
.io_deq_ready (io_deq_ready_0), // @[AsyncQueue.scala:226:7]
.io_deq_valid (io_deq_valid_0),
.io_deq_bits_phit (io_deq_bits_phit_0),
.io_async_mem_0_phit (_source_io_async_mem_0_phit), // @[AsyncQueue.scala:228:70]
.io_async_mem_1_phit (_source_io_async_mem_1_phit), // @[AsyncQueue.scala:228:70]
.io_async_mem_2_phit (_source_io_async_mem_2_phit), // @[AsyncQueue.scala:228:70]
.io_async_mem_3_phit (_source_io_async_mem_3_phit), // @[AsyncQueue.scala:228:70]
.io_async_mem_4_phit (_source_io_async_mem_4_phit), // @[AsyncQueue.scala:228:70]
.io_async_mem_5_phit (_source_io_async_mem_5_phit), // @[AsyncQueue.scala:228:70]
.io_async_mem_6_phit (_source_io_async_mem_6_phit), // @[AsyncQueue.scala:228:70]
.io_async_mem_7_phit (_source_io_async_mem_7_phit), // @[AsyncQueue.scala:228:70]
.io_async_ridx (_sink_io_async_ridx),
.io_async_widx (_source_io_async_widx), // @[AsyncQueue.scala:228:70]
.io_async_safe_ridx_valid (_sink_io_async_safe_ridx_valid),
.io_async_safe_widx_valid (_source_io_async_safe_widx_valid), // @[AsyncQueue.scala:228:70]
.io_async_safe_source_reset_n (_source_io_async_safe_source_reset_n), // @[AsyncQueue.scala:228:70]
.io_async_safe_sink_reset_n (_sink_io_async_safe_sink_reset_n)
); // @[AsyncQueue.scala:229:70]
assign io_enq_ready = io_enq_ready_0; // @[AsyncQueue.scala:226:7]
assign io_deq_valid = io_deq_valid_0; // @[AsyncQueue.scala:226:7]
assign io_deq_bits_phit = io_deq_bits_phit_0; // @[AsyncQueue.scala:226:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
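// Example sketch: a two-deep pipeline of a Bool that resets to false, with suggested
// register names; `valid` below is an illustrative placeholder.
//
//   val valid_piped = ShiftRegInit(valid, n = 2, init = false.B, name = Some("valid_pipe"))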
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
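// Example sketch: bringing a level signal from another clock domain into this one through
// a 3-deep asynchronously reset synchronizer chain (the same call pattern AsyncQueue uses
// for its Gray-coded pointers). `irq_async` below is an illustrative placeholder.
//
//   val irq_sync = AsyncResetSynchronizerShiftReg(irq_async, 3, Some("irq_sync"))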
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_242( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_446 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
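      // Illustrative sketch (not in the original source): with numTlTxns = 8 the TileLink source is
      // 4 bits wide. A read reusing reorder-buffer slot 5 issues with source 0b0101 (MSB 0 = read),
      // while a write grabbing free write ID 5 issues with source 0b1101, i.e. Cat(isWriteSourceBit, 5.U(3.W)).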
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
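      // Worked example (illustrative, assuming numTlTxns = 4): if usedWriteIds = b1011, then
      // ~usedWriteIds = b0100, leftOR(~usedWriteIds) = b1100, shifting left and inverting gives
      // b0111, and ANDing with ~usedWriteIds yields b0100, a one-hot pointing at the
      // lowest-numbered free write ID (bit 2).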
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
      // Handle the case where the write ack (D-channel response) arrives before the write request
      // burst has finished issuing; stall it until the write ID has been marked as used.
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
  // Store which list index was reserved; we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_483( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File SwitchAllocator.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
class SwitchAllocReq(val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams])
(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val tail = Bool()
}
class SwitchArbiter(inN: Int, outN: Int, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Module {
val io = IO(new Bundle {
val in = Flipped(Vec(inN, Decoupled(new SwitchAllocReq(outParams, egressParams))))
val out = Vec(outN, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val chosen_oh = Vec(outN, Output(UInt(inN.W)))
})
val lock = Seq.fill(outN) { RegInit(0.U(inN.W)) }
val unassigned = Cat(io.in.map(_.valid).reverse) & ~(lock.reduce(_|_))
val mask = RegInit(0.U(inN.W))
val choices = Wire(Vec(outN, UInt(inN.W)))
var sel = PriorityEncoderOH(Cat(unassigned, unassigned & ~mask))
for (i <- 0 until outN) {
choices(i) := sel | (sel >> inN)
sel = PriorityEncoderOH(unassigned & ~choices(i))
}
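  // Illustrative note (not in the original source): Cat(unassigned, unassigned & ~mask) forms a
  // 2*inN-bit request vector whose low half keeps only requesters above the previous winner
  // (mask is a thermometer code covering it), so PriorityEncoderOH prefers the next requester
  // after the last grant and wraps around via the unmasked high half; 'sel | (sel >> inN)' folds
  // the 2*inN-bit one-hot back down to inN bits. For example, with inN = 4, mask = b0011 and
  // unassigned = b1001, the low half is b1000 and input 3 is chosen first.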
io.in.foreach(_.ready := false.B)
var chosens = 0.U(inN.W)
val in_tails = Cat(io.in.map(_.bits.tail).reverse)
for (i <- 0 until outN) {
val in_valids = Cat((0 until inN).map { j => io.in(j).valid && !chosens(j) }.reverse)
val chosen = Mux((in_valids & lock(i) & ~chosens).orR, lock(i), choices(i))
io.chosen_oh(i) := chosen
io.out(i).valid := (in_valids & chosen).orR
io.out(i).bits := Mux1H(chosen, io.in.map(_.bits))
for (j <- 0 until inN) {
when (chosen(j) && io.out(i).ready) {
io.in(j).ready := true.B
}
}
chosens = chosens | chosen
when (io.out(i).fire) {
lock(i) := chosen & ~in_tails
}
}
when (io.out(0).fire) {
mask := (0 until inN).map { i => (io.chosen_oh(0) >> i) }.reduce(_|_)
} .otherwise {
mask := Mux(~mask === 0.U, 0.U, (mask << 1) | 1.U(1.W))
}
}
class SwitchAllocator(
val routerParams: RouterParams,
val inParams: Seq[ChannelParams],
val outParams: Seq[ChannelParams],
val ingressParams: Seq[IngressChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module
with HasRouterParams
with HasRouterInputParams
with HasRouterOutputParams {
val io = IO(new Bundle {
val req = MixedVec(allInParams.map(u =>
Vec(u.destSpeedup, Flipped(Decoupled(new SwitchAllocReq(outParams, egressParams))))))
val credit_alloc = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Output(new OutputCreditAlloc))})
val switch_sel = MixedVec(allOutParams.map { o => Vec(o.srcSpeedup,
MixedVec(allInParams.map { i => Vec(i.destSpeedup, Output(Bool())) })) })
})
val nInputChannels = allInParams.map(_.nVirtualChannels).sum
val arbs = allOutParams.map { oP => Module(new SwitchArbiter(
allInParams.map(_.destSpeedup).reduce(_+_),
oP.srcSpeedup,
outParams,
egressParams
))}
arbs.foreach(_.io.out.foreach(_.ready := true.B))
var idx = 0
io.req.foreach(_.foreach { o =>
val fires = Wire(Vec(arbs.size, Bool()))
arbs.zipWithIndex.foreach { case (a,i) =>
a.io.in(idx).valid := o.valid && o.bits.vc_sel(i).reduce(_||_)
a.io.in(idx).bits := o.bits
fires(i) := a.io.in(idx).fire
}
o.ready := fires.reduce(_||_)
idx += 1
})
for (i <- 0 until nAllOutputs) {
for (j <- 0 until allOutParams(i).srcSpeedup) {
idx = 0
for (m <- 0 until nAllInputs) {
for (n <- 0 until allInParams(m).destSpeedup) {
io.switch_sel(i)(j)(m)(n) := arbs(i).io.in(idx).valid && arbs(i).io.chosen_oh(j)(idx) && arbs(i).io.out(j).valid
idx += 1
}
}
}
}
io.credit_alloc.foreach(_.foreach(_.alloc := false.B))
io.credit_alloc.foreach(_.foreach(_.tail := false.B))
(arbs zip io.credit_alloc).zipWithIndex.map { case ((a,i),t) =>
for (j <- 0 until i.size) {
for (k <- 0 until a.io.out.size) {
when (a.io.out(k).valid && a.io.out(k).bits.vc_sel(t)(j)) {
i(j).alloc := true.B
i(j).tail := a.io.out(k).bits.tail
}
}
}
}
}
| module SwitchArbiter_52( // @[SwitchAllocator.scala:17:7]
input clock, // @[SwitchAllocator.scala:17:7]
input reset, // @[SwitchAllocator.scala:17:7]
output io_in_12_ready, // @[SwitchAllocator.scala:18:14]
input io_in_12_valid, // @[SwitchAllocator.scala:18:14]
input io_in_12_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14]
input io_in_12_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14]
input io_in_12_bits_tail, // @[SwitchAllocator.scala:18:14]
output io_in_13_ready, // @[SwitchAllocator.scala:18:14]
input io_in_13_valid, // @[SwitchAllocator.scala:18:14]
input io_in_13_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14]
input io_in_13_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14]
input io_in_13_bits_tail, // @[SwitchAllocator.scala:18:14]
output io_in_16_ready, // @[SwitchAllocator.scala:18:14]
input io_in_16_valid, // @[SwitchAllocator.scala:18:14]
input io_in_16_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14]
input io_in_16_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14]
input io_in_16_bits_tail, // @[SwitchAllocator.scala:18:14]
output io_in_17_ready, // @[SwitchAllocator.scala:18:14]
input io_in_17_valid, // @[SwitchAllocator.scala:18:14]
input io_in_17_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14]
input io_in_17_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14]
input io_in_17_bits_tail, // @[SwitchAllocator.scala:18:14]
output io_in_20_ready, // @[SwitchAllocator.scala:18:14]
input io_in_20_valid, // @[SwitchAllocator.scala:18:14]
input io_in_20_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14]
input io_in_20_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14]
input io_in_20_bits_tail, // @[SwitchAllocator.scala:18:14]
output io_in_21_ready, // @[SwitchAllocator.scala:18:14]
input io_in_21_valid, // @[SwitchAllocator.scala:18:14]
input io_in_21_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14]
input io_in_21_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14]
input io_in_21_bits_tail, // @[SwitchAllocator.scala:18:14]
input io_out_0_ready, // @[SwitchAllocator.scala:18:14]
output io_out_0_valid, // @[SwitchAllocator.scala:18:14]
output io_out_0_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14]
output io_out_0_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14]
output io_out_0_bits_tail, // @[SwitchAllocator.scala:18:14]
output [21:0] io_chosen_oh_0 // @[SwitchAllocator.scala:18:14]
);
reg [21:0] lock_0; // @[SwitchAllocator.scala:24:38]
wire [21:0] unassigned = {io_in_21_valid, io_in_20_valid, 2'h0, io_in_17_valid, io_in_16_valid, 2'h0, io_in_13_valid, io_in_12_valid, 12'h0} & ~lock_0; // @[SwitchAllocator.scala:17:7, :24:38, :25:{23,52,54}]
reg [21:0] mask; // @[SwitchAllocator.scala:27:21]
wire [21:0] _sel_T_1 = unassigned & ~mask; // @[SwitchAllocator.scala:25:52, :27:21, :30:{58,60}]
wire [43:0] sel = _sel_T_1[0] ? 44'h1 : _sel_T_1[1] ? 44'h2 : _sel_T_1[2] ? 44'h4 : _sel_T_1[3] ? 44'h8 : _sel_T_1[4] ? 44'h10 : _sel_T_1[5] ? 44'h20 : _sel_T_1[6] ? 44'h40 : _sel_T_1[7] ? 44'h80 : _sel_T_1[8] ? 44'h100 : _sel_T_1[9] ? 44'h200 : _sel_T_1[10] ? 44'h400 : _sel_T_1[11] ? 44'h800 : _sel_T_1[12] ? 44'h1000 : _sel_T_1[13] ? 44'h2000 : _sel_T_1[14] ? 44'h4000 : _sel_T_1[15] ? 44'h8000 : _sel_T_1[16] ? 44'h10000 : _sel_T_1[17] ? 44'h20000 : _sel_T_1[18] ? 44'h40000 : _sel_T_1[19] ? 44'h80000 : _sel_T_1[20] ? 44'h100000 : _sel_T_1[21] ? 44'h200000 : unassigned[0] ? 44'h400000 : unassigned[1] ? 44'h800000 : unassigned[2] ? 44'h1000000 : unassigned[3] ? 44'h2000000 : unassigned[4] ? 44'h4000000 : unassigned[5] ? 44'h8000000 : unassigned[6] ? 44'h10000000 : unassigned[7] ? 44'h20000000 : unassigned[8] ? 44'h40000000 : unassigned[9] ? 44'h80000000 : unassigned[10] ? 44'h100000000 : unassigned[11] ? 44'h200000000 : unassigned[12] ? 44'h400000000 : unassigned[13] ? 44'h800000000 : unassigned[14] ? 44'h1000000000 : unassigned[15] ? 44'h2000000000 : unassigned[16] ? 44'h4000000000 : unassigned[17] ? 44'h8000000000 : unassigned[18] ? 44'h10000000000 : unassigned[19] ? 44'h20000000000 : unassigned[20] ? 44'h40000000000 : {unassigned[21], 43'h0}; // @[OneHot.scala:85:71]
wire [9:0] _GEN = {io_in_21_valid, io_in_20_valid, 2'h0, io_in_17_valid, io_in_16_valid, 2'h0, io_in_13_valid, io_in_12_valid}; // @[SwitchAllocator.scala:17:7, :41:24]
wire [9:0] _chosen_T_2 = _GEN & lock_0[21:12]; // @[SwitchAllocator.scala:24:38, :41:24, :42:33]
wire [21:0] chosen = (|{_chosen_T_2[9:8], _chosen_T_2[5:4], _chosen_T_2[1:0]}) ? lock_0 : sel[21:0] | sel[43:22]; // @[Mux.scala:50:70]
wire [9:0] _io_out_0_valid_T = _GEN & chosen[21:12]; // @[SwitchAllocator.scala:41:24, :42:21, :44:35]
wire [5:0] _GEN_0 = {_io_out_0_valid_T[9:8], _io_out_0_valid_T[5:4], _io_out_0_valid_T[1:0]}; // @[SwitchAllocator.scala:44:35]
wire _GEN_1 = io_out_0_ready & (|_GEN_0); // @[Decoupled.scala:51:35]
wire [20:0] _GEN_2 = chosen[20:0] | chosen[21:1]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [19:0] _GEN_3 = _GEN_2[19:0] | chosen[21:2]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [18:0] _GEN_4 = _GEN_3[18:0] | chosen[21:3]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [17:0] _GEN_5 = _GEN_4[17:0] | chosen[21:4]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [16:0] _GEN_6 = _GEN_5[16:0] | chosen[21:5]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [15:0] _GEN_7 = _GEN_6[15:0] | chosen[21:6]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [14:0] _GEN_8 = _GEN_7[14:0] | chosen[21:7]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [13:0] _GEN_9 = _GEN_8[13:0] | chosen[21:8]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [12:0] _GEN_10 = _GEN_9[12:0] | chosen[21:9]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [11:0] _GEN_11 = _GEN_10[11:0] | chosen[21:10]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [10:0] _GEN_12 = _GEN_11[10:0] | chosen[21:11]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [9:0] _GEN_13 = _GEN_12[9:0] | chosen[21:12]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [8:0] _GEN_14 = _GEN_13[8:0] | chosen[21:13]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [7:0] _GEN_15 = _GEN_14[7:0] | chosen[21:14]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [6:0] _GEN_16 = _GEN_15[6:0] | chosen[21:15]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [5:0] _GEN_17 = _GEN_16[5:0] | chosen[21:16]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [4:0] _GEN_18 = _GEN_17[4:0] | chosen[21:17]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [3:0] _GEN_19 = _GEN_18[3:0] | chosen[21:18]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [2:0] _GEN_20 = _GEN_19[2:0] | chosen[21:19]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
wire [1:0] _GEN_21 = _GEN_20[1:0] | chosen[21:20]; // @[SwitchAllocator.scala:42:21, :58:{55,71}]
always @(posedge clock) begin // @[SwitchAllocator.scala:17:7]
if (reset) begin // @[SwitchAllocator.scala:17:7]
lock_0 <= 22'h0; // @[SwitchAllocator.scala:24:38]
mask <= 22'h0; // @[SwitchAllocator.scala:27:21]
end
else begin // @[SwitchAllocator.scala:17:7]
if (_GEN_1) // @[Decoupled.scala:51:35]
lock_0 <= chosen & {~io_in_21_bits_tail, ~io_in_20_bits_tail, 2'h3, ~io_in_17_bits_tail, ~io_in_16_bits_tail, 2'h3, ~io_in_13_bits_tail, ~io_in_12_bits_tail, 12'hFFF}; // @[SwitchAllocator.scala:24:38, :39:21, :42:21, :53:{25,27}]
mask <= _GEN_1 ? {chosen[21], _GEN_2[20], _GEN_3[19], _GEN_4[18], _GEN_5[17], _GEN_6[16], _GEN_7[15], _GEN_8[14], _GEN_9[13], _GEN_10[12], _GEN_11[11], _GEN_12[10], _GEN_13[9], _GEN_14[8], _GEN_15[7], _GEN_16[6], _GEN_17[5], _GEN_18[4], _GEN_19[3], _GEN_20[2], _GEN_21[1], _GEN_21[0] | chosen[21]} : (&mask) ? 22'h0 : {mask[20:0], 1'h1}; // @[Decoupled.scala:51:35]
end
always @(posedge) |
Generate the Verilog code corresponding to the following Chisel files.
File Crossing.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg}
@deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2")
class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val intnode = IntAdapterNode()
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) =>
out := SynchronizerShiftReg(in, sync)
}
}
}
object IntSyncCrossingSource
{
def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) =
{
val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered))
intsource.node
}
}
class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSourceNode(alreadyRegistered)
lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl)
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := AsyncResetReg(Cat(in.reverse)).asBools
}
}
class ImplRegistered extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := in
}
}
}
object IntSyncCrossingSink
{
@deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2")
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(sync)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := SynchronizerShiftReg(in.sync, sync)
}
}
}
object IntSyncAsyncCrossingSink
{
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(0)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := in.sync
}
}
}
object IntSyncSyncCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncSyncCrossingSink())
intsink.node
}
}
class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(1)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := RegNext(in.sync)
}
}
}
object IntSyncRationalCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncRationalCrossingSink())
intsink.node
}
}
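// Illustrative usage sketch (not part of the original file): a typical asynchronous interrupt
// crossing chains a source and a sink around the clock-domain boundary. The node names
// 'intSinkNode' and 'intSourceNode' below are hypothetical.
//
//   intSinkNode := IntSyncAsyncCrossingSink(sync = 3) := IntSyncCrossingSource() := intSourceNode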
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
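  // Illustrative sketch (not in the original source): a subclass that manages its own clocking
  // would typically drive the child clock/reset from explicit ports and opt in to passing them
  // to lazy children; the port names below are hypothetical.
  //
  //   override def provideImplicitClockToLazyChildren: Boolean = true
  //   childClock := io.domainClock
  //   childReset := io.domainReset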
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
 * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]];
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined itself.
* @tparam EI
 *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 *   It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself.
* @tparam UO
 *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 *   interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 *   - line `─`: the source is processed by a function and the result is passed on to others
 *   - Arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
    *   An `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
    *   An `n`-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
      *   the mapping of [[BaseNode]]s indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
      * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
  /** Ensure that the same node is not visited twice while resolving `:*=` and the other binding operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections, which need to be resolved to determine how many actual edges they correspond to. We also need to
    * build up the ranges of edges which correspond to each binding operator, so that we can apply the correct edge
    * parameters and later build up the correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
      // resolveStar relies on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
  /** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
    * Each element is: `j` Port index of this binding in the [[InwardNode.iPortMapping]] of the node on the other side
    * of the binding. `n` Instance of that inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
    * `j` Port index of this binding in the [[OutwardNode.oPortMapping]] of the node on the other side of the binding.
    * `n` Instance of that outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
      // query this binding's port index range on the node at the other side of the binding
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: DontCare the unconnected forwarded diplomatic signals for compatibility reasons.
    //       In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: DontCare the unconnected forwarded diplomatic signals for compatibility reasons.
    //       In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
  /** Create the [[Dangle]]s which describe the connections from this node's outputs to other nodes' inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
  /** Create the [[Dangle]]s which describe the connections to this node's inputs from other nodes' outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
  /** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
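// A minimal, standalone sketch (plain Scala, not part of this file's API; the adapter-style
// policy below is an assumption, not taken from the code above) of two steps documented in
// MixedNode: resolving star bindings from the counts known on the other side, and turning
// per-binding edge counts into (start, end) ranges with a running sum, as oPortMapping and
// iPortMapping do via `oSum.init zip oSum.tail`.
object StarResolutionSketch extends App {
  // Hypothetical adapter-like policy: at most one side may use a star, and the starred side
  // inherits whatever count makes the number of inputs equal the number of outputs.
  def resolveStarSketch(iKnown: Int, oKnown: Int, iStars: Int, oStars: Int): (Int, Int) = {
    require(iStars + oStars <= 1, "this sketch allows at most one star binding")
    if (iStars == 1) (oKnown - iKnown, 0)
    else if (oStars == 1) (0, iKnown - oKnown)
    else (0, 0)
  }
  println(resolveStarSketch(iKnown = 1, oKnown = 3, iStars = 1, oStars = 0)) // (2,0)
  // Per-binding edge counts (e.g. BIND_ONCE -> 1, a resolved star -> 3) become index ranges.
  val edgeCounts = Seq(1, 3, 2)
  val sums = edgeCounts.scanLeft(0)(_ + _) // List(0, 1, 4, 6)
  println(sums.init.zip(sums.tail))        // List((0,1), (1,4), (4,6))
}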
| module IntSyncSyncCrossingSink_n1x1_8( // @[Crossing.scala:96:9]
input auto_in_sync_0, // @[LazyModuleImp.scala:107:25]
output auto_out_0 // @[LazyModuleImp.scala:107:25]
);
wire auto_in_sync_0_0 = auto_in_sync_0; // @[Crossing.scala:96:9]
wire childClock = 1'h0; // @[LazyModuleImp.scala:155:31]
wire childReset = 1'h0; // @[LazyModuleImp.scala:158:31]
wire _childClock_T = 1'h0; // @[LazyModuleImp.scala:160:25]
wire nodeIn_sync_0 = auto_in_sync_0_0; // @[Crossing.scala:96:9]
wire nodeOut_0; // @[MixedNode.scala:542:17]
wire auto_out_0_0; // @[Crossing.scala:96:9]
assign nodeOut_0 = nodeIn_sync_0; // @[MixedNode.scala:542:17, :551:17]
assign auto_out_0_0 = nodeOut_0; // @[Crossing.scala:96:9]
assign auto_out_0 = auto_out_0_0; // @[Crossing.scala:96:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
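// A minimal usage sketch (hypothetical module, relying only on the imports already at the top
// of this file): a single default buffer stage is spliced between two identity nodes with the
// TLBuffer.apply factory; the commented line shows the chainNode variant for several stages.
class TLBufferUsageSketch(implicit p: Parameters) extends LazyModule {
  val from = TLIdentityNode()
  val to   = TLIdentityNode()
  to := TLBuffer(BufferParams.default) := from
  // Depth-2 alternative: to :=* TLBuffer.chainNode(2, Some("example_buffers")) :=* from
  lazy val module = new LazyModuleImp(this) {}
}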
| module TLBuffer_2( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset // @[Buffer.scala:40:9]
);
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Directory.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import MetaData._
import chisel3.experimental.dataview._
import freechips.rocketchip.util.DescribedSRAM
class DirectoryEntry(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val dirty = Bool() // true => TRUNK or TIP
val state = UInt(params.stateBits.W)
val clients = UInt(params.clientBits.W)
val tag = UInt(params.tagBits.W)
}
class DirectoryWrite(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val way = UInt(params.wayBits.W)
val data = new DirectoryEntry(params)
}
class DirectoryRead(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val set = UInt(params.setBits.W)
val tag = UInt(params.tagBits.W)
}
class DirectoryResult(params: InclusiveCacheParameters) extends DirectoryEntry(params)
{
val hit = Bool()
val way = UInt(params.wayBits.W)
}
class Directory(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val write = Flipped(Decoupled(new DirectoryWrite(params)))
val read = Flipped(Valid(new DirectoryRead(params))) // sees same-cycle write
val result = Valid(new DirectoryResult(params))
val ready = Bool() // reset complete; can enable access
})
val codeBits = new DirectoryEntry(params).getWidth
val cc_dir = DescribedSRAM(
name = "cc_dir",
desc = "Directory RAM",
size = params.cache.sets,
data = Vec(params.cache.ways, UInt(codeBits.W))
)
val write = Queue(io.write, 1) // must inspect contents => max size 1
// a flow Q creates a WaR hazard... this MIGHT not cause a problem
// a pipe Q causes combinational loop through the scheduler
// Wiping the Directory with 0s on reset has ultimate priority
val wipeCount = RegInit(0.U((params.setBits + 1).W))
val wipeOff = RegNext(false.B, true.B) // don't wipe tags during reset
val wipeDone = wipeCount(params.setBits)
val wipeSet = wipeCount(params.setBits - 1,0)
io.ready := wipeDone
when (!wipeDone && !wipeOff) { wipeCount := wipeCount + 1.U }
assert (wipeDone || !io.read.valid)
// Be explicit for dumb 1-port inference
val ren = io.read.valid
val wen = (!wipeDone && !wipeOff) || write.valid
assert (!io.read.valid || wipeDone)
require (codeBits <= 256)
write.ready := !io.read.valid
when (!ren && wen) {
cc_dir.write(
Mux(wipeDone, write.bits.set, wipeSet),
VecInit.fill(params.cache.ways) { Mux(wipeDone, write.bits.data.asUInt, 0.U) },
UIntToOH(write.bits.way, params.cache.ways).asBools.map(_ || !wipeDone))
}
val ren1 = RegInit(false.B)
val ren2 = if (params.micro.dirReg) RegInit(false.B) else ren1
ren2 := ren1
ren1 := ren
val bypass_valid = params.dirReg(write.valid)
val bypass = params.dirReg(write.bits, ren1 && write.valid)
val regout = params.dirReg(cc_dir.read(io.read.bits.set, ren), ren1)
val tag = params.dirReg(RegEnable(io.read.bits.tag, ren), ren1)
val set = params.dirReg(RegEnable(io.read.bits.set, ren), ren1)
  // Compute the victim way in case of an eviction
val victimLFSR = random.LFSR(width = 16, params.dirReg(ren))(InclusiveCacheParameters.lfsrBits-1, 0)
val victimSums = Seq.tabulate(params.cache.ways) { i => ((1 << InclusiveCacheParameters.lfsrBits)*i / params.cache.ways).U }
val victimLTE = Cat(victimSums.map { _ <= victimLFSR }.reverse)
val victimSimp = Cat(0.U(1.W), victimLTE(params.cache.ways-1, 1), 1.U(1.W))
val victimWayOH = victimSimp(params.cache.ways-1,0) & ~(victimSimp >> 1)
val victimWay = OHToUInt(victimWayOH)
assert (!ren2 || victimLTE(0) === 1.U)
assert (!ren2 || ((victimSimp >> 1) & ~victimSimp) === 0.U) // monotone
assert (!ren2 || PopCount(victimWayOH) === 1.U)
val setQuash = bypass_valid && bypass.set === set
val tagMatch = bypass.data.tag === tag
val wayMatch = bypass.way === victimWay
val ways = regout.map(d => d.asTypeOf(new DirectoryEntry(params)))
val hits = Cat(ways.zipWithIndex.map { case (w, i) =>
w.tag === tag && w.state =/= INVALID && (!setQuash || i.U =/= bypass.way)
}.reverse)
val hit = hits.orR
io.result.valid := ren2
io.result.bits.viewAsSupertype(chiselTypeOf(bypass.data)) := Mux(hit, Mux1H(hits, ways), Mux(setQuash && (tagMatch || wayMatch), bypass.data, Mux1H(victimWayOH, ways)))
io.result.bits.hit := hit || (setQuash && tagMatch && bypass.data.state =/= INVALID)
io.result.bits.way := Mux(hit, OHToUInt(hits), Mux(setQuash && tagMatch, bypass.way, victimWay))
params.ccover(ren2 && setQuash && tagMatch, "DIRECTORY_HIT_BYPASS", "Bypassing write to a directory hit")
params.ccover(ren2 && setQuash && !tagMatch && wayMatch, "DIRECTORY_EVICT_BYPASS", "Bypassing a write to a directory eviction")
def json: String = s"""{"clients":${params.clientBits},"mem":"${cc_dir.pathName}","clean":"${wipeDone.pathName}"}"""
}
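// A minimal software model (plain Scala, hypothetical lfsrBits/ways values, not used by the
// hardware above) of the victim-way selection: way i is chosen when the LFSR value falls into
// the i-th of `ways` equal slices of the LFSR range, which is what victimSums, victimLTE and
// victimWayOH compute in hardware.
object VictimWaySketch extends App {
  val lfsrBits = 10
  val ways = 8
  val sums = Seq.tabulate(ways)(i => ((1 << lfsrBits) * i) / ways) // 0, 128, ..., 896
  def victimWay(lfsr: Int): Int = sums.lastIndexWhere(_ <= lfsr)
  println(victimWay(0))    // 0
  println(victimWay(500))  // 3, since 384 <= 500 < 512
  println(victimWay(1023)) // 7
}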
File DescribedSRAM.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil
object DescribedSRAM {
def apply[T <: Data](
name: String,
desc: String,
size: BigInt, // depth
data: T
): SyncReadMem[T] = {
val mem = SyncReadMem(size, data)
mem.suggestName(name)
val granWidth = data match {
case v: Vec[_] => v.head.getWidth
case d => d.getWidth
}
val uid = 0
Annotated.srams(
component = mem,
name = name,
address_width = log2Ceil(size),
data_width = data.getWidth,
depth = size,
description = desc,
write_mask_granularity = granWidth
)
mem
}
}
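// A minimal usage sketch (hypothetical module and sizes): DescribedSRAM behaves like
// SyncReadMem(size, data) but additionally records the SRAM metadata through Annotated.srams
// for downstream tooling.
import chisel3._
class DescribedSRAMUsageSketch extends Module {
  val io = IO(new Bundle {
    val addr  = Input(UInt(10.W))
    val en    = Input(Bool())
    val rdata = Output(Vec(4, UInt(32.W)))
  })
  val mem = DescribedSRAM(
    name = "example_bank",
    desc = "Example scratchpad bank",
    size = 1024,
    data = Vec(4, UInt(32.W))
  )
  io.rdata := mem.read(io.addr, io.en)
}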
| module Directory( // @[Directory.scala:56:7]
input clock, // @[Directory.scala:56:7]
input reset, // @[Directory.scala:56:7]
output io_write_ready, // @[Directory.scala:58:14]
input io_write_valid, // @[Directory.scala:58:14]
input [9:0] io_write_bits_set, // @[Directory.scala:58:14]
input [2:0] io_write_bits_way, // @[Directory.scala:58:14]
input io_write_bits_data_dirty, // @[Directory.scala:58:14]
input [1:0] io_write_bits_data_state, // @[Directory.scala:58:14]
input [11:0] io_write_bits_data_clients, // @[Directory.scala:58:14]
input [10:0] io_write_bits_data_tag, // @[Directory.scala:58:14]
input io_read_valid, // @[Directory.scala:58:14]
input [9:0] io_read_bits_set, // @[Directory.scala:58:14]
input [10:0] io_read_bits_tag, // @[Directory.scala:58:14]
output io_result_bits_dirty, // @[Directory.scala:58:14]
output [1:0] io_result_bits_state, // @[Directory.scala:58:14]
output [11:0] io_result_bits_clients, // @[Directory.scala:58:14]
output [10:0] io_result_bits_tag, // @[Directory.scala:58:14]
output io_result_bits_hit, // @[Directory.scala:58:14]
output [2:0] io_result_bits_way, // @[Directory.scala:58:14]
output io_ready // @[Directory.scala:58:14]
);
wire cc_dir_MPORT_mask_7; // @[Directory.scala:100:65]
wire cc_dir_MPORT_mask_6; // @[Directory.scala:100:65]
wire cc_dir_MPORT_mask_5; // @[Directory.scala:100:65]
wire cc_dir_MPORT_mask_4; // @[Directory.scala:100:65]
wire cc_dir_MPORT_mask_3; // @[Directory.scala:100:65]
wire cc_dir_MPORT_mask_2; // @[Directory.scala:100:65]
wire cc_dir_MPORT_mask_1; // @[Directory.scala:100:65]
wire cc_dir_MPORT_mask_0; // @[Directory.scala:100:65]
wire [25:0] cc_dir_MPORT_data_7; // @[Directory.scala:99:44]
wire [9:0] cc_dir_MPORT_addr; // @[Directory.scala:98:10]
wire cc_dir_MPORT_en; // @[Directory.scala:96:14]
wire _victimLFSR_prng_io_out_0; // @[PRNG.scala:91:22]
wire _victimLFSR_prng_io_out_1; // @[PRNG.scala:91:22]
wire _victimLFSR_prng_io_out_2; // @[PRNG.scala:91:22]
wire _victimLFSR_prng_io_out_3; // @[PRNG.scala:91:22]
wire _victimLFSR_prng_io_out_4; // @[PRNG.scala:91:22]
wire _victimLFSR_prng_io_out_5; // @[PRNG.scala:91:22]
wire _victimLFSR_prng_io_out_6; // @[PRNG.scala:91:22]
wire _victimLFSR_prng_io_out_7; // @[PRNG.scala:91:22]
wire _victimLFSR_prng_io_out_8; // @[PRNG.scala:91:22]
wire _victimLFSR_prng_io_out_9; // @[PRNG.scala:91:22]
wire _write_q_io_deq_valid; // @[Decoupled.scala:362:21]
wire [9:0] _write_q_io_deq_bits_set; // @[Decoupled.scala:362:21]
wire [2:0] _write_q_io_deq_bits_way; // @[Decoupled.scala:362:21]
wire _write_q_io_deq_bits_data_dirty; // @[Decoupled.scala:362:21]
wire [1:0] _write_q_io_deq_bits_data_state; // @[Decoupled.scala:362:21]
wire [11:0] _write_q_io_deq_bits_data_clients; // @[Decoupled.scala:362:21]
wire [10:0] _write_q_io_deq_bits_data_tag; // @[Decoupled.scala:362:21]
wire [207:0] _cc_dir_RW0_rdata; // @[DescribedSRAM.scala:17:26]
reg [10:0] wipeCount; // @[Directory.scala:79:26]
reg wipeOff; // @[Directory.scala:80:24]
assign cc_dir_MPORT_en = ~io_read_valid & (~(wipeCount[10]) & ~wipeOff | _write_q_io_deq_valid); // @[Decoupled.scala:362:21]
assign cc_dir_MPORT_addr = wipeCount[10] ? _write_q_io_deq_bits_set : wipeCount[9:0]; // @[Decoupled.scala:362:21]
assign cc_dir_MPORT_data_7 = wipeCount[10] ? {_write_q_io_deq_bits_data_dirty, _write_q_io_deq_bits_data_state, _write_q_io_deq_bits_data_clients, _write_q_io_deq_bits_data_tag} : 26'h0; // @[Decoupled.scala:362:21]
assign cc_dir_MPORT_mask_0 = _write_q_io_deq_bits_way == 3'h0 | ~(wipeCount[10]); // @[Decoupled.scala:362:21]
assign cc_dir_MPORT_mask_1 = _write_q_io_deq_bits_way == 3'h1 | ~(wipeCount[10]); // @[Decoupled.scala:362:21]
assign cc_dir_MPORT_mask_2 = _write_q_io_deq_bits_way == 3'h2 | ~(wipeCount[10]); // @[Decoupled.scala:362:21]
assign cc_dir_MPORT_mask_3 = _write_q_io_deq_bits_way == 3'h3 | ~(wipeCount[10]); // @[Decoupled.scala:362:21]
assign cc_dir_MPORT_mask_4 = _write_q_io_deq_bits_way == 3'h4 | ~(wipeCount[10]); // @[Decoupled.scala:362:21]
assign cc_dir_MPORT_mask_5 = _write_q_io_deq_bits_way == 3'h5 | ~(wipeCount[10]); // @[Decoupled.scala:362:21]
assign cc_dir_MPORT_mask_6 = _write_q_io_deq_bits_way == 3'h6 | ~(wipeCount[10]); // @[Decoupled.scala:362:21]
assign cc_dir_MPORT_mask_7 = (&_write_q_io_deq_bits_way) | ~(wipeCount[10]); // @[Decoupled.scala:362:21]
reg ren1; // @[Directory.scala:103:21]
reg [10:0] tag; // @[Directory.scala:111:36]
reg [9:0] set; // @[Directory.scala:112:36]
wire [9:0] victimLFSR = {_victimLFSR_prng_io_out_9, _victimLFSR_prng_io_out_8, _victimLFSR_prng_io_out_7, _victimLFSR_prng_io_out_6, _victimLFSR_prng_io_out_5, _victimLFSR_prng_io_out_4, _victimLFSR_prng_io_out_3, _victimLFSR_prng_io_out_2, _victimLFSR_prng_io_out_1, _victimLFSR_prng_io_out_0}; // @[PRNG.scala:91:22]
wire [2:0] _GEN = {_victimLFSR_prng_io_out_9, _victimLFSR_prng_io_out_8, _victimLFSR_prng_io_out_7}; // @[PRNG.scala:91:22]
wire [1:0] _GEN_0 = {_victimLFSR_prng_io_out_9, _victimLFSR_prng_io_out_8}; // @[PRNG.scala:91:22]
wire _victimLTE_T_3 = victimLFSR > 10'h17F; // @[Directory.scala:115:63, :117:43]
wire _victimLTE_T_5 = victimLFSR > 10'h27F; // @[Directory.scala:115:63, :117:43]
wire _victimLTE_T_6 = victimLFSR > 10'h2FF; // @[Directory.scala:115:63, :117:43]
wire _victimLTE_T_7 = victimLFSR > 10'h37F; // @[Directory.scala:115:63, :117:43]
wire [3:0] victimWay_hi = {_victimLTE_T_7, _victimLTE_T_6, _victimLTE_T_5, _victimLFSR_prng_io_out_9} & {1'h1, ~_victimLTE_T_7, ~_victimLTE_T_6, ~_victimLTE_T_5}; // @[PRNG.scala:91:22]
wire [2:0] _victimWay_T_1 = victimWay_hi[3:1] | {_victimLTE_T_3, |_GEN_0, |_GEN} & {~_victimLFSR_prng_io_out_9, ~_victimLTE_T_3, ~(|_GEN_0)}; // @[PRNG.scala:91:22]
wire [2:0] victimWay = {|victimWay_hi, |(_victimWay_T_1[2:1]), _victimWay_T_1[2] | _victimWay_T_1[0]}; // @[OneHot.scala:30:18, :31:18, :32:{10,14,28}]
wire _view__T_71 = (|_GEN) & ~(|_GEN_0); // @[Directory.scala:117:43, :119:{55,57}, :123:28]
wire _view__T_72 = (|_GEN_0) & ~_victimLTE_T_3; // @[Directory.scala:117:43, :119:{55,57}, :123:28]
wire _view__T_73 = _victimLTE_T_3 & ~_victimLFSR_prng_io_out_9; // @[PRNG.scala:91:22]
wire _view__T_74 = _victimLFSR_prng_io_out_9 & ~_victimLTE_T_5; // @[PRNG.scala:91:22]
wire _view__T_75 = _victimLTE_T_5 & ~_victimLTE_T_6; // @[Directory.scala:117:43, :119:{55,57}, :123:28]
wire _view__T_76 = _victimLTE_T_6 & ~_victimLTE_T_7; // @[Directory.scala:117:43, :119:{55,57}, :123:28] |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
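// A minimal usage sketch (hypothetical values) of two pure-Scala helpers defined above:
// groupByIntoSeq preserves first-seen key order, unlike Seq.groupBy, and trailingZeros
// returns the index of the lowest set bit.
object UtilUsageSketch extends App {
  import freechips.rocketchip.util._
  println(groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2)) // List((1,List(3, 1, 1, 5)), (0,List(4)))
  println(trailingZeros(12))                         // Some(2)
}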
File Breakpoint.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{Cat}
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile.{CoreBundle, HasCoreParameters}
import freechips.rocketchip.util._
class BPControl(implicit p: Parameters) extends CoreBundle()(p) {
val ttype = UInt(4.W)
val dmode = Bool()
val maskmax = UInt(6.W)
val reserved = UInt((xLen - (if (coreParams.useBPWatch) 26 else 24)).W)
val action = UInt((if (coreParams.useBPWatch) 3 else 1).W)
val chain = Bool()
val zero = UInt(2.W)
val tmatch = UInt(2.W)
val m = Bool()
val h = Bool()
val s = Bool()
val u = Bool()
val x = Bool()
val w = Bool()
val r = Bool()
def tType = 2
def maskMax = 4
def enabled(mstatus: MStatus) = !mstatus.debug && Cat(m, h, s, u)(mstatus.prv)
}
class TExtra(implicit p: Parameters) extends CoreBundle()(p) {
def mvalueBits: Int = if (xLen == 32) coreParams.mcontextWidth min 6 else coreParams.mcontextWidth min 13
def svalueBits: Int = if (xLen == 32) coreParams.scontextWidth min 16 else coreParams.scontextWidth min 34
def mselectPos: Int = if (xLen == 32) 25 else 50
def mvaluePos : Int = mselectPos + 1
def sselectPos: Int = 0
def svaluePos : Int = 2
val mvalue = UInt(mvalueBits.W)
val mselect = Bool()
val pad2 = UInt((mselectPos - svalueBits - 2).W)
val svalue = UInt(svalueBits.W)
val pad1 = UInt(1.W)
val sselect = Bool()
}
class BP(implicit p: Parameters) extends CoreBundle()(p) {
val control = new BPControl
val address = UInt(vaddrBits.W)
val textra = new TExtra
def contextMatch(mcontext: UInt, scontext: UInt) =
(if (coreParams.mcontextWidth > 0) (!textra.mselect || (mcontext(textra.mvalueBits-1,0) === textra.mvalue)) else true.B) &&
(if (coreParams.scontextWidth > 0) (!textra.sselect || (scontext(textra.svalueBits-1,0) === textra.svalue)) else true.B)
def mask(dummy: Int = 0) =
(0 until control.maskMax-1).scanLeft(control.tmatch(0))((m, i) => m && address(i)).asUInt
def pow2AddressMatch(x: UInt) =
(~x | mask()) === (~address | mask())
def rangeAddressMatch(x: UInt) =
(x >= address) ^ control.tmatch(0)
def addressMatch(x: UInt) =
Mux(control.tmatch(1), rangeAddressMatch(x), pow2AddressMatch(x))
}
class BPWatch (val n: Int) extends Bundle() {
val valid = Vec(n, Bool())
val rvalid = Vec(n, Bool())
val wvalid = Vec(n, Bool())
val ivalid = Vec(n, Bool())
val action = UInt(3.W)
}
class BreakpointUnit(n: Int)(implicit val p: Parameters) extends Module with HasCoreParameters {
val io = IO(new Bundle {
val status = Input(new MStatus())
val bp = Input(Vec(n, new BP))
val pc = Input(UInt(vaddrBits.W))
val ea = Input(UInt(vaddrBits.W))
val mcontext = Input(UInt(coreParams.mcontextWidth.W))
val scontext = Input(UInt(coreParams.scontextWidth.W))
val xcpt_if = Output(Bool())
val xcpt_ld = Output(Bool())
val xcpt_st = Output(Bool())
val debug_if = Output(Bool())
val debug_ld = Output(Bool())
val debug_st = Output(Bool())
val bpwatch = Output(Vec(n, new BPWatch(1)))
})
io.xcpt_if := false.B
io.xcpt_ld := false.B
io.xcpt_st := false.B
io.debug_if := false.B
io.debug_ld := false.B
io.debug_st := false.B
(io.bpwatch zip io.bp).foldLeft((true.B, true.B, true.B)) { case ((ri, wi, xi), (bpw, bp)) =>
val en = bp.control.enabled(io.status)
val cx = bp.contextMatch(io.mcontext, io.scontext)
val r = en && bp.control.r && bp.addressMatch(io.ea) && cx
val w = en && bp.control.w && bp.addressMatch(io.ea) && cx
val x = en && bp.control.x && bp.addressMatch(io.pc) && cx
val end = !bp.control.chain
val action = bp.control.action
bpw.action := action
bpw.valid(0) := false.B
bpw.rvalid(0) := false.B
bpw.wvalid(0) := false.B
bpw.ivalid(0) := false.B
when (end && r && ri) { io.xcpt_ld := (action === 0.U); io.debug_ld := (action === 1.U); bpw.valid(0) := true.B; bpw.rvalid(0) := true.B }
when (end && w && wi) { io.xcpt_st := (action === 0.U); io.debug_st := (action === 1.U); bpw.valid(0) := true.B; bpw.wvalid(0) := true.B }
when (end && x && xi) { io.xcpt_if := (action === 0.U); io.debug_if := (action === 1.U); bpw.valid(0) := true.B; bpw.ivalid(0) := true.B }
(end || r, end || w, end || x)
}
}
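// A minimal software model (plain Scala, hypothetical values; maskMax = 4 as in BPControl
// above) of BP.pow2AddressMatch: with tmatch(0) set, trailing 1s in `address` extend a
// don't-care mask over the low bits, so the breakpoint covers a naturally aligned
// power-of-two range.
object NapotMatchSketch extends App {
  def mask(address: BigInt, tmatchLsb: Boolean, maskMax: Int = 4): BigInt =
    (0 until maskMax - 1)
      .scanLeft(tmatchLsb)((m, i) => m && address.testBit(i))
      .zipWithIndex
      .map { case (bit, i) => if (bit) BigInt(1) << i else BigInt(0) }
      .reduce(_ | _)
  def pow2AddressMatch(x: BigInt, address: BigInt, tmatchLsb: Boolean): Boolean = {
    val m = mask(address, tmatchLsb)
    (~x | m) == (~address | m)
  }
  // address = 0xB (binary 1011) with tmatch(0) set covers the 8-byte range [0x8, 0xF]
  println(pow2AddressMatch(x = 0x8, address = 0xB, tmatchLsb = true))  // true
  println(pow2AddressMatch(x = 0x10, address = 0xB, tmatchLsb = true)) // false
}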
| module BreakpointUnit( // @[Breakpoint.scala:79:7]
input clock, // @[Breakpoint.scala:79:7]
input reset, // @[Breakpoint.scala:79:7]
input io_status_debug, // @[Breakpoint.scala:80:14]
input io_status_cease, // @[Breakpoint.scala:80:14]
input io_status_wfi, // @[Breakpoint.scala:80:14]
input [31:0] io_status_isa, // @[Breakpoint.scala:80:14]
input io_status_dv, // @[Breakpoint.scala:80:14]
input io_status_v, // @[Breakpoint.scala:80:14]
input io_status_sd, // @[Breakpoint.scala:80:14]
input io_status_mpv, // @[Breakpoint.scala:80:14]
input io_status_gva, // @[Breakpoint.scala:80:14]
input [1:0] io_status_fs, // @[Breakpoint.scala:80:14]
input [1:0] io_status_mpp, // @[Breakpoint.scala:80:14]
input io_status_mpie, // @[Breakpoint.scala:80:14]
input io_status_mie, // @[Breakpoint.scala:80:14]
input io_bp_0_control_dmode, // @[Breakpoint.scala:80:14]
input io_bp_0_control_action, // @[Breakpoint.scala:80:14]
input [1:0] io_bp_0_control_tmatch, // @[Breakpoint.scala:80:14]
input io_bp_0_control_x, // @[Breakpoint.scala:80:14]
input io_bp_0_control_w, // @[Breakpoint.scala:80:14]
input io_bp_0_control_r, // @[Breakpoint.scala:80:14]
input [32:0] io_bp_0_address, // @[Breakpoint.scala:80:14]
input [47:0] io_bp_0_textra_pad2, // @[Breakpoint.scala:80:14]
input io_bp_0_textra_pad1, // @[Breakpoint.scala:80:14]
input [32:0] io_pc, // @[Breakpoint.scala:80:14]
input [32:0] io_ea, // @[Breakpoint.scala:80:14]
output io_xcpt_if, // @[Breakpoint.scala:80:14]
output io_xcpt_ld, // @[Breakpoint.scala:80:14]
output io_xcpt_st, // @[Breakpoint.scala:80:14]
output io_debug_if, // @[Breakpoint.scala:80:14]
output io_debug_ld, // @[Breakpoint.scala:80:14]
output io_debug_st, // @[Breakpoint.scala:80:14]
output io_bpwatch_0_rvalid_0, // @[Breakpoint.scala:80:14]
output io_bpwatch_0_wvalid_0, // @[Breakpoint.scala:80:14]
output io_bpwatch_0_ivalid_0 // @[Breakpoint.scala:80:14]
);
wire io_status_debug_0 = io_status_debug; // @[Breakpoint.scala:79:7]
wire io_status_cease_0 = io_status_cease; // @[Breakpoint.scala:79:7]
wire io_status_wfi_0 = io_status_wfi; // @[Breakpoint.scala:79:7]
wire [31:0] io_status_isa_0 = io_status_isa; // @[Breakpoint.scala:79:7]
wire io_status_dv_0 = io_status_dv; // @[Breakpoint.scala:79:7]
wire io_status_v_0 = io_status_v; // @[Breakpoint.scala:79:7]
wire io_status_sd_0 = io_status_sd; // @[Breakpoint.scala:79:7]
wire io_status_mpv_0 = io_status_mpv; // @[Breakpoint.scala:79:7]
wire io_status_gva_0 = io_status_gva; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_fs_0 = io_status_fs; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_mpp_0 = io_status_mpp; // @[Breakpoint.scala:79:7]
wire io_status_mpie_0 = io_status_mpie; // @[Breakpoint.scala:79:7]
wire io_status_mie_0 = io_status_mie; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_dmode_0 = io_bp_0_control_dmode; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_action_0 = io_bp_0_control_action; // @[Breakpoint.scala:79:7]
wire [1:0] io_bp_0_control_tmatch_0 = io_bp_0_control_tmatch; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_x_0 = io_bp_0_control_x; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_w_0 = io_bp_0_control_w; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_r_0 = io_bp_0_control_r; // @[Breakpoint.scala:79:7]
wire [32:0] io_bp_0_address_0 = io_bp_0_address; // @[Breakpoint.scala:79:7]
wire [47:0] io_bp_0_textra_pad2_0 = io_bp_0_textra_pad2; // @[Breakpoint.scala:79:7]
wire io_bp_0_textra_pad1_0 = io_bp_0_textra_pad1; // @[Breakpoint.scala:79:7]
wire [32:0] io_pc_0 = io_pc; // @[Breakpoint.scala:79:7]
wire [32:0] io_ea_0 = io_ea; // @[Breakpoint.scala:79:7]
wire [1:0] en_hi = 2'h2; // @[Breakpoint.scala:30:56]
wire [3:0] _en_T_1 = 4'h8; // @[Breakpoint.scala:30:56]
wire [3:0] _en_T_2 = 4'h1; // @[Breakpoint.scala:30:68]
wire io_bp_0_control_m = 1'h1; // @[Breakpoint.scala:79:7]
wire _en_T_3 = 1'h1; // @[Breakpoint.scala:30:68]
wire cx = 1'h1; // @[Breakpoint.scala:55:126]
wire end_0 = 1'h1; // @[Breakpoint.scala:109:15]
wire [39:0] io_bp_0_control_reserved = 40'h0; // @[Breakpoint.scala:79:7, :80:14]
wire [5:0] io_bp_0_control_maskmax = 6'h4; // @[Breakpoint.scala:79:7, :80:14]
wire [3:0] io_bp_0_control_ttype = 4'h2; // @[Breakpoint.scala:79:7, :80:14]
wire [1:0] io_status_sxl = 2'h0; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_uxl = 2'h0; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_xs = 2'h0; // @[Breakpoint.scala:79:7]
wire [1:0] io_status_vs = 2'h0; // @[Breakpoint.scala:79:7]
wire [1:0] io_bp_0_control_zero = 2'h0; // @[Breakpoint.scala:79:7]
wire [1:0] en_lo = 2'h0; // @[Breakpoint.scala:30:56]
wire [7:0] io_status_zero1 = 8'h0; // @[Breakpoint.scala:79:7, :80:14]
wire io_status_mbe = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_sbe = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_sd_rv32 = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_tsr = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_tw = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_tvm = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_mxr = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_sum = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_mprv = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_spp = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_ube = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_spie = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_upie = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_hie = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_sie = 1'h0; // @[Breakpoint.scala:79:7]
wire io_status_uie = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_chain = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_h = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_s = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_control_u = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_textra_mselect = 1'h0; // @[Breakpoint.scala:79:7]
wire io_bp_0_textra_sselect = 1'h0; // @[Breakpoint.scala:79:7]
wire [22:0] io_status_zero2 = 23'h0; // @[Breakpoint.scala:79:7, :80:14]
wire [1:0] io_status_dprv = 2'h3; // @[Breakpoint.scala:79:7, :80:14]
wire [1:0] io_status_prv = 2'h3; // @[Breakpoint.scala:79:7, :80:14]
wire _io_debug_ld_T = io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:84]
wire _io_debug_st_T = io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :119:84]
wire _io_debug_if_T = io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :120:84]
wire r; // @[Breakpoint.scala:106:58]
wire w; // @[Breakpoint.scala:107:58]
wire x; // @[Breakpoint.scala:108:58]
wire io_bpwatch_0_valid_0; // @[Breakpoint.scala:79:7]
wire io_bpwatch_0_rvalid_0_0; // @[Breakpoint.scala:79:7]
wire io_bpwatch_0_wvalid_0_0; // @[Breakpoint.scala:79:7]
wire io_bpwatch_0_ivalid_0_0; // @[Breakpoint.scala:79:7]
wire [2:0] io_bpwatch_0_action; // @[Breakpoint.scala:79:7]
wire io_xcpt_if_0; // @[Breakpoint.scala:79:7]
wire io_xcpt_ld_0; // @[Breakpoint.scala:79:7]
wire io_xcpt_st_0; // @[Breakpoint.scala:79:7]
wire io_debug_if_0; // @[Breakpoint.scala:79:7]
wire io_debug_ld_0; // @[Breakpoint.scala:79:7]
wire io_debug_st_0; // @[Breakpoint.scala:79:7]
wire _en_T = ~io_status_debug_0; // @[Breakpoint.scala:30:35, :79:7]
wire en = _en_T; // @[Breakpoint.scala:30:{35,50}]
wire _r_T = en & io_bp_0_control_r_0; // @[Breakpoint.scala:30:50, :79:7, :106:16]
wire _r_T_1 = io_bp_0_control_tmatch_0[1]; // @[Breakpoint.scala:68:23, :79:7]
wire _w_T_1 = io_bp_0_control_tmatch_0[1]; // @[Breakpoint.scala:68:23, :79:7]
wire _x_T_1 = io_bp_0_control_tmatch_0[1]; // @[Breakpoint.scala:68:23, :79:7]
wire _GEN = io_ea_0 >= io_bp_0_address_0; // @[Breakpoint.scala:65:8, :79:7]
wire _r_T_2; // @[Breakpoint.scala:65:8]
assign _r_T_2 = _GEN; // @[Breakpoint.scala:65:8]
wire _w_T_2; // @[Breakpoint.scala:65:8]
assign _w_T_2 = _GEN; // @[Breakpoint.scala:65:8]
wire _r_T_3 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:65:36, :79:7]
wire _r_T_6 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _r_T_16 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _w_T_3 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:65:36, :79:7]
wire _w_T_6 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _w_T_16 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _x_T_3 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:65:36, :79:7]
wire _x_T_6 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _x_T_16 = io_bp_0_control_tmatch_0[0]; // @[Breakpoint.scala:59:56, :65:36, :79:7]
wire _r_T_4 = _r_T_2 ^ _r_T_3; // @[Breakpoint.scala:65:{8,20,36}]
wire [32:0] _r_T_5 = ~io_ea_0; // @[Breakpoint.scala:62:6, :79:7]
wire _r_T_7 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_17 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_7 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_17 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_7 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_17 = io_bp_0_address_0[0]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_8 = _r_T_6 & _r_T_7; // @[Breakpoint.scala:59:{56,73,83}]
wire _r_T_9 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_19 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_9 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_19 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_9 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_19 = io_bp_0_address_0[1]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_10 = _r_T_8 & _r_T_9; // @[Breakpoint.scala:59:{73,83}]
wire _r_T_11 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_21 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_11 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _w_T_21 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_11 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _x_T_21 = io_bp_0_address_0[2]; // @[Breakpoint.scala:59:83, :79:7]
wire _r_T_12 = _r_T_10 & _r_T_11; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] r_lo = {_r_T_8, _r_T_6}; // @[package.scala:45:27]
wire [1:0] r_hi = {_r_T_12, _r_T_10}; // @[package.scala:45:27]
wire [3:0] _r_T_13 = {r_hi, r_lo}; // @[package.scala:45:27]
wire [32:0] _r_T_14 = {_r_T_5[32:4], _r_T_5[3:0] | _r_T_13}; // @[package.scala:45:27]
wire [32:0] _r_T_15 = ~io_bp_0_address_0; // @[Breakpoint.scala:62:24, :79:7]
wire _r_T_18 = _r_T_16 & _r_T_17; // @[Breakpoint.scala:59:{56,73,83}]
wire _r_T_20 = _r_T_18 & _r_T_19; // @[Breakpoint.scala:59:{73,83}]
wire _r_T_22 = _r_T_20 & _r_T_21; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] r_lo_1 = {_r_T_18, _r_T_16}; // @[package.scala:45:27]
wire [1:0] r_hi_1 = {_r_T_22, _r_T_20}; // @[package.scala:45:27]
wire [3:0] _r_T_23 = {r_hi_1, r_lo_1}; // @[package.scala:45:27]
wire [32:0] _r_T_24 = {_r_T_15[32:4], _r_T_15[3:0] | _r_T_23}; // @[package.scala:45:27]
wire _r_T_25 = _r_T_14 == _r_T_24; // @[Breakpoint.scala:62:{9,19,33}]
wire _r_T_26 = _r_T_1 ? _r_T_4 : _r_T_25; // @[Breakpoint.scala:62:19, :65:20, :68:{8,23}]
wire _r_T_27 = _r_T & _r_T_26; // @[Breakpoint.scala:68:8, :106:{16,32}]
assign r = _r_T_27; // @[Breakpoint.scala:106:{32,58}]
assign io_bpwatch_0_rvalid_0_0 = r; // @[Breakpoint.scala:79:7, :106:58]
wire _w_T = en & io_bp_0_control_w_0; // @[Breakpoint.scala:30:50, :79:7, :107:16]
wire _w_T_4 = _w_T_2 ^ _w_T_3; // @[Breakpoint.scala:65:{8,20,36}]
wire [32:0] _w_T_5 = ~io_ea_0; // @[Breakpoint.scala:62:6, :79:7]
wire _w_T_8 = _w_T_6 & _w_T_7; // @[Breakpoint.scala:59:{56,73,83}]
wire _w_T_10 = _w_T_8 & _w_T_9; // @[Breakpoint.scala:59:{73,83}]
wire _w_T_12 = _w_T_10 & _w_T_11; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] w_lo = {_w_T_8, _w_T_6}; // @[package.scala:45:27]
wire [1:0] w_hi = {_w_T_12, _w_T_10}; // @[package.scala:45:27]
wire [3:0] _w_T_13 = {w_hi, w_lo}; // @[package.scala:45:27]
wire [32:0] _w_T_14 = {_w_T_5[32:4], _w_T_5[3:0] | _w_T_13}; // @[package.scala:45:27]
wire [32:0] _w_T_15 = ~io_bp_0_address_0; // @[Breakpoint.scala:62:24, :79:7]
wire _w_T_18 = _w_T_16 & _w_T_17; // @[Breakpoint.scala:59:{56,73,83}]
wire _w_T_20 = _w_T_18 & _w_T_19; // @[Breakpoint.scala:59:{73,83}]
wire _w_T_22 = _w_T_20 & _w_T_21; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] w_lo_1 = {_w_T_18, _w_T_16}; // @[package.scala:45:27]
wire [1:0] w_hi_1 = {_w_T_22, _w_T_20}; // @[package.scala:45:27]
wire [3:0] _w_T_23 = {w_hi_1, w_lo_1}; // @[package.scala:45:27]
wire [32:0] _w_T_24 = {_w_T_15[32:4], _w_T_15[3:0] | _w_T_23}; // @[package.scala:45:27]
wire _w_T_25 = _w_T_14 == _w_T_24; // @[Breakpoint.scala:62:{9,19,33}]
wire _w_T_26 = _w_T_1 ? _w_T_4 : _w_T_25; // @[Breakpoint.scala:62:19, :65:20, :68:{8,23}]
wire _w_T_27 = _w_T & _w_T_26; // @[Breakpoint.scala:68:8, :107:{16,32}]
assign w = _w_T_27; // @[Breakpoint.scala:107:{32,58}]
assign io_bpwatch_0_wvalid_0_0 = w; // @[Breakpoint.scala:79:7, :107:58]
wire _x_T = en & io_bp_0_control_x_0; // @[Breakpoint.scala:30:50, :79:7, :108:16]
wire _x_T_2 = io_pc_0 >= io_bp_0_address_0; // @[Breakpoint.scala:65:8, :79:7]
wire _x_T_4 = _x_T_2 ^ _x_T_3; // @[Breakpoint.scala:65:{8,20,36}]
wire [32:0] _x_T_5 = ~io_pc_0; // @[Breakpoint.scala:62:6, :79:7]
wire _x_T_8 = _x_T_6 & _x_T_7; // @[Breakpoint.scala:59:{56,73,83}]
wire _x_T_10 = _x_T_8 & _x_T_9; // @[Breakpoint.scala:59:{73,83}]
wire _x_T_12 = _x_T_10 & _x_T_11; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] x_lo = {_x_T_8, _x_T_6}; // @[package.scala:45:27]
wire [1:0] x_hi = {_x_T_12, _x_T_10}; // @[package.scala:45:27]
wire [3:0] _x_T_13 = {x_hi, x_lo}; // @[package.scala:45:27]
wire [32:0] _x_T_14 = {_x_T_5[32:4], _x_T_5[3:0] | _x_T_13}; // @[package.scala:45:27]
wire [32:0] _x_T_15 = ~io_bp_0_address_0; // @[Breakpoint.scala:62:24, :79:7]
wire _x_T_18 = _x_T_16 & _x_T_17; // @[Breakpoint.scala:59:{56,73,83}]
wire _x_T_20 = _x_T_18 & _x_T_19; // @[Breakpoint.scala:59:{73,83}]
wire _x_T_22 = _x_T_20 & _x_T_21; // @[Breakpoint.scala:59:{73,83}]
wire [1:0] x_lo_1 = {_x_T_18, _x_T_16}; // @[package.scala:45:27]
wire [1:0] x_hi_1 = {_x_T_22, _x_T_20}; // @[package.scala:45:27]
wire [3:0] _x_T_23 = {x_hi_1, x_lo_1}; // @[package.scala:45:27]
wire [32:0] _x_T_24 = {_x_T_15[32:4], _x_T_15[3:0] | _x_T_23}; // @[package.scala:45:27]
wire _x_T_25 = _x_T_14 == _x_T_24; // @[Breakpoint.scala:62:{9,19,33}]
wire _x_T_26 = _x_T_1 ? _x_T_4 : _x_T_25; // @[Breakpoint.scala:62:19, :65:20, :68:{8,23}]
wire _x_T_27 = _x_T & _x_T_26; // @[Breakpoint.scala:68:8, :108:{16,32}]
assign x = _x_T_27; // @[Breakpoint.scala:108:{32,58}]
assign io_bpwatch_0_ivalid_0_0 = x; // @[Breakpoint.scala:79:7, :108:58]
assign io_bpwatch_0_action = {2'h0, io_bp_0_control_action_0}; // @[Breakpoint.scala:79:7, :112:16]
wire _io_xcpt_ld_T = ~io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:51]
assign io_xcpt_ld_0 = r & _io_xcpt_ld_T; // @[Breakpoint.scala:79:7, :97:14, :106:58, :118:{27,40,51}]
assign io_debug_ld_0 = r & _io_debug_ld_T; // @[Breakpoint.scala:79:7, :100:15, :106:58, :118:{27,73,84}]
wire _io_xcpt_st_T = ~io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:51, :119:51]
assign io_xcpt_st_0 = w & _io_xcpt_st_T; // @[Breakpoint.scala:79:7, :98:14, :107:58, :119:{27,40,51}]
assign io_debug_st_0 = w & _io_debug_st_T; // @[Breakpoint.scala:79:7, :101:15, :107:58, :119:{27,73,84}]
wire _io_xcpt_if_T = ~io_bp_0_control_action_0; // @[Breakpoint.scala:79:7, :118:51, :120:51]
assign io_xcpt_if_0 = x & _io_xcpt_if_T; // @[Breakpoint.scala:79:7, :96:14, :108:58, :120:{27,40,51}]
assign io_debug_if_0 = x & _io_debug_if_T; // @[Breakpoint.scala:79:7, :99:15, :108:58, :120:{27,73,84}]
assign io_bpwatch_0_valid_0 = x | w | r; // @[Breakpoint.scala:79:7, :106:58, :107:58, :108:58, :118:27, :119:{27,107}, :120:{27,107}]
assign io_xcpt_if = io_xcpt_if_0; // @[Breakpoint.scala:79:7]
assign io_xcpt_ld = io_xcpt_ld_0; // @[Breakpoint.scala:79:7]
assign io_xcpt_st = io_xcpt_st_0; // @[Breakpoint.scala:79:7]
assign io_debug_if = io_debug_if_0; // @[Breakpoint.scala:79:7]
assign io_debug_ld = io_debug_ld_0; // @[Breakpoint.scala:79:7]
assign io_debug_st = io_debug_st_0; // @[Breakpoint.scala:79:7]
assign io_bpwatch_0_rvalid_0 = io_bpwatch_0_rvalid_0_0; // @[Breakpoint.scala:79:7]
assign io_bpwatch_0_wvalid_0 = io_bpwatch_0_wvalid_0_0; // @[Breakpoint.scala:79:7]
assign io_bpwatch_0_ivalid_0 = io_bpwatch_0_ivalid_0_0; // @[Breakpoint.scala:79:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
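// For example, ShiftRegInit(sig, 2, false.B, Some("sync")) elaborates to two
// chained RegNext stages: sig drives the register named "sync_1", which drives
// "sync_0", whose output is returned -- a plain behavioral delay chain with a
// defined reset value, in contrast to the module wrappers below.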
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
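// Consecutive Gray-code values differ in exactly one bit, so this counter can
// be sampled through a multi-flop synchronizer in another clock domain without
// ever capturing a torn multi-bit value; the AsyncQueue below uses it for both
// the write pointer ("widx_bin") and the read pointer ("ridx_bin").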
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
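// Full test in Gray space: the queue is full when the write pointer is `depth`
// entries ahead of the read pointer, which for Gray-coded pointers is when widx
// equals ridx with its two MSBs inverted (depth | depth >> 1 masks exactly
// those two bits), so `ready` deasserts in that case.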
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Such an assertion is impossible to write here: a dequeue can occur on the
// receiving side, after which a reset is legal, but the write side has no
// way of knowing that the dequeue occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
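// Empty test: the queue is empty exactly when the two Gray pointers are equal,
// so `valid` deasserts in that case.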
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
// The register only latches when the selected valued is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that this bits register latches invalid data while the source domain is reset or has its power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_81( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_91 io_out_source_valid_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File LoopConv.scala:
package gemmini
import chisel3._
import chisel3.util._
import chisel3.experimental._
import freechips.rocketchip.tile.RoCCCommand
import org.chipsalliance.cde.config.Parameters
import GemminiISA._
import LocalAddr._
import Util._
class LoopConvOuterBounds(val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int) extends Bundle {
val batch_size = UInt(large_iterator_bitwidth.W)
val in_row_dim = UInt(small_iterator_bitwidth.W)
val in_col_dim = UInt(small_iterator_bitwidth.W)
val in_channels = UInt(large_iterator_bitwidth.W)
val out_channels = UInt(large_iterator_bitwidth.W)
val out_col_dim = UInt(large_iterator_bitwidth.W)
val out_row_dim = UInt(large_iterator_bitwidth.W)
val out_stride = UInt(large_iterator_bitwidth.W) //stride for output activation
val in_stride = UInt(large_iterator_bitwidth.W) //stride for input activation
val weight_stride = UInt(large_iterator_bitwidth.W) //stride for weight
val pool_out_row_dim = UInt(small_iterator_bitwidth.W)
val pool_out_col_dim = UInt(small_iterator_bitwidth.W)
val stride = UInt(tiny_iterator_bitwidth.W)
val padding = UInt(tiny_iterator_bitwidth.W)
val kernel_dim = UInt(tiny_iterator_bitwidth.W)
val kernel_dilation = UInt(tiny_iterator_bitwidth.W)
val pool_size = UInt(tiny_iterator_bitwidth.W)
val pool_stride = UInt(tiny_iterator_bitwidth.W)
val pool_padding = UInt(tiny_iterator_bitwidth.W)
}
class LoopConvInnerBounds(val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int) extends Bundle {
val batches = UInt(large_iterator_bitwidth.W)
val porows = UInt(small_iterator_bitwidth.W)
val pocols = UInt(small_iterator_bitwidth.W)
val pochs = UInt(large_iterator_bitwidth.W)
val krows = UInt(tiny_iterator_bitwidth.W)
val kcols = UInt(tiny_iterator_bitwidth.W)
val kchs = UInt(large_iterator_bitwidth.W)
val lpad = UInt(tiny_iterator_bitwidth.W)
val rpad = UInt(tiny_iterator_bitwidth.W)
val upad = UInt(tiny_iterator_bitwidth.W)
val dpad = UInt(tiny_iterator_bitwidth.W)
val plpad = UInt(tiny_iterator_bitwidth.W)
val prad = UInt(tiny_iterator_bitwidth.W)
val pupad = UInt(tiny_iterator_bitwidth.W)
val pdpad = UInt(tiny_iterator_bitwidth.W)
val orows = UInt(small_iterator_bitwidth.W)
val ocols = UInt(small_iterator_bitwidth.W)
}
class LoopConvDerivedParams(val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int) extends Bundle {
val ochs = UInt(large_iterator_bitwidth.W)
val irows = UInt(small_iterator_bitwidth.W)
val icols = UInt(small_iterator_bitwidth.W)
val irows_unpadded = UInt(small_iterator_bitwidth.W)
val icols_unpadded = UInt(small_iterator_bitwidth.W)
val ichs = UInt(large_iterator_bitwidth.W)
val out_channels_per_bank = UInt(small_iterator_bitwidth.W) // TODO this won't work for systolic arrays above 256 in size
val in_channels_per_bank = UInt(small_iterator_bitwidth.W) // TODO this won't work for systolic arrays above 256 in size
val bias_spad_stride = UInt(large_iterator_bitwidth.W)
val input_spad_stride = UInt(large_iterator_bitwidth.W)
val weight_spad_stride = UInt(large_iterator_bitwidth.W)
// val ex_overwrite = Bool()
}
class LoopConvLdBiasReq(val coreMaxAddrBits: Int, val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int, val max_acc_addr: Int, val concurrent_loops: Int) extends Bundle {
val outer_bounds = new LoopConvOuterBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val inner_bounds = new LoopConvInnerBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val derived_params = new LoopConvDerivedParams(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val addr_start = UInt(log2Up(max_acc_addr).W)
val dram_addr = UInt(coreMaxAddrBits.W)
val no_bias = Bool()
val loop_id = UInt(log2Up(concurrent_loops).W)
}
class LoopConvLdBias(block_size: Int, coreMaxAddrBits: Int, large_iterator_bitwidth: Int, small_iterator_bitwidth: Int, tiny_iterator_bitwidth: Int, max_acc_addr: Int, acc_w: Int,
max_block_len_acc: Int, concurrent_loops: Int, latency: Int,
config_mvin_rs1_t: ConfigMvinRs1, mvin_rs2_t: MvinRs2)(implicit p: Parameters) extends Module {
val MVIN_SCALE_IDENTITY = 0x3f800000.U // TODO get this from configs somehow
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopConvLdBiasReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_acc_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val wait_for_prev_loop = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, config, ld = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopConvLdBiasReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_acc_addr, concurrent_loops))
import req.inner_bounds._
import req.derived_params._
val acc_addr_start = req.addr_start
// Derived parameters
val max_ochs_per_mvin = Mux(ochs < (max_block_len_acc * block_size).U, ochs, (max_block_len_acc * block_size).U)
val skip = req.dram_addr === 0.U
// Iterators
val b = Reg(UInt(large_iterator_bitwidth.W))
val orow = Reg(UInt(small_iterator_bitwidth.W))
val ocol = Reg(UInt(small_iterator_bitwidth.W))
val och = Reg(UInt(large_iterator_bitwidth.W))
// Addresses
val dram_offset = och * (acc_w/8).U
val dram_addr = Mux(req.no_bias, 0.U, req.dram_addr + LoopConv.castDramOffset(dram_offset))
val spad_addr = acc_addr_start +& (och / block_size.U(och.getWidth.W)) * batches * orows * ocols +& b * orows * ocols +& orow * ocols +& ocol
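// Accumulator layout implied by this formula: output channels are grouped into
// blocks of block_size, each block occupying a contiguous batches*orows*ocols
// region that is addressed row-major as (b, orow, ocol) above acc_addr_start.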
// Sizes
val I = Mux(ocols - ocol > block_size.U, block_size.U, ocols - ocol)
val J = Mux(ochs - och > max_ochs_per_mvin, max_ochs_per_mvin, ochs - och)
class RoCCCommandWithAddr extends Bundle {
val cmd = new RoCCCommand
val dram_addr = UInt()
val spad_addr = UInt()
val I = UInt()
val J = UInt()
}
val command_p = Module(new Pipeline[RoCCCommandWithAddr](new RoCCCommandWithAddr, latency)())
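// The selected command plus its raw address/size operands travel through a
// `latency`-deep Pipeline; rs1/rs2 are only assembled at the pipeline output
// below, presumably so the wide address multiplies above are not in the same
// cycle as the RoCC command handshake.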
// Commands
val config_cmd = Wire(new RoCCCommand)
config_cmd := DontCare
config_cmd.inst.funct := CONFIG_CMD
val config_cmd_rs1 = Wire(config_mvin_rs1_t.cloneType)
config_cmd_rs1 := DontCare
config_cmd_rs1.scale := MVIN_SCALE_IDENTITY
config_cmd_rs1.stride := req.derived_params.bias_spad_stride
config_cmd_rs1.pixel_repeats := 1.U
config_cmd_rs1.state_id := 2.U
config_cmd_rs1.shrink := 0.U
config_cmd_rs1._unused := 1.U
config_cmd.rs1 := config_cmd_rs1.asUInt
config_cmd.rs2 := 0.U
val mvin_cmd = Wire(new RoCCCommand)
mvin_cmd := DontCare
mvin_cmd.inst.funct := LOAD3_CMD
mvin_cmd.rs1 := 0.U
mvin_cmd.rs2 := 0.U
// Inputs and outputs
io.req.ready := state === idle && !command_p.io.busy
io.idle := state === idle && !command_p.io.busy
io.loop_id := req.loop_id
command_p.io.in.valid := state =/= idle && !io.wait_for_prev_loop && !skip
command_p.io.in.bits.cmd := Mux(state === config, config_cmd, mvin_cmd)
command_p.io.in.bits.dram_addr := dram_addr
command_p.io.in.bits.spad_addr := spad_addr
command_p.io.in.bits.I := I
command_p.io.in.bits.J := J
command_p.io.out.ready := io.cmd.ready && !io.rob_overloaded
io.cmd.valid := command_p.io.out.valid && !io.rob_overloaded
io.cmd.bits := command_p.io.out.bits.cmd
when (command_p.io.out.bits.cmd.inst.funct === LOAD3_CMD) {
val o = command_p.io.out.bits
io.cmd.bits.rs1 := o.dram_addr
val mvin_cmd_rs2 = Wire(mvin_rs2_t.cloneType)
mvin_cmd_rs2 := DontCare
mvin_cmd_rs2.num_rows := o.I.asUInt
mvin_cmd_rs2.num_cols := o.J.asUInt
mvin_cmd_rs2.local_addr := cast_to_acc_addr(mvin_cmd_rs2.local_addr, o.spad_addr, accumulate = false.B, read_full = false.B)
io.cmd.bits.rs2 := mvin_cmd_rs2.asUInt
}
// Sending outputs
when (skip) {
state := idle
}.elsewhen(command_p.io.in.fire) {
when (state === config) {
state := ld
}.otherwise {
val next_och = floorAdd(och, max_ochs_per_mvin, ochs)
val next_ocol = floorAdd(ocol, block_size.U, ocols, next_och === 0.U)
val next_orow = floorAdd(orow, 1.U, orows, next_ocol === 0.U && next_och === 0.U)
val next_b = floorAdd(b, 1.U, batches, next_orow === 0.U && next_ocol === 0.U && next_och === 0.U)
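// floorAdd wraps each counter to zero at its bound, and each outer counter only
// advances when every inner counter has just wrapped, so this is a hardware
// nested loop over (b, orow, ocol, och) with och innermost; when all counters
// wrap together the bias load is complete and the FSM returns to idle.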
och := next_och
ocol := next_ocol
orow := next_orow
b := next_b
state := Mux(next_b === 0.U && next_orow === 0.U && next_ocol === 0.U && next_och === 0.U,
idle, ld)
}
}
// Accepting requests
when (io.req.fire) {
req := io.req.bits
state := config
b := 0.U
orow := 0.U
ocol := 0.U
och := 0.U
}
}
class LoopConvLdInputReq(val coreMaxAddrBits: Int, val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int, val max_acc_addr: Int, val concurrent_loops: Int) extends Bundle {
val outer_bounds = new LoopConvOuterBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val inner_bounds = new LoopConvInnerBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val derived_params = new LoopConvDerivedParams(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val addr_start = UInt(log2Up(max_acc_addr).W)
val dram_addr = UInt(coreMaxAddrBits.W)
val downsample = Bool()
val max_pixels_per_row = UInt(small_iterator_bitwidth.W)
val input_dilated = Bool()
val trans_input_3120 = Bool()
val loop_id = UInt(log2Up(concurrent_loops).W)
}
class LoopConvLdInput(block_size: Int, coreMaxAddrBits: Int, large_iterator_bitwidth: Int, small_iterator_bitwidth: Int,
tiny_iterator_bitwidth: Int, max_addr: Int, input_w: Int, max_block_len: Int,
concurrent_loops: Int, latency: Int, config_mvin_rs1_t: ConfigMvinRs1, mvin_rs2_t: MvinRs2)
(implicit p: Parameters) extends Module {
val MVIN_SCALE_IDENTITY = 0x3f800000.U // TODO get this from configs somehow
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopConvLdInputReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val wait_for_prev_loop = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, config, ld = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopConvLdInputReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, concurrent_loops))
import req.outer_bounds._
import req.inner_bounds._
import req.derived_params._
def undilated(x: UInt): UInt = (x +& req.input_dilated) >> req.input_dilated
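// With input dilation enabled this computes ceil(x / 2), since every other
// element of the dilated input is an implicit zero; with dilation off it is
// just x.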
// Derived parameters
val max_ichs_per_mvin = Mux(ichs < (max_block_len * block_size).U, ichs, (max_block_len * block_size).U).zext
val max_batches_per_mvin = Mux(batches < (max_block_len * block_size).U, batches, (max_block_len * block_size).U).zext
val max_chs_per_mvin = Mux(req.trans_input_3120, max_batches_per_mvin, max_ichs_per_mvin)
// Iterators
val b = Reg(SInt(large_iterator_bitwidth.W))
val irow = Reg(SInt(small_iterator_bitwidth.W))
val icol = Reg(SInt(small_iterator_bitwidth.W))
val ich = Reg(SInt(large_iterator_bitwidth.W))
// Calculated params
val irow_padded = irow +& undilated(upad).zext
val icol_padded = icol +& undilated(lpad).zext
val is_zeros = irow < 0.S || irow >= irows_unpadded.zext || icol < 0.S || icol >= icols_unpadded.zext
val dram_stride = Mux(req.trans_input_3120, batch_size * (input_w/8).U, in_stride * (input_w/8).U)
// Addresses
val dram_offset = Mux(req.trans_input_3120, (((ich * in_col_dim * in_row_dim +& irow*in_col_dim +& icol) * batches +& b) * (input_w/8).U).asUInt,
(((b * in_row_dim * in_col_dim +& irow*in_col_dim +& icol) * in_stride +& ich) * (input_w/8).U).asUInt)
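// DRAM layout: the default case walks an NHWC tensor -- (b, irow, icol) selects
// a pixel, in_stride is the per-pixel pitch, and ich indexes within the pixel;
// with trans_input_3120 the tensor is traversed channel-major with the batch
// index innermost, matching the batch-sized dram_stride chosen above.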
val dram_addr = Mux(is_zeros, 0.U, req.dram_addr + LoopConv.castDramOffset(dram_offset))
val spad_addr = Mux(req.trans_input_3120,
// To prevent Verilator errors, we replace some "/ block_size.U" calls here with ">> log2Up(block_size)"
req.addr_start.zext +& (b >> log2Up(block_size)) * input_spad_stride +& ich * (irows >> req.downsample) * (icols >> req.downsample) +& (irow_padded >> req.downsample) * (icols >> req.downsample) +& (icol_padded >> req.downsample),
req.addr_start.zext +& (ich >> log2Up(block_size)) * input_spad_stride +& b * (irows >> req.downsample) * (icols >> req.downsample) +& (irow_padded >> req.downsample) * (icols >> req.downsample) +& (icol_padded >> req.downsample))
// Sizes
val block_size_downsampled = (block_size.U << req.downsample).asUInt.zext
val I = MuxCase(
Mux(icols_unpadded.zext -& icol > block_size_downsampled, block_size_downsampled, icols_unpadded.zext -& icol),
Seq(
(icol < 0.S) -> Mux((0.S-&icol) > block_size.S, block_size.S, 0.S-&icol),
(icol >= icols_unpadded.zext) -> Mux(icols_unpadded.zext +& undilated(rpad).zext -& icol > block_size.S, block_size.S, icols_unpadded.zext +& undilated(rpad).zext -& icol)
)
)
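// I is the number of input columns covered by this mvin, clamped to one block:
// a run of left zero-padding (icol < 0), a run of right zero-padding
// (icol >= icols_unpadded), or a run of real input columns in between.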
val K = Mux(req.trans_input_3120,
Mux(batches.zext -& b > max_chs_per_mvin, max_chs_per_mvin, batches.zext -& b),
Mux(ichs.zext -& ich > max_chs_per_mvin, max_chs_per_mvin, ichs.zext -& ich))
class RoCCCommandWithAddr extends Bundle {
val cmd = new RoCCCommand
val dram_addr = UInt()
val spad_addr = SInt()
val I = SInt()
val K = SInt()
}
val command_p = Module(new Pipeline[RoCCCommandWithAddr](new RoCCCommandWithAddr, latency)())
// Commands
val config_cmd = Wire(new RoCCCommand)
config_cmd := DontCare
config_cmd.inst.funct := CONFIG_CMD
val config_cmd_rs1 = Wire(config_mvin_rs1_t.cloneType)
config_cmd_rs1 := DontCare
config_cmd_rs1.scale := MVIN_SCALE_IDENTITY
config_cmd_rs1.stride := input_spad_stride
config_cmd_rs1.pixel_repeats := req.max_pixels_per_row
config_cmd_rs1.state_id := 0.U
config_cmd_rs1.shrink := 0.U
config_cmd_rs1._unused := 1.U
config_cmd.rs1 := config_cmd_rs1.asUInt
config_cmd.rs2 := dram_stride << req.downsample
val mvin_cmd = Wire(new RoCCCommand)
mvin_cmd := DontCare
mvin_cmd.inst.funct := LOAD_CMD
mvin_cmd.rs1 := 0.U // dram_addr
mvin_cmd.rs2 := 0.U // mvin_cmd_rs2
// Inputs and outputs
io.req.ready := state === idle && !command_p.io.busy
io.idle := state === idle && !command_p.io.busy
io.loop_id := req.loop_id
command_p.io.in.valid := state =/= idle && !io.wait_for_prev_loop && (req.dram_addr =/= 0.U)
command_p.io.in.bits.cmd := Mux(state === config, config_cmd, mvin_cmd)
command_p.io.in.bits.dram_addr := dram_addr
command_p.io.in.bits.spad_addr := spad_addr
command_p.io.in.bits.I := I
command_p.io.in.bits.K := K
command_p.io.out.ready := io.cmd.ready && !io.rob_overloaded
io.cmd.valid := command_p.io.out.valid && !io.rob_overloaded
io.cmd.bits := command_p.io.out.bits.cmd
when (command_p.io.out.bits.cmd.inst.funct === LOAD_CMD) {
val o = command_p.io.out.bits
io.cmd.bits.rs1 := o.dram_addr
val mvin_cmd_rs2 = Wire(mvin_rs2_t.cloneType)
mvin_cmd_rs2 := DontCare
mvin_cmd_rs2.num_rows := (o.I >> req.downsample).asUInt
mvin_cmd_rs2.num_cols := o.K.asUInt
mvin_cmd_rs2.local_addr := cast_to_sp_addr(mvin_cmd_rs2.local_addr, o.spad_addr)
io.cmd.bits.rs2 := mvin_cmd_rs2.asUInt
}
// Sending outputs
when(req.dram_addr === 0.U){
state := idle
}.elsewhen(command_p.io.in.fire) {
when (state === config) {
state := ld
}.otherwise {
val b_it = Mux(req.trans_input_3120, max_chs_per_mvin.asUInt, 1.U)
val ich_it = Mux(req.trans_input_3120, 1.U, max_chs_per_mvin.asUInt)
val next_ich = sFloorAdd(ich, ich_it, ichs.zext, 0.S)
val next_icol = sFloorAdd(icol, I.asUInt, (icols_unpadded +& undilated(rpad)).zext, 0.S-&undilated(lpad).zext,
next_ich === 0.S)
val next_irow = sFloorAdd(irow, 1.U << req.downsample, (irows_unpadded +& undilated(dpad)).zext, 0.S-&undilated(upad).zext,
next_icol === 0.S-&undilated(lpad).zext && next_ich === 0.S)
val next_b = sFloorAdd(b, b_it, batches.zext, 0.S,
next_irow === 0.S-&undilated(upad).zext && next_icol === 0.S-&undilated(lpad).zext && next_ich === 0.S)
ich := next_ich
icol := next_icol
irow := next_irow
b := next_b
state := Mux(next_b === 0.S && next_irow === 0.S-&undilated(upad).zext && next_icol === 0.S-&undilated(lpad).zext && next_ich === 0.S,
idle, ld)
}
}
// Accepting requests
when (io.req.fire) {
req := io.req.bits
state := config
b := 0.S
irow := 0.S -& ((io.req.bits.inner_bounds.upad +& io.req.bits.input_dilated) >> io.req.bits.input_dilated).zext
icol := 0.S -& ((io.req.bits.inner_bounds.lpad +& io.req.bits.input_dilated) >> io.req.bits.input_dilated).zext
ich := 0.S
}
}
class LoopConvLdWeightReq(val coreMaxAddrBits: Int, val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int, val max_addr: Int, val concurrent_loops: Int) extends Bundle {
val outer_bounds = new LoopConvOuterBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val inner_bounds = new LoopConvInnerBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val derived_params = new LoopConvDerivedParams(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val addr_end = UInt(log2Up(max_addr+1).W)
val dram_addr = UInt(coreMaxAddrBits.W)
val trans_weight_1203 = Bool()
val trans_weight_0132 = Bool()
val dw = Bool()
val loop_id = UInt(log2Up(concurrent_loops).W)
}
class LoopConvLdWeight(block_size: Int, coreMaxAddrBits: Int, large_iterator_bitwidth: Int,
small_iterator_bitwidth: Int, tiny_iterator_bitwidth: Int, max_addr: Int, input_w: Int,
max_block_len: Int, concurrent_loops: Int, latency: Int, config_mvin_rs1_t: ConfigMvinRs1,
mvin_rs2_t: MvinRs2)(implicit p: Parameters) extends Module {
val MVIN_SCALE_IDENTITY = 0x3f800000.U // TODO get this from configs somehow
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopConvLdWeightReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val wait_for_prev_loop = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, config, ld = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopConvLdWeightReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, concurrent_loops))
import req.outer_bounds._
import req.inner_bounds._
import req.derived_params._
// Derived parameters
val max_chs_per_mvin = {
val max_ochs_per_mvin = Mux(ochs < (max_block_len * block_size).U, ochs, (max_block_len * block_size).U)
val max_kchs_per_mvin = Mux(kchs < (max_block_len * block_size).U, kchs, (max_block_len * block_size).U)
Mux(req.trans_weight_0132, max_kchs_per_mvin, max_ochs_per_mvin)
}
val B_rows = Mux(req.trans_weight_0132, in_channels_per_bank * kcols * krows * ochs,
out_channels_per_bank * kcols * krows * kchs)
val addr_start = req.addr_end - B_rows
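// Weights are packed against the end of the scratchpad region given to this
// loop, so the start address is addr_end minus the number of scratchpad rows
// (B_rows) that the weight tile occupies.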
val dram_stride = MuxCase(weight_stride, Seq(
req.dw -> 1.U,
req.trans_weight_1203 -> (kernel_dim * kernel_dim * out_channels),
req.trans_weight_0132 -> in_channels
)) * (input_w/8).U
// Iterators
val och = Reg(UInt(large_iterator_bitwidth.W))
val krow = Reg(UInt(tiny_iterator_bitwidth.W))
val kcol = Reg(UInt(tiny_iterator_bitwidth.W))
val kch = Reg(UInt(large_iterator_bitwidth.W))
// Addresses
val dram_offset = MuxCase(((krow*kernel_dim*in_channels +& kcol*in_channels +& kch) * weight_stride +& och) * (input_w/8).U, Seq(
req.dw -> (krow * kernel_dim +& kcol) * (input_w/8).U,
req.trans_weight_1203 -> (((kch*kernel_dim*kernel_dim +& krow*kernel_dim +& kcol) * out_channels +& och) * (input_w/8).U),
req.trans_weight_0132 -> (((krow*kernel_dim*out_channels +& kcol*out_channels +& och) * in_channels +& kch) * (input_w/8).U)
))
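// Weight DRAM layouts handled above: the default is (krow, kcol, kch, och) with
// weight_stride as the pitch of the innermost och dimension; dw indexes only
// the 2-D kernel; trans_weight_1203 reads a (kch, krow, kcol, och) tensor; and
// trans_weight_0132 reads a (krow, kcol, och, kch) tensor.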
val dram_addr = req.dram_addr + LoopConv.castDramOffset(dram_offset)
val spad_addr = Mux(req.trans_weight_0132,
// The width expansions are added here solely to prevent Verilator's "WIDTH" warnings, despite making the code uglier
addr_start + (kch / block_size.U(kch.getWidth.W)) * krows * kcols * ochs + krow * kcols * ochs + kcol * ochs + och,
addr_start + (och / block_size.U(och.getWidth.W)) * krows * kcols * kchs + krow * kcols * kchs + kcol * kchs + kch)
// Sizes
val J = Mux(req.trans_weight_0132,
Mux(kchs - kch > max_chs_per_mvin, max_chs_per_mvin, kchs - kch),
Mux(ochs - och > max_chs_per_mvin, max_chs_per_mvin, ochs - och))
val K = Mux(req.trans_weight_0132,
Mux(ochs - och > block_size.U, block_size.U, ochs - och),
Mux(kchs - kch > block_size.U, block_size.U, kchs - kch))
class RoCCCommandWithAddr extends Bundle {
val cmd = new RoCCCommand
val dram_addr = UInt()
val spad_addr = UInt()
val K = UInt()
val J = UInt()
}
val command_p = Module(new Pipeline[RoCCCommandWithAddr](new RoCCCommandWithAddr, latency)())
// Commands
val config_cmd = Wire(new RoCCCommand)
config_cmd := DontCare
config_cmd.inst.funct := CONFIG_CMD
val config_cmd_rs1 = Wire(config_mvin_rs1_t.cloneType)
config_cmd_rs1 := DontCare
config_cmd_rs1.scale := MVIN_SCALE_IDENTITY
config_cmd_rs1.stride := req.derived_params.weight_spad_stride
config_cmd_rs1.pixel_repeats := 1.U
config_cmd_rs1.state_id := 1.U
config_cmd_rs1.shrink := 0.U
config_cmd_rs1._unused := 1.U
config_cmd.rs1 := config_cmd_rs1.asUInt
config_cmd.rs2 := dram_stride
val mvin_cmd = Wire(new RoCCCommand)
mvin_cmd := DontCare
mvin_cmd.inst.funct := LOAD2_CMD
mvin_cmd.rs1 := 0.U // dram_addr
mvin_cmd.rs2 := 0.U // mvin_cmd_rs2
// Inputs and outputs
io.req.ready := state === idle && !command_p.io.busy
io.idle := state === idle && !command_p.io.busy
io.loop_id := req.loop_id
command_p.io.in.valid := state =/= idle && !io.wait_for_prev_loop && (req.dram_addr =/= 0.U)
command_p.io.in.bits.cmd := Mux(state === config, config_cmd, mvin_cmd)
command_p.io.in.bits.dram_addr := dram_addr
command_p.io.in.bits.spad_addr := spad_addr
command_p.io.in.bits.K := K
command_p.io.in.bits.J := J
command_p.io.out.ready := io.cmd.ready && !io.rob_overloaded
io.cmd.valid := command_p.io.out.valid && !io.rob_overloaded
io.cmd.bits := command_p.io.out.bits.cmd
when (command_p.io.out.bits.cmd.inst.funct === LOAD2_CMD) {
val o = command_p.io.out.bits
io.cmd.bits.rs1 := o.dram_addr
val mvin_cmd_rs2 = Wire(mvin_rs2_t.cloneType)
mvin_cmd_rs2 := DontCare
mvin_cmd_rs2.num_rows := o.K
mvin_cmd_rs2.num_cols := o.J
mvin_cmd_rs2.local_addr := cast_to_sp_addr(mvin_cmd_rs2.local_addr, o.spad_addr)
io.cmd.bits.rs2 := mvin_cmd_rs2.asUInt
}
// Sending outputs
when(req.dram_addr === 0.U){
state := idle
}.elsewhen(command_p.io.in.fire) {
when (state === config) {
state := ld
}.otherwise {
val och_it = Mux(req.trans_weight_0132, block_size.U, max_chs_per_mvin)
val kch_it = Mux(req.trans_weight_0132, max_chs_per_mvin, block_size.U)
val next_kch = floorAdd(kch, kch_it, kchs)
val next_kcol = floorAdd(kcol, 1.U, kcols, next_kch === 0.U)
val next_krow = floorAdd(krow, 1.U, krows, next_kcol === 0.U && next_kch === 0.U)
val next_och = floorAdd(och, och_it, ochs, next_krow === 0.U && next_kcol === 0.U && next_kch === 0.U)
kch := next_kch
kcol := next_kcol
krow := next_krow
och := next_och
state := Mux(next_och === 0.U && next_krow === 0.U && next_kcol === 0.U && next_kch === 0.U,
idle, ld)
}
}
// Accepting requests
when (io.req.fire) {
req := io.req.bits
state := config
kch := 0.U
kcol := 0.U
krow := 0.U
och := 0.U
}
}
class LoopConvExecuteReq(val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int, val max_addr: Int, val max_acc_addr: Int, val concurrent_loops: Int) extends Bundle {
val outer_bounds = new LoopConvOuterBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val inner_bounds = new LoopConvInnerBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val derived_params = new LoopConvDerivedParams(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val a_addr_start = UInt(log2Up(max_addr).W)
val b_addr_end = UInt(log2Up(max_addr+1).W)
val c_addr_start = UInt(log2Up(max_acc_addr).W)
val wrot180 = Bool()
val downsample = Bool()
val max_pixels_per_row = UInt(small_iterator_bitwidth.W)
val input_dilated = Bool()
val trans_weight_0132 = Bool()
val trans_input_3120 = Bool()
val loop_id = UInt(log2Up(concurrent_loops).W)
}
class LoopConvExecute(block_size: Int, large_iterator_bitwidth: Int, small_iterator_bitwidth: Int, tiny_iterator_bitwidth: Int, max_addr: Int,
max_acc_addr: Int, concurrent_loops: Int, latency: Int,
config_ex_rs1_t: ConfigExRs1, preload_rs1_t: PreloadRs, preload_rs2_t: PreloadRs,
compute_rs1_t: ComputeRs, compute_rs2_t: ComputeRs)(implicit p: Parameters) extends Module {
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopConvExecuteReq(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, max_acc_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val lda_completed = Input(Bool())
val ldb_completed = Input(Bool())
val ldd_completed = Input(Bool())
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, config, pre, comp = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopConvExecuteReq(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth,
max_addr, max_acc_addr, concurrent_loops))
import req.outer_bounds._
import req.inner_bounds._
import req.derived_params._
def undilated(x: UInt): UInt = (x +& req.input_dilated) >> req.input_dilated
// Derived parameters
val B_rows = Mux(req.trans_weight_0132, in_channels_per_bank * kcols * krows * ochs,
out_channels_per_bank * kcols * krows * kchs)
val a_addr_start = req.a_addr_start
val b_addr_start = req.b_addr_end - B_rows
val c_addr_start = /*(BigInt(3) << 30).U |*/ req.c_addr_start
// Iterators
val och = Reg(UInt(large_iterator_bitwidth.W))
val krow = Reg(UInt(tiny_iterator_bitwidth.W))
val kcol = Reg(UInt(tiny_iterator_bitwidth.W))
val kch = Reg(UInt(large_iterator_bitwidth.W))
val b = Reg(UInt(large_iterator_bitwidth.W))
val orow = Reg(UInt(small_iterator_bitwidth.W))
val ocol = Reg(UInt(small_iterator_bitwidth.W))
// TODO kernel-dilation and input-dilation can never be activated at the same time, so we can optimize out some multiplications by kernel_dilation
val skip_iteration = state >= pre && req.input_dilated && (((krow * kernel_dilation +& orow -& upad)(0) & req.input_dilated).asBool ||
((kcol * kernel_dilation +& ocol -& lpad)(0) & req.input_dilated).asBool)
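// With input dilation, every other row/column of the conceptually dilated input
// is a zero; iterations whose kernel/output offsets land on such a row or
// column contribute nothing, so they are skipped outright instead of issued.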
val pixels = Mux(kcols - kcol > req.max_pixels_per_row, req.max_pixels_per_row, kcols - kcol)
val irow = undilated(orow * stride +& krow * kernel_dilation)
val icol = undilated(ocol * stride +& kcol * kernel_dilation)
val I = Mux(req.trans_input_3120,
Mux(batches - b > block_size.U, block_size.U, batches - b),
undilated(Mux(ocols - ocol > (block_size.U << req.input_dilated).asUInt, (block_size.U << req.input_dilated).asUInt, ocols - ocol)))
val J = Mux(ochs - och > block_size.U, block_size.U, ochs - och)
val K = pixels * Mux(kchs - kch > block_size.U, block_size.U, kchs - kch)
// Addresses
val a_addr = Mux(req.trans_input_3120,
a_addr_start +& (b / block_size.U) * input_spad_stride +& kch * (irows >> req.downsample) * (icols >> req.downsample) +& (irow >> req.downsample) * (icols >> req.downsample) +& (icol >> req.downsample),
a_addr_start +& (kch / block_size.U(kch.getWidth.W)) * input_spad_stride +& b * (irows >> req.downsample) * (icols >> req.downsample) +& (irow >> req.downsample) * (icols >> req.downsample) +& (icol >> req.downsample))
// val c_addr = Mux(ex_overwrite && krow === 0.U && kcol === 0.U && kch === 0.U, d_addr_start, c_addr_start) +&
// (och / block_size.U) * batches * orows * ocols +& b * orows * ocols +& orow * ocols +& ocol
// The width expansions are added here solely to prevent Verilator's "WIDTH" warnings, despite making the code uglier
val c_addr = c_addr_start +&
(och / block_size.U(och.getWidth.W)) * batches * orows * ocols +& b * orows * ocols +& orow * ocols +& ocol
// val new_weights = b === 0.U && orow === 0.U && ocol === 0.U
val new_weights = Reg(Bool())
val krow_rot = Mux(req.wrot180, krows - krow - 1.U, krow)
val kcol_rot = Mux(req.wrot180, kcols - kcol - 1.U, kcol)
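// wrot180 mirrors the kernel indices in both spatial dimensions (a 180-degree
// rotation of the weights, as needed e.g. for transposed convolutions); only
// the weight address is rotated, not the iteration order itself.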
val b_addr = Mux(req.trans_weight_0132,
b_addr_start +& (kch / block_size.U(och.getWidth.W)) * krows * kcols * ochs +& krow_rot * kcols * ochs +& kcol_rot * ochs +& och,
b_addr_start +& (och / block_size.U(och.getWidth.W)) * krows * kcols * kchs +& krow_rot * kcols * kchs +& kcol_rot * kchs +& kch)
class RoCCCommandWithAddr extends Bundle {
val cmd = new RoCCCommand
val a_addr = UInt()
val b_addr = UInt()
val c_addr = UInt()
val I = UInt()
val J = UInt()
val K = UInt()
val new_weights = Bool()
}
val command_p = Module(new Pipeline[RoCCCommandWithAddr](new RoCCCommandWithAddr, latency)())
// Commands
val config_cmd = Wire(new RoCCCommand)
config_cmd := DontCare
config_cmd.inst.funct := CONFIG_CMD
val config_cmd_rs1 = Wire(config_ex_rs1_t.cloneType)
config_cmd_rs1 := DontCare
config_cmd_rs1.a_stride := (irows * icols).asUInt
config_cmd_rs1.set_only_strides := 1.U
config_cmd_rs1.cmd_type := 0.U
val config_cmd_rs2 = Wire(new ConfigExRs2)
config_cmd_rs2 := DontCare
config_cmd_rs2.c_stride := (orows * ocols).asUInt
config_cmd.rs1 := config_cmd_rs1.asUInt
config_cmd.rs2 := config_cmd_rs2.asUInt
val pre_cmd = Wire(new RoCCCommand) // preload
pre_cmd := DontCare
pre_cmd.inst.funct := PRELOAD_CMD
pre_cmd.rs1 := 0.U//(K << 48) | (J << 32) | pre_addr
pre_cmd.rs2 := 0.U//(I << 48) | (J << 32) | c_addr
val comp_cmd = Wire(new RoCCCommand()) // compute.preloaded
comp_cmd := DontCare
comp_cmd.inst.funct := Mux(new_weights, COMPUTE_AND_FLIP_CMD, COMPUTE_AND_STAY_CMD)
comp_cmd.rs1 := 0.U//(I << 48) | (K << 32) | a_addr
comp_cmd.rs2 := 0.U//(I << 48) | (J << 32) | GARBAGE_ADDR
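// new_weights selects COMPUTE_AND_FLIP (use the freshly preloaded weights) for
// the first compute of a weight tile, and COMPUTE_AND_STAY (keep reusing the
// weights already resident in the array) for the remaining (b, orow, ocol)
// iterations of that tile.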
val ld_ahead = io.lda_completed && io.ldb_completed && io.ldd_completed
// Inputs and outputs
io.req.ready := state === idle && !command_p.io.busy
io.idle := state === idle && !command_p.io.busy
io.loop_id := req.loop_id
command_p.io.in.valid := state =/= idle && !skip_iteration && ld_ahead
command_p.io.in.bits.cmd := MuxCase(config_cmd, Seq((state === pre) -> pre_cmd, (state === comp) -> comp_cmd))
command_p.io.in.bits.a_addr := a_addr
command_p.io.in.bits.b_addr := b_addr
command_p.io.in.bits.c_addr := c_addr
command_p.io.in.bits.I := I
command_p.io.in.bits.J := J
command_p.io.in.bits.K := K
command_p.io.in.bits.new_weights := new_weights
command_p.io.out.ready := io.cmd.ready && !io.rob_overloaded
io.cmd.valid := command_p.io.out.valid && !io.rob_overloaded
io.cmd.bits := command_p.io.out.bits.cmd
when (command_p.io.out.bits.cmd.inst.funct === PRELOAD_CMD) {
val o = command_p.io.out.bits
val pre_cmd_rs1 = Wire(preload_rs1_t.cloneType)
pre_cmd_rs1 := DontCare
pre_cmd_rs1.num_rows := o.K.asUInt
pre_cmd_rs1.num_cols := o.J.asUInt
pre_cmd_rs1.local_addr := Mux(o.new_weights, cast_to_sp_addr(pre_cmd_rs1.local_addr, o.b_addr),
garbage_addr(pre_cmd_rs1.local_addr))
val pre_cmd_rs2 = Wire(preload_rs2_t.cloneType)
pre_cmd_rs2 := DontCare
pre_cmd_rs2.num_rows := o.I.asUInt
pre_cmd_rs2.num_cols := o.J.asUInt
pre_cmd_rs2.local_addr := cast_to_acc_addr(pre_cmd_rs2.local_addr, o.c_addr, accumulate = true.B, read_full = false.B)
io.cmd.bits.rs1 := pre_cmd_rs1.asUInt
io.cmd.bits.rs2 := pre_cmd_rs2.asUInt
}.elsewhen(command_p.io.out.bits.cmd.inst.funct =/= CONFIG_CMD) {
val o = command_p.io.out.bits
val comp_cmd_rs1 = Wire(compute_rs1_t.cloneType)
comp_cmd_rs1 := DontCare
comp_cmd_rs1.num_rows := o.I.asUInt
comp_cmd_rs1.num_cols := o.K.asUInt
comp_cmd_rs1.local_addr := cast_to_sp_addr(comp_cmd_rs1.local_addr, o.a_addr)
val comp_cmd_rs2 = Wire(compute_rs2_t.cloneType)
comp_cmd_rs2 := DontCare
comp_cmd_rs2.num_rows := o.I.asUInt
comp_cmd_rs2.num_cols := o.J.asUInt
comp_cmd_rs2.local_addr := garbage_addr(comp_cmd_rs2.local_addr)
io.cmd.bits.rs1 := comp_cmd_rs1.asUInt
io.cmd.bits.rs2 := comp_cmd_rs2.asUInt
}
// Updating "new_weights"
when (state === comp && command_p.io.in.fire) {
new_weights := false.B
}
// Sending outputs
when (command_p.io.in.fire || skip_iteration) {
when (state === config) {
state := pre
}.elsewhen (state === pre) {
state := comp
}.otherwise {
val b_it = Mux(req.trans_input_3120, block_size.U, 1.U)
val ocol_it = Mux(skip_iteration || req.trans_input_3120, 1.U, block_size.U << req.input_dilated).asUInt
val next_ocol = floorAdd(ocol, ocol_it, ocols)
val next_orow = floorAdd(orow, 1.U, orows, next_ocol === 0.U)
val next_b = floorAdd(b, b_it, batches, next_orow === 0.U && next_ocol === 0.U)
val next_kch = floorAdd(kch, block_size.U, kchs,
next_b === 0.U && next_orow === 0.U && next_ocol === 0.U)
val next_kcol = floorAdd(kcol, req.max_pixels_per_row, kcols,
next_kch === 0.U && next_b === 0.U && next_orow === 0.U && next_ocol === 0.U)
val next_krow = floorAdd(krow, 1.U, krows,
next_kcol === 0.U && next_kch === 0.U && next_b === 0.U && next_orow === 0.U && next_ocol === 0.U)
val next_och = floorAdd(och, block_size.U, ochs, next_krow === 0.U &&
next_kcol === 0.U && next_kch === 0.U && next_b === 0.U && next_orow === 0.U && next_ocol === 0.U)
ocol := next_ocol
orow := next_orow
b := next_b
kch := next_kch
kcol := next_kcol
krow := next_krow
och := next_och
when (next_b === 0.U && next_orow === 0.U && next_ocol === 0.U) {
new_weights := true.B
}
state := Mux(next_och === 0.U && next_krow === 0.U && next_kcol === 0.U && next_kch === 0.U && next_b === 0.U &&
next_orow === 0.U && next_ocol === 0.U,
idle, pre)
}
}
// Accepting requests
when (io.req.fire) {
req := io.req.bits
state := Mux(io.req.bits.trans_input_3120, config, pre)
b := 0.U
orow := 0.U
ocol := 0.U
och := 0.U
krow := 0.U
kcol := 0.U
kch := 0.U
new_weights := true.B
}
}
class LoopConvStReq(val coreMaxAddrBits: Int, val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int, val max_acc_addr: Int, val concurrent_loops: Int) extends Bundle {
val outer_bounds = new LoopConvOuterBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val inner_bounds = new LoopConvInnerBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val derived_params = new LoopConvDerivedParams(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val addr_start = UInt(log2Up(max_acc_addr).W)
val dram_addr = UInt(coreMaxAddrBits.W)
val no_pool = Bool()
val activation = UInt(2.W) // TODO magic number
val trans_output_1203 = Bool()
val loop_id = UInt(log2Up(concurrent_loops).W)
}
class LoopConvSt(block_size: Int, coreMaxAddrBits: Int, large_iterator_bitwidth: Int, small_iterator_bitwidth: Int, tiny_iterator_bitwidth: Int, max_acc_addr: Int, input_w: Int, concurrent_loops: Int, latency: Int, config_mvout_rs2_t: ConfigMvoutRs2, mvout_rs2_t: MvoutRs2)(implicit p: Parameters) extends Module {
val ACC_SCALE_NO_CHANGE = ~(0.U(32.W)) // TODO get this from ISA description somehow
val io = IO(new Bundle {
val req = Flipped(Decoupled(new LoopConvStReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_acc_addr, concurrent_loops)))
val cmd = Decoupled(Output(new RoCCCommand))
val ex_completed = Input(Bool())
val idle = Output(Bool())
val rob_overloaded = Input(Bool())
val loop_id = Output(UInt(log2Up(concurrent_loops).W))
})
object State extends ChiselEnum {
val idle, st, pre_pool_config, pool, post_pool_config = Value
}
import State._
val state = RegInit(idle)
val req = Reg(new LoopConvStReq(coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_acc_addr, concurrent_loops))
import req.outer_bounds._
import req.inner_bounds._
import req.derived_params._
val acc_addr_start = req.addr_start
// Derived parameters
val skip = req.dram_addr === 0.U
// Iterators
val b = Reg(UInt(large_iterator_bitwidth.W))
val orow = Reg(UInt(small_iterator_bitwidth.W))
val ocol = Reg(UInt(small_iterator_bitwidth.W))
val och = Reg(UInt(large_iterator_bitwidth.W))
// Addresses
val dram_offset = Mux(req.trans_output_1203,
((orow*out_col_dim*batch_size +& ocol*batch_size +& b) * out_channels +& och) * (input_w/8).U,
((b*out_row_dim*out_col_dim +& orow*out_col_dim +& ocol) * out_stride +& och) * (input_w/8).U)
val dram_addr = req.dram_addr + LoopConv.castDramOffset(dram_offset)
val spad_addr = acc_addr_start +& (och / block_size.U(och.getWidth.W)) * batches * orows * ocols +& b * orows * ocols +& orow * ocols +& ocol
val pool_dram_addr = req.dram_addr + ((b * pool_out_col_dim * pool_out_row_dim) * out_stride + och) * (input_w/8).U
val pool_spad_addr = acc_addr_start +& (och / block_size.U(och.getWidth.W)) * batches * orows * ocols +& b * orows * ocols
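// When pooling is enabled the mvout engine walks the pooled output window
// itself, driven by the CONFIG_STORE parameters set up below, so only a
// per-(batch, channel-block) base address is computed here for both DRAM and
// the accumulator.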
// Sizes
val I = Mux(ocols - ocol > block_size.U, block_size.U, ocols - ocol)
val J = Mux(ochs - och > block_size.U, block_size.U, ochs - och)
val channels = J
class RoCCCommandWithAddr extends Bundle {
val cmd = new RoCCCommand
val dram_addr = UInt()
val spad_addr = UInt()
val pool_dram_addr = UInt()
val pool_spad_addr = UInt()
val channels = UInt()
val is_pool = Bool()
val I = UInt()
val J = UInt()
}
val command_p = Module(new Pipeline[RoCCCommandWithAddr](new RoCCCommandWithAddr, latency)())
// Commands
val mvout_cmd = Wire(new RoCCCommand)
mvout_cmd := DontCare
mvout_cmd.inst.funct := STORE_CMD
mvout_cmd.rs1 := 0.U // dram_addr
mvout_cmd.rs2 := 0.U // mvout_cmd_rs2
val pre_pool_config_cmd = Wire(new RoCCCommand)
pre_pool_config_cmd := DontCare
pre_pool_config_cmd.inst.funct := CONFIG_CMD
val pre_pool_config_cmd_rs1 = Wire(new ConfigMvoutRs1)
pre_pool_config_cmd_rs1 := DontCare
pre_pool_config_cmd_rs1.ocols := ocols
pre_pool_config_cmd_rs1.orows := orows
pre_pool_config_cmd_rs1.pocols := pocols
pre_pool_config_cmd_rs1.porows := porows
pre_pool_config_cmd_rs1.pool_out_dim := pool_out_col_dim
pre_pool_config_cmd_rs1.lpad := plpad
pre_pool_config_cmd_rs1.upad := pupad
pre_pool_config_cmd_rs1.pool_size := pool_size
pre_pool_config_cmd_rs1.pool_stride := pool_stride
pre_pool_config_cmd_rs1.activation := req.activation
pre_pool_config_cmd_rs1.cmd_type := CONFIG_STORE
pre_pool_config_cmd.rs1 := pre_pool_config_cmd_rs1.asUInt
val pre_pool_config_cmd_rs2 = Wire(config_mvout_rs2_t.cloneType)
pre_pool_config_cmd_rs2 := DontCare
pre_pool_config_cmd_rs2.acc_scale := ACC_SCALE_NO_CHANGE
pre_pool_config_cmd_rs2.stride := out_stride * (input_w / 8).U
pre_pool_config_cmd.rs2 := pre_pool_config_cmd_rs2.asUInt
val post_pool_config_cmd = Wire(new RoCCCommand)
post_pool_config_cmd := DontCare
post_pool_config_cmd.inst.funct := CONFIG_CMD
val post_pool_config_cmd_rs1 = Wire(new ConfigMvoutRs1)
post_pool_config_cmd_rs1 := DontCare
post_pool_config_cmd_rs1.activation := req.activation
post_pool_config_cmd_rs1.cmd_type := CONFIG_STORE
post_pool_config_cmd.rs1 := post_pool_config_cmd_rs1.asUInt
val post_pool_config_cmd_rs2 = Wire(config_mvout_rs2_t.cloneType)
post_pool_config_cmd_rs2 := DontCare
post_pool_config_cmd_rs2.acc_scale := ACC_SCALE_NO_CHANGE
post_pool_config_cmd_rs2.stride := out_stride * (input_w / 8).U
post_pool_config_cmd.rs2 := post_pool_config_cmd_rs2.asUInt
val pool_cmd = Wire(new RoCCCommand)
pool_cmd := DontCare
pool_cmd.inst.funct := STORE_CMD
pool_cmd.rs1 := 0.U // pool_dram_addr
pool_cmd.rs2 := 0.U // (channels << 32.U) | pool_spad_addr
// Inputs and outputs
io.req.ready := state === idle && !command_p.io.busy
io.idle := state === idle && !command_p.io.busy
io.loop_id := req.loop_id
command_p.io.in.valid := state =/= idle && !skip && io.ex_completed
command_p.io.in.bits.cmd := MuxLookup(state.asUInt, mvout_cmd)(Seq(
pre_pool_config.asUInt -> pre_pool_config_cmd,
pool.asUInt -> pool_cmd,
post_pool_config.asUInt -> post_pool_config_cmd)
)
command_p.io.in.bits.is_pool := state === pool
command_p.io.in.bits.dram_addr := dram_addr
command_p.io.in.bits.spad_addr := spad_addr
command_p.io.in.bits.pool_spad_addr := pool_spad_addr
command_p.io.in.bits.pool_dram_addr := pool_dram_addr
command_p.io.in.bits.channels := channels
command_p.io.in.bits.I := I
command_p.io.in.bits.J := J
command_p.io.out.ready := io.cmd.ready && !io.rob_overloaded
io.cmd.valid := command_p.io.out.valid && !io.rob_overloaded
io.cmd.bits := command_p.io.out.bits.cmd
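// For mvout commands, patch rs1/rs2 at the pipeline output with the DRAM address and the
// packed mvout rs2 (row/column counts plus the accumulator local address), choosing the
// pooled or non-pooled addresses depending on is_pool.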
when (command_p.io.out.bits.cmd.inst.funct === STORE_CMD) {
val o = command_p.io.out.bits
when (o.is_pool) {
val pool_mvout_cmd_rs2 = Wire(mvout_rs2_t.cloneType)
pool_mvout_cmd_rs2 := DontCare
pool_mvout_cmd_rs2.num_cols := o.channels
pool_mvout_cmd_rs2.local_addr := cast_to_acc_addr(pool_mvout_cmd_rs2.local_addr, o.pool_spad_addr, accumulate = false.B, read_full = false.B)
io.cmd.bits.rs1 := o.pool_dram_addr
io.cmd.bits.rs2 := pool_mvout_cmd_rs2.asUInt
} .otherwise {
val mvout_cmd_rs2 = Wire(mvout_rs2_t.cloneType)
mvout_cmd_rs2 := DontCare
mvout_cmd_rs2.num_rows := o.I.asUInt
mvout_cmd_rs2.num_cols := o.J.asUInt
mvout_cmd_rs2.local_addr := cast_to_acc_addr(mvout_cmd_rs2.local_addr, o.spad_addr, accumulate = false.B, read_full = false.B)
io.cmd.bits.rs1 := o.dram_addr
io.cmd.bits.rs2 := mvout_cmd_rs2.asUInt
}
}
// Sending outputs
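// Each time a command is accepted into the pipeline, advance the iterators. Without
// pooling, och is the fastest-moving index, then ocol, orow, and batch. With pooling,
// each mvout covers the full orows x ocols window, so only och and batch are iterated
// between the two pooling-config states.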
when (skip) {
state := idle
}.elsewhen(command_p.io.in.fire) {
when (req.no_pool) {
val next_och = floorAdd(och, block_size.U, ochs)
val next_ocol = floorAdd(ocol, block_size.U, ocols, next_och === 0.U)
val next_orow = floorAdd(orow, 1.U, orows, next_ocol === 0.U && next_och === 0.U)
val next_b = floorAdd(b, 1.U, batches, next_orow === 0.U && next_ocol === 0.U && next_och === 0.U)
och := next_och
ocol := next_ocol
orow := next_orow
b := next_b
state := Mux(next_b === 0.U && next_orow === 0.U && next_ocol === 0.U && next_och === 0.U,
idle, st)
}.elsewhen(state === pre_pool_config) {
state := pool
}.elsewhen(state === post_pool_config) {
state := idle
}.otherwise {
val next_och = floorAdd(och, block_size.U, ochs)
val next_b = floorAdd(b, 1.U, batches, next_och === 0.U)
och := next_och
b := next_b
state := Mux(next_b === 0.U && next_och === 0.U,
post_pool_config, pool)
}
}
// Accepting requests
when (io.req.fire) {
req := io.req.bits
state := Mux(io.req.bits.no_pool, st, pre_pool_config)
b := 0.U
orow := 0.U
ocol := 0.U
och := 0.U
}
}

class LoopConvState(val block_size: Int, val large_iterator_bitwidth: Int, val small_iterator_bitwidth: Int, val tiny_iterator_bitwidth: Int, val coreMaxAddrBits: Int, val max_addr: Int, val max_acc_addr: Int) extends Bundle {
val outer_bounds = new LoopConvOuterBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val inner_bounds = new LoopConvInnerBounds(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth)
val bias_dram_addr = UInt(coreMaxAddrBits.W)
val weights_dram_addr = UInt(coreMaxAddrBits.W)
val input_dram_addr = UInt(coreMaxAddrBits.W)
val output_dram_addr = UInt(coreMaxAddrBits.W)
val no_bias = Bool()
val wrot180 = Bool()
val no_pool = Bool()
val downsample = Bool()
val input_dilated = Bool()
val activation = UInt(2.W) // TODO magic number
val trans_output_1203 = Bool()
val trans_weight_1203 = Bool()
val trans_weight_0132 = Bool()
val trans_input_3120 = Bool()
val dw = Bool()
val max_pixels_per_row = UInt(small_iterator_bitwidth.W)
val a_ex_spad_id = UInt(2.W)
val b_ex_spad_id = UInt(2.W)
val configured = Bool()
val running = Bool()
val ld_bias_started = Bool()
val ld_input_started = Bool()
val ld_weights_started = Bool()
val ex_started = Bool()
val st_started = Bool()
val ld_bias_completed = Bool()
val ld_input_completed = Bool()
val ld_weights_completed = Bool()
val ex_completed = Bool()
val st_completed = Bool()
def all_completed(dummy: Int=0): Bool = ld_bias_completed && ld_input_completed && ld_weights_completed && ex_completed && st_completed
val a_addr_start = UInt(log2Up(max_addr).W)
val b_addr_end = UInt(log2Up(max_addr+1).W)
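// Recomputes the per-loop derived parameters: the padded and unpadded input tile
// dimensions from the output dims, stride, dilated kernel size, and padding (halved,
// rounding up, when the input is dilated), plus the channels-per-bank counts and the
// scratchpad strides used by the load and execute unrollers.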
def derived_params(dummy: Int=0): LoopConvDerivedParams = {
import outer_bounds.{stride, kernel_dilation}
import inner_bounds.{batches, pochs, orows, ocols, krows, kcols, upad, dpad, lpad, rpad, kchs}
val result = Wire(new LoopConvDerivedParams(large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth))
result.ochs := pochs
val dilated_krows = krows + (kernel_dilation - 1.U)*(krows - 1.U)
val dilated_kcols = kcols + (kernel_dilation - 1.U)*(kcols - 1.U)
val irows_without_dilation = orows * stride +& dilated_krows -& 1.U
val icols_without_dilation = ocols * stride +& dilated_kcols -& 1.U
val irows_unpadded_without_dilation = irows_without_dilation -& upad -& dpad
val icols_unpadded_without_dilation = icols_without_dilation -& lpad -& rpad
def undilated(x: UInt): UInt = (x +& input_dilated) >> input_dilated
val irows_unpadded = undilated(irows_unpadded_without_dilation)
val icols_unpadded = undilated(icols_unpadded_without_dilation)
result.irows := Mux(input_dilated, irows_unpadded +& undilated(upad) +& undilated(dpad), irows_without_dilation)
result.icols := Mux(input_dilated, icols_unpadded +& undilated(lpad) +& undilated(rpad), icols_without_dilation)
result.irows_unpadded := irows_unpadded
result.icols_unpadded := icols_unpadded
result.ichs := kchs
result.out_channels_per_bank := result.ochs / block_size.U(result.ochs.getWidth.W) +& (result.ochs % block_size.U =/= 0.U)
result.in_channels_per_bank := result.ichs / block_size.U(result.ichs.getWidth.W) +& (result.ichs % block_size.U =/= 0.U)
result.bias_spad_stride := batches * orows * ocols
result.input_spad_stride := Mux(trans_input_3120,
result.ichs * (result.irows >> downsample) * (result.icols >> downsample),
batches * (result.irows >> downsample) * (result.icols >> downsample))
result.weight_spad_stride := Mux(trans_weight_0132, krows * kcols * pochs, krows * kcols * kchs)
// result.ex_overwrite := bias_dram_addr =/= 0.U && no_bias
result
}
def reset(): Unit = {
configured := false.B
running := false.B
ld_bias_started := false.B
ld_input_started := false.B
ld_weights_started := false.B
ex_started := false.B
st_started := false.B
ld_bias_completed := false.B
ld_input_completed := false.B
ld_weights_completed := false.B
ex_completed := false.B
st_completed := false.B
}
}

class LoopConv (block_size: Int, coreMaxAddrBits: Int, reservation_station_size: Int, max_lds: Int, max_exs: Int, max_sts: Int,
max_addr: Int, max_acc_addr: Int, input_w: Int, acc_w: Int, dma_max_bytes: Int,
config_mvin_rs1_t: ConfigMvinRs1, mvin_rs2_t: MvinRs2, config_mvout_rs2_t: ConfigMvoutRs2, mvout_rs2_t: MvoutRs2,
config_ex_rs1_t: ConfigExRs1, preload_rs1_t: PreloadRs, preload_rs2_t: PreloadRs,
compute_rs1_t: ComputeRs, compute_rs2_t: ComputeRs,
has_training_convs: Boolean, has_max_pool: Boolean, has_first_layer_optimizations: Boolean,
has_dw_convs: Boolean)
(implicit p: Parameters) extends Module {
val large_iterator_bitwidth = 16
val small_iterator_bitwidth = 16 // 8
val tiny_iterator_bitwidth = 16 // 4
val max_block_len = (dma_max_bytes / (block_size * (input_w / 8))) max 1
val max_block_len_acc = (dma_max_bytes / (block_size * (acc_w / 8))) max 1
val io = IO(new Bundle {
val in = Flipped(Decoupled(new GemminiCmd(reservation_station_size)))
val out = Decoupled(new GemminiCmd(reservation_station_size))
val ld_completed = Input(UInt(log2Up(reservation_station_size+1).W))
val st_completed = Input(UInt(log2Up(reservation_station_size+1).W))
val ex_completed = Input(UInt(log2Up(reservation_station_size+1).W))
val busy = Output(Bool())
})
// Create states
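// Two loop-state slots are kept so the next convolution can be configured and start its
// loads while the previous one is still executing and storing (double buffering).
// head_loop_id flips whenever the head loop completes.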
val concurrent_loops = 2
val loops = Reg(Vec(concurrent_loops, new LoopConvState(block_size, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, coreMaxAddrBits, max_addr, max_acc_addr)))
val head_loop_id = RegInit(0.U(log2Up(concurrent_loops).W))
val tail_loop_id = (~head_loop_id).asUInt // This is the loop that we always try to configure if available
val head_loop = loops(head_loop_id)
val tail_loop = loops(tail_loop_id)
val loop_configured = loops.map(_.configured).reduce(_ || _)
val loop_being_configured_id = Mux(head_loop.configured, tail_loop_id, head_loop_id)
val loop_being_configured = loops(loop_being_configured_id)
// Create inner modules
val latency = 2
val ld_bias = Module(new LoopConvLdBias(block_size, coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_acc_addr, acc_w, max_block_len_acc, concurrent_loops, latency, config_mvin_rs1_t, mvin_rs2_t))
val ld_input = Module(new LoopConvLdInput(block_size, coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, input_w, max_block_len, concurrent_loops, latency, config_mvin_rs1_t, mvin_rs2_t))
val ld_weights = Module(new LoopConvLdWeight(block_size, coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, input_w, max_block_len, concurrent_loops, latency, config_mvin_rs1_t, mvin_rs2_t))
val ex = Module(new LoopConvExecute(block_size, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_addr, max_acc_addr, concurrent_loops, latency, config_ex_rs1_t, preload_rs1_t, preload_rs2_t, compute_rs1_t, compute_rs2_t))
val st = Module(new LoopConvSt(block_size, coreMaxAddrBits, large_iterator_bitwidth, small_iterator_bitwidth, tiny_iterator_bitwidth, max_acc_addr, input_w, concurrent_loops, latency, config_mvout_rs2_t, mvout_rs2_t))
// Create command queue
val cmd = Queue(io.in)
io.busy := cmd.valid || loop_configured
// Create arbiter
val arb = Module(new Arbiter(new RoCCCommand, 5))
arb.io.in(0) <> st.io.cmd
arb.io.in(1) <> ex.io.cmd
arb.io.in(2) <> ld_bias.io.cmd
arb.io.in(3) <> ld_weights.io.cmd
arb.io.in(4) <> ld_input.io.cmd
val unrolled_cmd = arb.io.out
// Create reservation station utilization counters
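// These counters track how many load, store, and execute commands are in flight in the
// reservation station; when one reaches its maximum, the corresponding unroller is
// stalled through its rob_overloaded input.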
val ld_utilization = RegInit(0.U(log2Up(max_lds+1).W))
val st_utilization = RegInit(0.U(log2Up(max_sts+1).W))
val ex_utilization = RegInit(0.U(log2Up(max_exs+1).W))
ld_utilization := ld_utilization +& (ld_bias.io.cmd.fire || ld_weights.io.cmd.fire || ld_input.io.cmd.fire) -& io.ld_completed
st_utilization := st_utilization +& st.io.cmd.fire -& io.st_completed
ex_utilization := ex_utilization +& ex.io.cmd.fire -& io.ex_completed
assert(ld_utilization >= io.ld_completed, "ld utilization underflow")
assert(st_utilization >= io.st_completed, "st utilization underflow")
assert(ex_utilization >= io.ex_completed, "ex utilization underflow")
// Wire up unrolled command output
val is_loop_run_cmd = cmd.bits.cmd.inst.funct === LOOP_CONV_WS
val is_loop_config_cmd = cmd.bits.cmd.inst.funct >= LOOP_CONV_WS_CONFIG_1 && cmd.bits.cmd.inst.funct <= LOOP_CONV_WS_CONFIG_6
val is_loop_cmd = is_loop_run_cmd || is_loop_config_cmd
io.out.bits.cmd := Mux(loop_configured, unrolled_cmd.bits, cmd.bits.cmd)
io.out.bits.cmd.status := cmd.bits.cmd.status // TODO This is not guaranteed to be the correct fix! We must fix this
io.out.bits.rob_id := DontCare
io.out.bits.from_matmul_fsm := Mux(loop_configured, false.B, cmd.bits.from_matmul_fsm)
io.out.bits.from_conv_fsm := Mux(loop_configured, true.B, cmd.bits.from_conv_fsm)
io.out.valid := Mux(loop_configured, unrolled_cmd.valid, cmd.valid && !is_loop_config_cmd && !is_loop_run_cmd)
cmd.ready := Mux(is_loop_cmd, !loop_being_configured.configured, !loop_configured && io.out.ready)
arb.io.out.ready := io.out.ready
// Wire up waiting-for-loads signals
val ex_is_waiting_for_loads = loops(ex.io.loop_id).ex_started && !loops(ex.io.loop_id).ex_completed &&
!(loops(ex.io.loop_id).ld_input_completed && loops(ex.io.loop_id).ld_weights_completed &&
loops(ex.io.loop_id).ld_bias_completed)
ld_bias.io.wait_for_prev_loop := ex_is_waiting_for_loads && ld_bias.io.loop_id =/= ex.io.loop_id
ld_weights.io.wait_for_prev_loop := ex_is_waiting_for_loads && ld_weights.io.loop_id =/= ex.io.loop_id
ld_input.io.wait_for_prev_loop := ex_is_waiting_for_loads && ld_input.io.loop_id =/= ex.io.loop_id
// Wire up overloaded signals
ld_bias.io.rob_overloaded := ld_utilization >= max_lds.U
ld_input.io.rob_overloaded := ld_utilization >= max_lds.U
ld_weights.io.rob_overloaded := ld_utilization >= max_lds.U
ex.io.rob_overloaded := ex_utilization >= max_exs.U
st.io.rob_overloaded := st_utilization >= max_sts.U
// Wire up iterator inputs
ex.io.lda_completed := (ld_input.io.loop_id =/= ex.io.loop_id) || ld_input.io.idle
ex.io.ldb_completed := (ld_weights.io.loop_id =/= ex.io.loop_id) || ld_weights.io.idle
ex.io.ldd_completed := (ld_bias.io.loop_id =/= ex.io.loop_id) || ld_bias.io.idle
st.io.ex_completed := (ex.io.loop_id =/= st.io.loop_id) || ex.io.idle
// Create config registers
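// LOOP_CONV_WS_CONFIG_1 through _6 pack the outer/inner loop bounds and the four DRAM
// addresses into rs1/rs2 bitfields; the final LOOP_CONV_WS command sets the remaining
// flags (bias, transposes, pooling, activation, dilation) and marks the loop configured.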
when(cmd.valid && is_loop_cmd && !loop_being_configured.configured) {
switch (cmd.bits.cmd.inst.funct) {
is (LOOP_CONV_WS_CONFIG_1) {
loop_being_configured.outer_bounds.out_channels := cmd.bits.cmd.rs1(63, 48)
loop_being_configured.outer_bounds.in_channels := cmd.bits.cmd.rs1(47, 32)
loop_being_configured.outer_bounds.in_row_dim := cmd.bits.cmd.rs1(31, 16)
loop_being_configured.outer_bounds.batch_size := cmd.bits.cmd.rs1(15, 0)
loop_being_configured.outer_bounds.padding := cmd.bits.cmd.rs2(63, 56)
loop_being_configured.outer_bounds.stride := cmd.bits.cmd.rs2(55, 48)
loop_being_configured.outer_bounds.out_col_dim := cmd.bits.cmd.rs2(47, 32)
loop_being_configured.outer_bounds.pool_out_row_dim := cmd.bits.cmd.rs2(31, 16)
loop_being_configured.outer_bounds.out_row_dim := cmd.bits.cmd.rs2(15, 0)
}
is (LOOP_CONV_WS_CONFIG_2) {
loop_being_configured.outer_bounds.kernel_dim := cmd.bits.cmd.rs1(63, 48)
loop_being_configured.outer_bounds.pool_out_col_dim := cmd.bits.cmd.rs1(47, 32)
loop_being_configured.outer_bounds.pool_size := (if (!has_max_pool) 1.U else cmd.bits.cmd.rs1(31, 16))
loop_being_configured.outer_bounds.pool_stride := (if (!has_max_pool) 1.U else cmd.bits.cmd.rs1(15, 8))
loop_being_configured.outer_bounds.pool_padding := (if (!has_max_pool) 0.U else cmd.bits.cmd.rs1(7, 0))
loop_being_configured.inner_bounds.batches := cmd.bits.cmd.rs2(63, 48)
loop_being_configured.inner_bounds.porows := cmd.bits.cmd.rs2(47, 32)
loop_being_configured.inner_bounds.pocols := cmd.bits.cmd.rs2(31, 16)
loop_being_configured.inner_bounds.pochs := cmd.bits.cmd.rs2(15, 0)
}
is (LOOP_CONV_WS_CONFIG_3) {
loop_being_configured.inner_bounds.krows := cmd.bits.cmd.rs1(63, 48)
loop_being_configured.inner_bounds.kcols := cmd.bits.cmd.rs1(47, 32)
loop_being_configured.inner_bounds.kchs := cmd.bits.cmd.rs1(31, 16)
loop_being_configured.inner_bounds.lpad := cmd.bits.cmd.rs1(15, 0)
loop_being_configured.inner_bounds.rpad := cmd.bits.cmd.rs2(63, 48)
loop_being_configured.inner_bounds.upad := cmd.bits.cmd.rs2(47, 32)
loop_being_configured.inner_bounds.dpad := cmd.bits.cmd.rs2(31, 24)
loop_being_configured.inner_bounds.plpad := cmd.bits.cmd.rs2(23, 16)
loop_being_configured.outer_bounds.in_col_dim := cmd.bits.cmd.rs2(15, 0)
}
is (LOOP_CONV_WS_CONFIG_4) {
loop_being_configured.inner_bounds.orows := cmd.bits.cmd.rs1(63, 48)
loop_being_configured.inner_bounds.prad := cmd.bits.cmd.rs1(47, 32)
loop_being_configured.inner_bounds.pupad := cmd.bits.cmd.rs1(31, 21)
loop_being_configured.inner_bounds.pdpad := cmd.bits.cmd.rs1(20, 10)
loop_being_configured.outer_bounds.kernel_dilation := cmd.bits.cmd.rs1(9, 0)
loop_being_configured.inner_bounds.ocols := cmd.bits.cmd.rs2(15, 0)
loop_being_configured.outer_bounds.in_stride := cmd.bits.cmd.rs2(63, 48)
loop_being_configured.outer_bounds.weight_stride := cmd.bits.cmd.rs2(47, 32)
loop_being_configured.outer_bounds.out_stride := cmd.bits.cmd.rs2(31, 16)
}
is (LOOP_CONV_WS_CONFIG_5) {
loop_being_configured.weights_dram_addr := cmd.bits.cmd.rs1
loop_being_configured.output_dram_addr := cmd.bits.cmd.rs2
}
is (LOOP_CONV_WS_CONFIG_6) {
loop_being_configured.bias_dram_addr := cmd.bits.cmd.rs1
loop_being_configured.input_dram_addr := cmd.bits.cmd.rs2
}
is (LOOP_CONV_WS) {
loop_being_configured.no_bias := cmd.bits.cmd.rs1(0)
// TODO we added a default value for max_pixels_per_row just to maintain backwards compatibility. we should deprecate and remove it later
val config_max_pixels_per_row = cmd.bits.cmd.rs1(15, 8)
loop_being_configured.max_pixels_per_row := Mux(
!has_first_layer_optimizations.B || config_max_pixels_per_row === 0.U,
1.U, config_max_pixels_per_row)
loop_being_configured.a_ex_spad_id := cmd.bits.cmd.rs1(19, 18)
loop_being_configured.b_ex_spad_id := cmd.bits.cmd.rs1(17, 16)
loop_being_configured.wrot180 := has_training_convs.B && cmd.bits.cmd.rs1(1)
loop_being_configured.input_dilated := has_training_convs.B && cmd.bits.cmd.rs2(2)
loop_being_configured.trans_output_1203 := has_training_convs.B && cmd.bits.cmd.rs1(2)
loop_being_configured.trans_weight_1203 := has_training_convs.B && cmd.bits.cmd.rs1(3)
loop_being_configured.trans_weight_0132 := has_training_convs.B && cmd.bits.cmd.rs1(4)
loop_being_configured.trans_input_3120 := has_training_convs.B && cmd.bits.cmd.rs1(5)
loop_being_configured.dw := has_dw_convs.B && cmd.bits.cmd.rs1(6)
loop_being_configured.no_pool := !has_max_pool.B || cmd.bits.cmd.rs2(0)
loop_being_configured.activation := cmd.bits.cmd.rs2(4,3)
loop_being_configured.downsample := cmd.bits.cmd.rs2(1)
loop_being_configured.configured := true.B
// assert(!loop_being_configured.input_dilated || loop_being_configured.outer_bounds.stride === 1.U)
// assert(!loop_being_configured.downsample || (loop_being_configured.outer_bounds.kernel_dim === 1.U && loop_being_configured.outer_bounds.stride === 2.U)) // TODO add the rest of the conditions that must be true for "downsample" to be enabled
}
}
}
// Wire up request signals
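// Each sub-unroller is offered whichever loop has not yet started its stage (head first,
// then tail). The accumulator start addresses rotate between the two halves of the
// accumulator whenever a loop actually produces output, so consecutive loops do not
// overwrite each other's results.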
val ld_bias_addr_start = RegInit(0.U(log2Up(max_acc_addr).W))
val ex_c_addr_start = RegInit(0.U(log2Up(max_acc_addr).W))
val st_addr_start = RegInit(0.U(log2Up(max_acc_addr).W))
val loop_requesting_ld_bias_id = Mux(head_loop.ld_bias_started, tail_loop_id, head_loop_id)
val loop_requesting_ld_bias = loops(loop_requesting_ld_bias_id)
ld_bias.io.req.bits.outer_bounds := loop_requesting_ld_bias.outer_bounds
ld_bias.io.req.bits.inner_bounds := loop_requesting_ld_bias.inner_bounds
ld_bias.io.req.bits.derived_params := loop_requesting_ld_bias.derived_params()
ld_bias.io.req.bits.addr_start := ld_bias_addr_start
ld_bias.io.req.bits.dram_addr := loop_requesting_ld_bias.bias_dram_addr
ld_bias.io.req.bits.no_bias := loop_requesting_ld_bias.no_bias
ld_bias.io.req.bits.loop_id := loop_requesting_ld_bias_id
ld_bias.io.req.valid := !loop_requesting_ld_bias.ld_bias_started && loop_requesting_ld_bias.configured
when (ld_bias.io.req.fire) {
loop_requesting_ld_bias.running := true.B
loop_requesting_ld_bias.ld_bias_started := true.B
// when (loop_requesting_ld_bias.bias_dram_addr =/= 0.U) {
when (loop_requesting_ld_bias.output_dram_addr =/= 0.U) {
ld_bias_addr_start := floorAdd(ld_bias_addr_start, (max_acc_addr / concurrent_loops).U, max_acc_addr.U)
}
}
val loop_requesting_ld_input_id = Mux(head_loop.ld_input_started, tail_loop_id, head_loop_id)
val loop_requesting_ld_input = loops(loop_requesting_ld_input_id)
ld_input.io.req.bits.outer_bounds := loop_requesting_ld_input.outer_bounds
ld_input.io.req.bits.inner_bounds := loop_requesting_ld_input.inner_bounds
ld_input.io.req.bits.derived_params := loop_requesting_ld_input.derived_params()
ld_input.io.req.bits.addr_start := Mux(loop_requesting_ld_input.a_ex_spad_id === 0.U, loop_requesting_ld_input.a_addr_start, (loop_requesting_ld_input.a_ex_spad_id - 1.U) * (max_addr / concurrent_loops).U)
ld_input.io.req.bits.dram_addr := loop_requesting_ld_input.input_dram_addr
ld_input.io.req.bits.downsample := loop_requesting_ld_input.downsample
ld_input.io.req.bits.max_pixels_per_row := loop_requesting_ld_input.max_pixels_per_row
ld_input.io.req.bits.input_dilated := loop_requesting_ld_input.input_dilated
ld_input.io.req.bits.trans_input_3120 := loop_requesting_ld_input.trans_input_3120
ld_input.io.req.bits.loop_id := loop_requesting_ld_input_id
ld_input.io.req.valid := !loop_requesting_ld_input.ld_input_started && loop_requesting_ld_input.configured
when (ld_input.io.req.fire) {
loop_requesting_ld_input.running := true.B
loop_requesting_ld_input.ld_input_started := true.B
}
val loop_requesting_ld_weights_id = Mux(head_loop.ld_weights_started, tail_loop_id, head_loop_id)
val loop_requesting_ld_weights = loops(loop_requesting_ld_weights_id)
ld_weights.io.req.bits.outer_bounds := loop_requesting_ld_weights.outer_bounds
ld_weights.io.req.bits.inner_bounds := loop_requesting_ld_weights.inner_bounds
ld_weights.io.req.bits.derived_params := loop_requesting_ld_weights.derived_params()
ld_weights.io.req.bits.addr_end := Mux(loop_requesting_ld_weights.b_ex_spad_id === 0.U, loop_requesting_ld_weights.b_addr_end, (loop_requesting_ld_weights.b_ex_spad_id) * (max_addr / concurrent_loops).U)
ld_weights.io.req.bits.dram_addr := loop_requesting_ld_weights.weights_dram_addr
ld_weights.io.req.bits.trans_weight_1203 := loop_requesting_ld_weights.trans_weight_1203
ld_weights.io.req.bits.trans_weight_0132 := loop_requesting_ld_weights.trans_weight_0132
ld_weights.io.req.bits.dw := loop_requesting_ld_weights.dw
ld_weights.io.req.bits.loop_id := loop_requesting_ld_weights_id
ld_weights.io.req.valid := !loop_requesting_ld_weights.ld_weights_started && loop_requesting_ld_weights.configured
when (ld_weights.io.req.fire) {
loop_requesting_ld_weights.running := true.B
loop_requesting_ld_weights.ld_weights_started := true.B
}
val loop_requesting_ex_id = Mux(head_loop.ex_started, tail_loop_id, head_loop_id)
val loop_requesting_ex = loops(loop_requesting_ex_id)
ex.io.req.bits.outer_bounds := loop_requesting_ex.outer_bounds
ex.io.req.bits.inner_bounds := loop_requesting_ex.inner_bounds
ex.io.req.bits.derived_params := loop_requesting_ex.derived_params()
ex.io.req.bits.a_addr_start := Mux(loop_requesting_ex.a_ex_spad_id === 0.U, loop_requesting_ex.a_addr_start, (loop_requesting_ex.a_ex_spad_id - 1.U) * (max_addr / concurrent_loops).U)
ex.io.req.bits.b_addr_end := Mux(loop_requesting_ex.b_ex_spad_id === 0.U, loop_requesting_ex.b_addr_end, (loop_requesting_ex.b_ex_spad_id) * (max_addr / concurrent_loops).U)
ex.io.req.bits.c_addr_start := ex_c_addr_start
ex.io.req.bits.wrot180 := loop_requesting_ex.wrot180
ex.io.req.bits.downsample := loop_requesting_ex.downsample
ex.io.req.bits.max_pixels_per_row := loop_requesting_ex.max_pixels_per_row
ex.io.req.bits.input_dilated := loop_requesting_ex.input_dilated
ex.io.req.bits.trans_weight_0132 := loop_requesting_ex.trans_weight_0132
ex.io.req.bits.trans_input_3120 := loop_requesting_ex.trans_input_3120
ex.io.req.bits.loop_id := loop_requesting_ex_id
ex.io.req.valid := !loop_requesting_ex.ex_started && loop_requesting_ex.ld_bias_started &&
loop_requesting_ex.ld_input_started && loop_requesting_ex.ld_weights_started && loop_requesting_ex.configured
when (ex.io.req.fire) {
loop_requesting_ex.running := true.B
loop_requesting_ex.ex_started := true.B
when (loop_requesting_ex.output_dram_addr =/= 0.U) {
ex_c_addr_start := floorAdd(ex_c_addr_start, (max_acc_addr / concurrent_loops).U, max_acc_addr.U)
}
}
val loop_requesting_st_id = Mux(head_loop.st_started, tail_loop_id, head_loop_id)
val loop_requesting_st = loops(loop_requesting_st_id)
st.io.req.bits.outer_bounds := loop_requesting_st.outer_bounds
st.io.req.bits.inner_bounds := loop_requesting_st.inner_bounds
st.io.req.bits.derived_params := loop_requesting_st.derived_params()
st.io.req.bits.addr_start := st_addr_start
st.io.req.bits.dram_addr := loop_requesting_st.output_dram_addr
st.io.req.bits.no_pool := loop_requesting_st.no_pool
st.io.req.bits.activation := loop_requesting_st.activation
st.io.req.bits.trans_output_1203 := loop_requesting_st.trans_output_1203
st.io.req.bits.loop_id := loop_requesting_st_id
st.io.req.valid := !loop_requesting_st.st_started && loop_requesting_st.ex_started && loop_requesting_st.configured
when (st.io.req.fire) {
loop_requesting_st.running := true.B
loop_requesting_st.st_started := true.B
when (loop_requesting_st.output_dram_addr =/= 0.U) {
st_addr_start := floorAdd(st_addr_start, (max_acc_addr / concurrent_loops).U, max_acc_addr.U)
}
}
// Handle completed signals
when (ld_bias.io.idle && loops(ld_bias.io.loop_id).running && loops(ld_bias.io.loop_id).ld_bias_started) {
loops(ld_bias.io.loop_id).ld_bias_completed := true.B
}
when (ld_input.io.idle && loops(ld_input.io.loop_id).running && loops(ld_input.io.loop_id).ld_input_started) {
loops(ld_input.io.loop_id).ld_input_completed := true.B
}
when (ld_weights.io.idle && loops(ld_weights.io.loop_id).running && loops(ld_weights.io.loop_id).ld_weights_started) {
loops(ld_weights.io.loop_id).ld_weights_completed := true.B
}
when (ex.io.idle && loops(ex.io.loop_id).running && loops(ex.io.loop_id).ex_started) {
loops(ex.io.loop_id).ex_completed := true.B
}
when (st.io.idle && loops(st.io.loop_id).running && loops(st.io.loop_id).st_started) {
loops(st.io.loop_id).st_completed := true.B
}
when (head_loop.running && head_loop.all_completed()) {
head_loop.reset()
head_loop_id := ~head_loop_id
}
// Resets
when (reset.asBool) {
loops.zipWithIndex.foreach { case (l, i) =>
l.reset()
l.a_addr_start := (i * (max_addr / concurrent_loops)).U
l.b_addr_end := ((i+1) * (max_addr / concurrent_loops)).U
}
}
}

object LoopConv {
def apply(in: DecoupledIO[GemminiCmd], ld_completed: UInt, st_completed: UInt, ex_completed: UInt,
block_size: Int, coreMaxAddrBits: Int, rob_size: Int, max_lds: Int, max_exs: Int, max_sts: Int,
max_addr: Int, max_acc_addr: Int, input_w: Int, acc_w: Int, dma_max_bytes: Int,
config_mvin_rs1_t: ConfigMvinRs1, mvin_rs2_t: MvinRs2, config_mvout_rs2_t: ConfigMvoutRs2,
mvout_rs2_t: MvoutRs2, config_ex_rs1_t: ConfigExRs1, preload_rs1_t: PreloadRs, preload_rs2_t: PreloadRs,
compute_rs1_t: ComputeRs, compute_rs2_t: ComputeRs, has_training_convs: Boolean, has_max_pool: Boolean,
has_first_layer_optimizations: Boolean, has_dw_convs: Boolean)
(implicit p: Parameters): (DecoupledIO[GemminiCmd], Bool) = {
val mod = Module(new LoopConv(block_size, coreMaxAddrBits, rob_size, max_lds, max_exs, max_sts,
max_addr, max_acc_addr, input_w, acc_w, dma_max_bytes,
config_mvin_rs1_t, mvin_rs2_t, config_mvout_rs2_t, mvout_rs2_t, config_ex_rs1_t, preload_rs1_t, preload_rs2_t,
compute_rs1_t, compute_rs2_t, has_training_convs, has_max_pool, has_first_layer_optimizations, has_dw_convs))
mod.io.in <> in
mod.io.ld_completed := ld_completed
mod.io.st_completed := st_completed
mod.io.ex_completed := ex_completed
(mod.io.out, mod.io.busy)
}
def castDramOffset(dram_offset: UInt): UInt = {
// Cast dram offsets to 32 bits max
dram_offset & "hFFFFFFFF".U
}
}

File LocalAddr.scala:
package gemmini
import chisel3._
import chisel3.util._
class LocalAddr(sp_banks: Int, sp_bank_entries: Int, acc_banks: Int, acc_bank_entries: Int) extends Bundle {
private val localAddrBits = 32 // TODO magic number
private val spAddrBits = log2Ceil(sp_banks * sp_bank_entries)
private val accAddrBits = log2Ceil(acc_banks * acc_bank_entries)
private val maxAddrBits = spAddrBits max accAddrBits
private val spBankBits = log2Up(sp_banks)
private val spBankRowBits = log2Up(sp_bank_entries)
private val accBankBits = log2Up(acc_banks)
val accBankRowBits = log2Up(acc_bank_entries)
val spRows = sp_banks * sp_bank_entries
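// Layout of a local address: metadata in the upper bits (accumulator flag, accumulate,
// read-full-row, normalization command), optional garbage padding, and the bank/row
// index in the low `data` bits.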
val is_acc_addr = Bool()
val accumulate = Bool()
val read_full_acc_row = Bool()
val norm_cmd = NormCmd()
private val metadata_w = is_acc_addr.getWidth + accumulate.getWidth + read_full_acc_row.getWidth + norm_cmd.getWidth
assert(maxAddrBits + metadata_w < 32)
val garbage = UInt(((localAddrBits - maxAddrBits - metadata_w - 1) max 0).W)
val garbage_bit = if (localAddrBits - maxAddrBits >= metadata_w + 1) UInt(1.W) else UInt(0.W)
val data = UInt(maxAddrBits.W)
def sp_bank(dummy: Int = 0) = if (spAddrBits == spBankRowBits) 0.U else data(spAddrBits - 1, spBankRowBits)
def sp_row(dummy: Int = 0) = data(spBankRowBits - 1, 0)
def acc_bank(dummy: Int = 0) = if (accAddrBits == accBankRowBits) 0.U else data(accAddrBits - 1, accBankRowBits)
def acc_row(dummy: Int = 0) = data(accBankRowBits - 1, 0)
def full_sp_addr(dummy: Int = 0) = data(spAddrBits - 1, 0)
def full_acc_addr(dummy: Int = 0) = data(accAddrBits - 1, 0)
def is_same_address(other: LocalAddr): Bool = is_acc_addr === other.is_acc_addr && data === other.data
def is_same_address(other: UInt): Bool = is_same_address(other.asTypeOf(this))
def is_garbage(dummy: Int = 0) = is_acc_addr && accumulate && read_full_acc_row && data.andR &&
(if (garbage_bit.getWidth > 0) garbage_bit.asBool else true.B)
def +(other: UInt) = {
require(isPow2(sp_bank_entries)) // TODO remove this requirement
require(isPow2(acc_bank_entries)) // TODO remove this requirement
val result = WireInit(this)
result.data := data + other
result
}
def <=(other: LocalAddr) =
is_acc_addr === other.is_acc_addr &&
Mux(is_acc_addr, full_acc_addr() <= other.full_acc_addr(), full_sp_addr() <= other.full_sp_addr())
def <(other: LocalAddr) =
is_acc_addr === other.is_acc_addr &&
Mux(is_acc_addr, full_acc_addr() < other.full_acc_addr(), full_sp_addr() < other.full_sp_addr())
def >(other: LocalAddr) =
is_acc_addr === other.is_acc_addr &&
Mux(is_acc_addr, full_acc_addr() > other.full_acc_addr(), full_sp_addr() > other.full_sp_addr())
def add_with_overflow(other: UInt): Tuple2[LocalAddr, Bool] = {
require(isPow2(sp_bank_entries)) // TODO remove this requirement
require(isPow2(acc_bank_entries)) // TODO remove this requirement
val sum = data +& other
val overflow = Mux(is_acc_addr, sum(accAddrBits), sum(spAddrBits))
val result = WireInit(this)
result.data := sum(maxAddrBits - 1, 0)
(result, overflow)
}
// This function can only be used with non-accumulator addresses. Returns both new address and underflow
def floorSub(other: UInt, floor: UInt): (LocalAddr, Bool) = {
require(isPow2(sp_bank_entries)) // TODO remove this requirement
require(isPow2(acc_bank_entries)) // TODO remove this requirement
val underflow = data < (floor +& other)
val result = WireInit(this)
result.data := Mux(underflow, floor, data - other)
(result, underflow)
}
def make_this_garbage(dummy: Int = 0): Unit = {
is_acc_addr := true.B
accumulate := true.B
read_full_acc_row := true.B
garbage_bit := 1.U
data := ~(0.U(maxAddrBits.W))
}
}
object LocalAddr {
def cast_to_local_addr[T <: Data](local_addr_t: LocalAddr, t: T): LocalAddr = {
// This convenience function is basically the same as calling "asTypeOf(local_addr_t)". However, this convenience
// function will also cast unnecessary garbage bits to 0, which may help reduce multiplier/adder bitwidths
val result = WireInit(t.asTypeOf(local_addr_t))
if (result.garbage_bit.getWidth > 0) result.garbage := 0.U
result
}
def cast_to_sp_addr[T <: Data](local_addr_t: LocalAddr, t: T): LocalAddr = {
// This function is a wrapper around cast_to_local_addr, but it assumes that the input will not be the garbage
// address
val result = WireInit(cast_to_local_addr(local_addr_t, t))
result.is_acc_addr := false.B
result.accumulate := false.B
result.read_full_acc_row := false.B
// assert(!result.garbage_bit, "cast_to_sp_addr doesn't work on garbage addresses")
result
}
def cast_to_acc_addr[T <: Data](local_addr_t: LocalAddr, t: T, accumulate: Bool, read_full: Bool): LocalAddr = {
// This function is a wrapper around cast_to_local_addr, but it assumes that the input will not be the garbage
// address
val result = WireInit(cast_to_local_addr(local_addr_t, t))
result.is_acc_addr := true.B
result.accumulate := accumulate
result.read_full_acc_row := read_full
// assert(!result.garbage_bit, "cast_to_acc_addr doesn't work on garbage addresses")
result
}
def garbage_addr(local_addr_t: LocalAddr): LocalAddr = {
val result = Wire(chiselTypeOf(local_addr_t))
result := DontCare
result.make_this_garbage()
result
}
}

File Util.scala:
package gemmini
import chisel3._
import chisel3.util._
object Util {
def wrappingAdd(u: UInt, n: UInt, max_plus_one: Int): UInt = {
val max = max_plus_one - 1
if (max == 0) {
0.U
} else {
assert(n <= max.U, "cannot wrapAdd when n is larger than max")
Mux(u >= max.U - n + 1.U && n =/= 0.U, n - (max.U - u) - 1.U, u + n)
}
}
def wrappingAdd(u: UInt, n: UInt, max_plus_one: UInt, en: Bool = true.B): UInt = {
val max = max_plus_one - 1.U
assert(n <= max || max === 0.U, "cannot wrapAdd when n is larger than max, unless max is 0")
/*
Mux(!en, u,
Mux (max === 0.U, 0.U,
Mux(u >= max - n + 1.U && n =/= 0.U, n - (max - u) - 1.U, u + n)))
*/
MuxCase(u + n, Seq(
(!en) -> u,
(max === 0.U) -> 0.U,
(u >= max - n + 1.U && n =/= 0.U) -> (n - (max - u) - 1.U)
))
}
def satAdd(u: UInt, v: UInt, max: UInt): UInt = {
Mux(u +& v > max, max, u + v)
}
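// floorAdd increments u by n but resets to zero once the sum would pass max_plus_one - 1;
// it is the "increment with wrap to zero" step used by all the loop unrollers.
// For example, floorAdd(6.U, 4.U, 8.U) yields 0.U.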
def floorAdd(u: UInt, n: UInt, max_plus_one: UInt, en: Bool = true.B): UInt = {
val max = max_plus_one - 1.U
MuxCase(u + n, Seq(
(!en) -> u,
((u +& n) > max) -> 0.U
))
}
def sFloorAdd(s: SInt, n: UInt, max_plus_one: SInt, min: SInt, en: Bool = true.B): SInt = {
val max = max_plus_one - 1.S
MuxCase(s + n.zext, Seq(
(!en) -> s,
((s +& n.zext) > max) -> min
))
}
def wrappingSub(u: UInt, n: UInt, max_plus_one: Int): UInt = {
val max = max_plus_one - 1
assert(n <= max.U, "cannot wrapSub when n is larger than max")
Mux(u < n, max.U - (n-u) + 1.U, u - n)
}
def ceilingDivide(numer: Int, denom: Int): Int = {
if (numer % denom == 0) { numer / denom }
else { numer / denom + 1}
}
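// Returns the largest power of two that is <= u (e.g. 12.U -> 8.U), by selecting the
// index of the most-significant set bit.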
def closestLowerPowerOf2(u: UInt): UInt = {
// TODO figure out a more efficient way of doing this. Is this many muxes really necessary?
val exp = u.asBools.zipWithIndex.map { case (b, i) =>
Mux(b, i.U, 0.U)
}.reduce((acc, u) => Mux(acc > u, acc, u))
(1.U << exp).asUInt
}
def closestAlignedLowerPowerOf2(u: UInt, addr: UInt, stride: UInt, rowBytes: Int): UInt = {
val lgRowBytes = log2Ceil(rowBytes)
// TODO figure out a more efficient way of doing this. Is this many muxes really necessary?
val exp = u.asBools.zipWithIndex.map { case (b, i) =>
Mux(b && addr(i + lgRowBytes - 1, 0) === 0.U && stride(i + lgRowBytes - 1, 0) === 0.U, i.U, 0.U)
}.reduce((acc, u) => Mux(acc > u, acc, u))
(1.U << exp).asUInt
}
// This function will return "next" with a 0-cycle delay when the "enable" signal is high. It's like a queue with
// the "pipe" and "flow" parameters set to "true"
def RegEnableThru[T <: Data](next: T, enable: Bool): T = {
val buf = RegEnable(next, enable)
Mux(enable, next, buf)
}
def RegEnableThru[T <: Data](next: T, init: T, enable: Bool): T = {
val buf = RegEnable(next, init, enable)
Mux(enable, next, buf)
}
def maxOf(u1: UInt, u2: UInt): UInt = {
Mux(u1 > u2, u1, u2)
}
def maxOf[T <: Data](x: T, y: T)(implicit ev: Arithmetic[T]): T = {
import ev._
Mux(x > y, x, y)
}
def minOf(u1: UInt, u2: UInt): UInt = {
Mux(u1 < u2, u1, u2)
}
def accumulateTree[T <: Data](xs: Seq[T])(implicit ev: Arithmetic[T]): T = {
import ev._
assert(xs.nonEmpty, "can't accumulate 0 elements")
if (xs.length == 1) {
xs.head
} else {
val upperRowLen = 1 << log2Ceil(xs.length)
val upperRow = xs.padTo(upperRowLen, xs.head.zero)
val pairs = upperRow.grouped(2)
val lowerRow = pairs.map { case Seq(a, b) => a + b }
accumulateTree(lowerRow.toSeq)
}
}
// An undirectioned Valid bundle (no Input/Output directions on its fields)
class UDValid[T <: Data](t: T) extends Bundle {
val valid = Bool()
val bits = t.cloneType
def push(b: T): Unit = {
valid := true.B
bits := b
}
def pop(dummy: Int = 0): T = {
valid := false.B
bits
}
}
object UDValid {
def apply[T <: Data](t: T): UDValid[T] = new UDValid(t)
}
// creates a Reg and the next-state Wire, and returns both
def regwire(bits: Int) = {
val wire = Wire(UInt(bits.W))
val reg = RegNext(wire)
wire := reg // default wire to read from reg
(reg, wire)
}
}
module LoopConvLdInput( // @[LoopConv.scala:235:7]
input clock, // @[LoopConv.scala:235:7]
input reset, // @[LoopConv.scala:235:7]
output io_req_ready, // @[LoopConv.scala:241:14]
input io_req_valid, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_batch_size, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_in_row_dim, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_in_col_dim, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_in_channels, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_out_channels, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_out_col_dim, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_out_row_dim, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_out_stride, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_in_stride, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_weight_stride, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_pool_out_row_dim, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_pool_out_col_dim, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_stride, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_padding, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_kernel_dim, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_kernel_dilation, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_pool_size, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_pool_stride, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_outer_bounds_pool_padding, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_batches, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_porows, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_pocols, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_pochs, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_krows, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_kcols, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_kchs, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_lpad, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_rpad, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_upad, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_dpad, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_plpad, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_prad, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_pupad, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_pdpad, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_orows, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_inner_bounds_ocols, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_derived_params_ochs, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_derived_params_irows, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_derived_params_icols, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_derived_params_irows_unpadded, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_derived_params_icols_unpadded, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_derived_params_ichs, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_derived_params_out_channels_per_bank, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_derived_params_in_channels_per_bank, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_derived_params_bias_spad_stride, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_derived_params_input_spad_stride, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_derived_params_weight_spad_stride, // @[LoopConv.scala:241:14]
input [13:0] io_req_bits_addr_start, // @[LoopConv.scala:241:14]
input [39:0] io_req_bits_dram_addr, // @[LoopConv.scala:241:14]
input io_req_bits_downsample, // @[LoopConv.scala:241:14]
input [15:0] io_req_bits_max_pixels_per_row, // @[LoopConv.scala:241:14]
input io_req_bits_input_dilated, // @[LoopConv.scala:241:14]
input io_req_bits_trans_input_3120, // @[LoopConv.scala:241:14]
input io_req_bits_loop_id, // @[LoopConv.scala:241:14]
input io_cmd_ready, // @[LoopConv.scala:241:14]
output io_cmd_valid, // @[LoopConv.scala:241:14]
output [6:0] io_cmd_bits_inst_funct, // @[LoopConv.scala:241:14]
output [4:0] io_cmd_bits_inst_rs2, // @[LoopConv.scala:241:14]
output [4:0] io_cmd_bits_inst_rs1, // @[LoopConv.scala:241:14]
output io_cmd_bits_inst_xd, // @[LoopConv.scala:241:14]
output io_cmd_bits_inst_xs1, // @[LoopConv.scala:241:14]
output io_cmd_bits_inst_xs2, // @[LoopConv.scala:241:14]
output [4:0] io_cmd_bits_inst_rd, // @[LoopConv.scala:241:14]
output [6:0] io_cmd_bits_inst_opcode, // @[LoopConv.scala:241:14]
output [63:0] io_cmd_bits_rs1, // @[LoopConv.scala:241:14]
output [63:0] io_cmd_bits_rs2, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_debug, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_cease, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_wfi, // @[LoopConv.scala:241:14]
output [31:0] io_cmd_bits_status_isa, // @[LoopConv.scala:241:14]
output [1:0] io_cmd_bits_status_dprv, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_dv, // @[LoopConv.scala:241:14]
output [1:0] io_cmd_bits_status_prv, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_v, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_sd, // @[LoopConv.scala:241:14]
output [22:0] io_cmd_bits_status_zero2, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_mpv, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_gva, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_mbe, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_sbe, // @[LoopConv.scala:241:14]
output [1:0] io_cmd_bits_status_sxl, // @[LoopConv.scala:241:14]
output [1:0] io_cmd_bits_status_uxl, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_sd_rv32, // @[LoopConv.scala:241:14]
output [7:0] io_cmd_bits_status_zero1, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_tsr, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_tw, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_tvm, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_mxr, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_sum, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_mprv, // @[LoopConv.scala:241:14]
output [1:0] io_cmd_bits_status_xs, // @[LoopConv.scala:241:14]
output [1:0] io_cmd_bits_status_fs, // @[LoopConv.scala:241:14]
output [1:0] io_cmd_bits_status_mpp, // @[LoopConv.scala:241:14]
output [1:0] io_cmd_bits_status_vs, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_spp, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_mpie, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_ube, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_spie, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_upie, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_mie, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_hie, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_sie, // @[LoopConv.scala:241:14]
output io_cmd_bits_status_uie, // @[LoopConv.scala:241:14]
output io_idle, // @[LoopConv.scala:241:14]
input io_rob_overloaded, // @[LoopConv.scala:241:14]
input io_wait_for_prev_loop, // @[LoopConv.scala:241:14]
output io_loop_id // @[LoopConv.scala:241:14]
);
wire _mvin_cmd_rs2_local_addr_result_result_WIRE_is_acc_addr; // @[LocalAddr.scala:108:37]
wire _mvin_cmd_rs2_local_addr_result_result_WIRE_accumulate; // @[LocalAddr.scala:108:37]
wire _mvin_cmd_rs2_local_addr_result_result_WIRE_read_full_acc_row; // @[LocalAddr.scala:108:37]
wire [2:0] _mvin_cmd_rs2_local_addr_result_result_WIRE_norm_cmd; // @[LocalAddr.scala:108:37]
wire _mvin_cmd_rs2_local_addr_result_result_WIRE_garbage_bit; // @[LocalAddr.scala:108:37]
wire [13:0] _mvin_cmd_rs2_local_addr_result_result_WIRE_data; // @[LocalAddr.scala:108:37]
wire [4:0] mvin_cmd_rs2_num_cols; // @[LoopConv.scala:355:28]
wire [2:0] mvin_cmd_rs2_local_addr_norm_cmd; // @[LoopConv.scala:355:28]
wire _command_p_io_in_ready; // @[LoopConv.scala:313:25]
wire _command_p_io_out_valid; // @[LoopConv.scala:313:25]
wire [6:0] _command_p_io_out_bits_cmd_inst_funct; // @[LoopConv.scala:313:25]
wire [63:0] _command_p_io_out_bits_cmd_rs1; // @[LoopConv.scala:313:25]
wire [63:0] _command_p_io_out_bits_cmd_rs2; // @[LoopConv.scala:313:25]
wire [69:0] _command_p_io_out_bits_dram_addr; // @[LoopConv.scala:313:25]
wire [19:0] _command_p_io_out_bits_I; // @[LoopConv.scala:313:25]
wire _command_p_io_busy; // @[LoopConv.scala:313:25]
wire io_req_valid_0 = io_req_valid; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_batch_size_0 = io_req_bits_outer_bounds_batch_size; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_in_row_dim_0 = io_req_bits_outer_bounds_in_row_dim; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_in_col_dim_0 = io_req_bits_outer_bounds_in_col_dim; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_in_channels_0 = io_req_bits_outer_bounds_in_channels; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_out_channels_0 = io_req_bits_outer_bounds_out_channels; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_out_col_dim_0 = io_req_bits_outer_bounds_out_col_dim; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_out_row_dim_0 = io_req_bits_outer_bounds_out_row_dim; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_out_stride_0 = io_req_bits_outer_bounds_out_stride; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_in_stride_0 = io_req_bits_outer_bounds_in_stride; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_weight_stride_0 = io_req_bits_outer_bounds_weight_stride; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_pool_out_row_dim_0 = io_req_bits_outer_bounds_pool_out_row_dim; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_pool_out_col_dim_0 = io_req_bits_outer_bounds_pool_out_col_dim; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_stride_0 = io_req_bits_outer_bounds_stride; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_padding_0 = io_req_bits_outer_bounds_padding; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_kernel_dim_0 = io_req_bits_outer_bounds_kernel_dim; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_kernel_dilation_0 = io_req_bits_outer_bounds_kernel_dilation; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_pool_size_0 = io_req_bits_outer_bounds_pool_size; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_pool_stride_0 = io_req_bits_outer_bounds_pool_stride; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_outer_bounds_pool_padding_0 = io_req_bits_outer_bounds_pool_padding; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_batches_0 = io_req_bits_inner_bounds_batches; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_porows_0 = io_req_bits_inner_bounds_porows; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_pocols_0 = io_req_bits_inner_bounds_pocols; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_pochs_0 = io_req_bits_inner_bounds_pochs; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_krows_0 = io_req_bits_inner_bounds_krows; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_kcols_0 = io_req_bits_inner_bounds_kcols; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_kchs_0 = io_req_bits_inner_bounds_kchs; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_lpad_0 = io_req_bits_inner_bounds_lpad; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_rpad_0 = io_req_bits_inner_bounds_rpad; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_upad_0 = io_req_bits_inner_bounds_upad; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_dpad_0 = io_req_bits_inner_bounds_dpad; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_plpad_0 = io_req_bits_inner_bounds_plpad; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_prad_0 = io_req_bits_inner_bounds_prad; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_pupad_0 = io_req_bits_inner_bounds_pupad; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_pdpad_0 = io_req_bits_inner_bounds_pdpad; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_orows_0 = io_req_bits_inner_bounds_orows; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_inner_bounds_ocols_0 = io_req_bits_inner_bounds_ocols; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_derived_params_ochs_0 = io_req_bits_derived_params_ochs; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_derived_params_irows_0 = io_req_bits_derived_params_irows; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_derived_params_icols_0 = io_req_bits_derived_params_icols; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_derived_params_irows_unpadded_0 = io_req_bits_derived_params_irows_unpadded; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_derived_params_icols_unpadded_0 = io_req_bits_derived_params_icols_unpadded; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_derived_params_ichs_0 = io_req_bits_derived_params_ichs; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_derived_params_out_channels_per_bank_0 = io_req_bits_derived_params_out_channels_per_bank; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_derived_params_in_channels_per_bank_0 = io_req_bits_derived_params_in_channels_per_bank; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_derived_params_bias_spad_stride_0 = io_req_bits_derived_params_bias_spad_stride; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_derived_params_input_spad_stride_0 = io_req_bits_derived_params_input_spad_stride; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_derived_params_weight_spad_stride_0 = io_req_bits_derived_params_weight_spad_stride; // @[LoopConv.scala:235:7]
wire [13:0] io_req_bits_addr_start_0 = io_req_bits_addr_start; // @[LoopConv.scala:235:7]
wire [39:0] io_req_bits_dram_addr_0 = io_req_bits_dram_addr; // @[LoopConv.scala:235:7]
wire io_req_bits_downsample_0 = io_req_bits_downsample; // @[LoopConv.scala:235:7]
wire [15:0] io_req_bits_max_pixels_per_row_0 = io_req_bits_max_pixels_per_row; // @[LoopConv.scala:235:7]
wire io_req_bits_input_dilated_0 = io_req_bits_input_dilated; // @[LoopConv.scala:235:7]
wire io_req_bits_trans_input_3120_0 = io_req_bits_trans_input_3120; // @[LoopConv.scala:235:7]
wire io_req_bits_loop_id_0 = io_req_bits_loop_id; // @[LoopConv.scala:235:7]
wire io_cmd_ready_0 = io_cmd_ready; // @[LoopConv.scala:235:7]
wire io_rob_overloaded_0 = io_rob_overloaded; // @[LoopConv.scala:235:7]
wire io_wait_for_prev_loop_0 = io_wait_for_prev_loop; // @[LoopConv.scala:235:7]
wire [2:0] config_cmd_rs1__spacer0 = 3'h0; // @[LoopConv.scala:319:28]
wire [1:0] config_cmd_rs1__unused = 2'h1; // @[LoopConv.scala:319:28, :343:41]
wire [2:0] config_cmd_rs1_lo_lo = 3'h1; // @[LoopConv.scala:327:36]
wire [31:0] config_cmd_rs1_scale = 32'h3F800000; // @[LoopConv.scala:319:28]
wire [31:0] config_cmd_rs1_hi_hi_hi = 32'h3F800000; // @[LoopConv.scala:327:36]
wire [33:0] config_cmd_rs1_hi_hi = 34'hFE000000; // @[LoopConv.scala:327:36]
wire [6:0] mvin_cmd_inst_funct = 7'h2; // @[LoopConv.scala:331:22, :352:46]
wire [63:0] mvin_cmd_rs1 = 64'h0; // @[LoopConv.scala:331:22]
wire [63:0] mvin_cmd_rs2 = 64'h0; // @[LoopConv.scala:331:22]
wire [4:0] config_cmd_inst_rs2 = 5'h0; // @[LoopConv.scala:315:24]
wire [4:0] config_cmd_inst_rs1 = 5'h0; // @[LoopConv.scala:315:24]
wire [4:0] config_cmd_inst_rd = 5'h0; // @[LoopConv.scala:315:24]
wire [4:0] config_cmd_rs1__spacer1 = 5'h0; // @[LoopConv.scala:319:28]
wire [4:0] mvin_cmd_inst_rs2 = 5'h0; // @[LoopConv.scala:331:22]
wire [4:0] mvin_cmd_inst_rs1 = 5'h0; // @[LoopConv.scala:331:22]
wire [4:0] mvin_cmd_inst_rd = 5'h0; // @[LoopConv.scala:331:22]
wire [4:0] _command_p_io_in_bits_cmd_T_1_inst_rs2 = 5'h0; // @[LoopConv.scala:343:34]
wire [4:0] _command_p_io_in_bits_cmd_T_1_inst_rs1 = 5'h0; // @[LoopConv.scala:343:34]
wire [4:0] _command_p_io_in_bits_cmd_T_1_inst_rd = 5'h0; // @[LoopConv.scala:343:34]
wire [6:0] config_cmd_inst_funct = 7'h0; // @[LoopConv.scala:315:24]
wire [6:0] config_cmd_inst_opcode = 7'h0; // @[LoopConv.scala:315:24]
wire [6:0] mvin_cmd_inst_opcode = 7'h0; // @[LoopConv.scala:331:22]
wire [6:0] _command_p_io_in_bits_cmd_T_1_inst_opcode = 7'h0; // @[LoopConv.scala:343:34]
wire [31:0] config_cmd_status_isa = 32'h0; // @[LoopConv.scala:315:24]
wire [31:0] mvin_cmd_status_isa = 32'h0; // @[LoopConv.scala:331:22]
wire [31:0] _command_p_io_in_bits_cmd_T_1_status_isa = 32'h0; // @[LoopConv.scala:343:34]
wire [22:0] config_cmd_status_zero2 = 23'h0; // @[LoopConv.scala:315:24]
wire [22:0] mvin_cmd_status_zero2 = 23'h0; // @[LoopConv.scala:331:22]
wire [22:0] _command_p_io_in_bits_cmd_T_1_status_zero2 = 23'h0; // @[LoopConv.scala:343:34]
wire [7:0] config_cmd_status_zero1 = 8'h0; // @[LoopConv.scala:315:24]
wire [7:0] mvin_cmd_status_zero1 = 8'h0; // @[LoopConv.scala:331:22]
wire [7:0] _command_p_io_in_bits_cmd_T_1_status_zero1 = 8'h0; // @[LoopConv.scala:343:34]
wire [12:0] mvin_cmd_rs2__spacer2 = 13'h0; // @[LoopConv.scala:355:28]
wire [10:0] mvin_cmd_rs2__spacer1 = 11'h0; // @[LoopConv.scala:355:28]
wire [10:0] mvin_cmd_rs2_local_addr_garbage = 11'h0; // @[LoopConv.scala:355:28]
wire [10:0] mvin_cmd_rs2_local_addr_result_result_garbage = 11'h0; // @[LocalAddr.scala:108:26]
wire [10:0] mvin_cmd_rs2_local_addr_result_garbage = 11'h0; // @[LocalAddr.scala:116:26]
wire [1:0] config_cmd_status_dprv = 2'h0; // @[LoopConv.scala:315:24]
wire [1:0] config_cmd_status_prv = 2'h0; // @[LoopConv.scala:315:24]
wire [1:0] config_cmd_status_sxl = 2'h0; // @[LoopConv.scala:315:24]
wire [1:0] config_cmd_status_uxl = 2'h0; // @[LoopConv.scala:315:24]
wire [1:0] config_cmd_status_xs = 2'h0; // @[LoopConv.scala:315:24]
wire [1:0] config_cmd_status_fs = 2'h0; // @[LoopConv.scala:315:24]
wire [1:0] config_cmd_status_mpp = 2'h0; // @[LoopConv.scala:315:24]
wire [1:0] config_cmd_status_vs = 2'h0; // @[LoopConv.scala:315:24]
wire [1:0] config_cmd_rs1__spacer2 = 2'h0; // @[LoopConv.scala:319:28]
wire [1:0] config_cmd_rs1_state_id = 2'h0; // @[LoopConv.scala:319:28]
wire [1:0] mvin_cmd_status_dprv = 2'h0; // @[LoopConv.scala:331:22]
wire [1:0] mvin_cmd_status_prv = 2'h0; // @[LoopConv.scala:331:22]
wire [1:0] mvin_cmd_status_sxl = 2'h0; // @[LoopConv.scala:331:22]
wire [1:0] mvin_cmd_status_uxl = 2'h0; // @[LoopConv.scala:331:22]
wire [1:0] mvin_cmd_status_xs = 2'h0; // @[LoopConv.scala:331:22]
wire [1:0] mvin_cmd_status_fs = 2'h0; // @[LoopConv.scala:331:22]
wire [1:0] mvin_cmd_status_mpp = 2'h0; // @[LoopConv.scala:331:22]
wire [1:0] mvin_cmd_status_vs = 2'h0; // @[LoopConv.scala:331:22]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_dprv = 2'h0; // @[LoopConv.scala:343:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_prv = 2'h0; // @[LoopConv.scala:343:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_sxl = 2'h0; // @[LoopConv.scala:343:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_uxl = 2'h0; // @[LoopConv.scala:343:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_xs = 2'h0; // @[LoopConv.scala:343:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_fs = 2'h0; // @[LoopConv.scala:343:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_mpp = 2'h0; // @[LoopConv.scala:343:34]
wire [1:0] _command_p_io_in_bits_cmd_T_1_status_vs = 2'h0; // @[LoopConv.scala:343:34]
wire [1:0] io_cmd_bits_rs2_hi_hi = 2'h0; // @[LoopConv.scala:360:37]
wire config_cmd_inst_xd = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_inst_xs1 = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_inst_xs2 = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_debug = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_cease = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_wfi = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_dv = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_v = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_sd = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_mpv = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_gva = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_mbe = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_sbe = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_sd_rv32 = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_tsr = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_tw = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_tvm = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_mxr = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_sum = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_mprv = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_spp = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_mpie = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_ube = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_spie = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_upie = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_mie = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_hie = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_sie = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_status_uie = 1'h0; // @[LoopConv.scala:315:24]
wire config_cmd_rs1_shrink = 1'h0; // @[LoopConv.scala:319:28]
wire mvin_cmd_inst_xd = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_inst_xs1 = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_inst_xs2 = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_debug = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_cease = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_wfi = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_dv = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_v = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_sd = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_mpv = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_gva = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_mbe = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_sbe = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_sd_rv32 = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_tsr = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_tw = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_tvm = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_mxr = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_sum = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_mprv = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_spp = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_mpie = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_ube = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_spie = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_upie = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_mie = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_hie = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_sie = 1'h0; // @[LoopConv.scala:331:22]
wire mvin_cmd_status_uie = 1'h0; // @[LoopConv.scala:331:22]
wire _io_req_ready_T_2; // @[LoopConv.scala:338:34]
wire _command_p_io_in_bits_cmd_T_1_inst_xd = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_inst_xs1 = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_inst_xs2 = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_debug = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_cease = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_wfi = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_dv = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_v = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_sd = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_mpv = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_gva = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_mbe = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_sbe = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_sd_rv32 = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_tsr = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_tw = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_tvm = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_mxr = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_sum = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_mprv = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_spp = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_mpie = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_ube = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_spie = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_upie = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_mie = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_hie = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_sie = 1'h0; // @[LoopConv.scala:343:34]
wire _command_p_io_in_bits_cmd_T_1_status_uie = 1'h0; // @[LoopConv.scala:343:34]
wire mvin_cmd_rs2_local_addr_is_acc_addr = 1'h0; // @[LoopConv.scala:355:28]
wire mvin_cmd_rs2_local_addr_accumulate = 1'h0; // @[LoopConv.scala:355:28]
wire mvin_cmd_rs2_local_addr_read_full_acc_row = 1'h0; // @[LoopConv.scala:355:28]
wire mvin_cmd_rs2_local_addr_result_is_acc_addr = 1'h0; // @[LocalAddr.scala:116:26]
wire mvin_cmd_rs2_local_addr_result_accumulate = 1'h0; // @[LocalAddr.scala:116:26]
wire mvin_cmd_rs2_local_addr_result_read_full_acc_row = 1'h0; // @[LocalAddr.scala:116:26]
wire _next_ich_T_5 = 1'h0; // @[Util.scala:51:8]
wire _io_cmd_valid_T_1; // @[LoopConv.scala:350:42]
wire _io_idle_T_2; // @[LoopConv.scala:339:29]
wire io_req_ready_0; // @[LoopConv.scala:235:7]
wire [6:0] io_cmd_bits_inst_funct_0; // @[LoopConv.scala:235:7]
wire [4:0] io_cmd_bits_inst_rs2_0; // @[LoopConv.scala:235:7]
wire [4:0] io_cmd_bits_inst_rs1_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_inst_xd_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_inst_xs1_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_inst_xs2_0; // @[LoopConv.scala:235:7]
wire [4:0] io_cmd_bits_inst_rd_0; // @[LoopConv.scala:235:7]
wire [6:0] io_cmd_bits_inst_opcode_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_debug_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_cease_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_wfi_0; // @[LoopConv.scala:235:7]
wire [31:0] io_cmd_bits_status_isa_0; // @[LoopConv.scala:235:7]
wire [1:0] io_cmd_bits_status_dprv_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_dv_0; // @[LoopConv.scala:235:7]
wire [1:0] io_cmd_bits_status_prv_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_v_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_sd_0; // @[LoopConv.scala:235:7]
wire [22:0] io_cmd_bits_status_zero2_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_mpv_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_gva_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_mbe_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_sbe_0; // @[LoopConv.scala:235:7]
wire [1:0] io_cmd_bits_status_sxl_0; // @[LoopConv.scala:235:7]
wire [1:0] io_cmd_bits_status_uxl_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_sd_rv32_0; // @[LoopConv.scala:235:7]
wire [7:0] io_cmd_bits_status_zero1_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_tsr_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_tw_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_tvm_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_mxr_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_sum_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_mprv_0; // @[LoopConv.scala:235:7]
wire [1:0] io_cmd_bits_status_xs_0; // @[LoopConv.scala:235:7]
wire [1:0] io_cmd_bits_status_fs_0; // @[LoopConv.scala:235:7]
wire [1:0] io_cmd_bits_status_mpp_0; // @[LoopConv.scala:235:7]
wire [1:0] io_cmd_bits_status_vs_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_spp_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_mpie_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_ube_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_spie_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_upie_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_mie_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_hie_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_sie_0; // @[LoopConv.scala:235:7]
wire io_cmd_bits_status_uie_0; // @[LoopConv.scala:235:7]
wire [63:0] io_cmd_bits_rs1_0; // @[LoopConv.scala:235:7]
wire [63:0] io_cmd_bits_rs2_0; // @[LoopConv.scala:235:7]
wire io_cmd_valid_0; // @[LoopConv.scala:235:7]
wire io_idle_0; // @[LoopConv.scala:235:7]
wire io_loop_id_0; // @[LoopConv.scala:235:7]
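  // FSM state and latched request fields. The req_* registers below capture the
  // io_req_bits_* inputs when a request is accepted (see the always block at the
  // bottom of this module); field names follow the LoopConv.scala request bundle.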
reg [1:0] state; // @[LoopConv.scala:256:22]
reg [15:0] req_outer_bounds_batch_size; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_in_row_dim; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_in_col_dim; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_in_channels; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_out_channels; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_out_col_dim; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_out_row_dim; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_out_stride; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_in_stride; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_weight_stride; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_pool_out_row_dim; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_pool_out_col_dim; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_stride; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_padding; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_kernel_dim; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_kernel_dilation; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_pool_size; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_pool_stride; // @[LoopConv.scala:258:16]
reg [15:0] req_outer_bounds_pool_padding; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_batches; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_porows; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_pocols; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_pochs; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_krows; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_kcols; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_kchs; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_lpad; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_rpad; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_upad; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_dpad; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_plpad; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_prad; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_pupad; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_pdpad; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_orows; // @[LoopConv.scala:258:16]
reg [15:0] req_inner_bounds_ocols; // @[LoopConv.scala:258:16]
reg [15:0] req_derived_params_ochs; // @[LoopConv.scala:258:16]
reg [15:0] req_derived_params_irows; // @[LoopConv.scala:258:16]
reg [15:0] req_derived_params_icols; // @[LoopConv.scala:258:16]
reg [15:0] req_derived_params_irows_unpadded; // @[LoopConv.scala:258:16]
reg [15:0] req_derived_params_icols_unpadded; // @[LoopConv.scala:258:16]
reg [15:0] req_derived_params_ichs; // @[LoopConv.scala:258:16]
reg [15:0] req_derived_params_out_channels_per_bank; // @[LoopConv.scala:258:16]
reg [15:0] req_derived_params_in_channels_per_bank; // @[LoopConv.scala:258:16]
reg [15:0] req_derived_params_bias_spad_stride; // @[LoopConv.scala:258:16]
reg [15:0] req_derived_params_input_spad_stride; // @[LoopConv.scala:258:16]
reg [15:0] req_derived_params_weight_spad_stride; // @[LoopConv.scala:258:16]
reg [13:0] req_addr_start; // @[LoopConv.scala:258:16]
reg [39:0] req_dram_addr; // @[LoopConv.scala:258:16]
reg req_downsample; // @[LoopConv.scala:258:16]
reg [15:0] req_max_pixels_per_row; // @[LoopConv.scala:258:16]
reg req_input_dilated; // @[LoopConv.scala:258:16]
reg req_trans_input_3120; // @[LoopConv.scala:258:16]
reg req_loop_id; // @[LoopConv.scala:258:16]
assign io_loop_id_0 = req_loop_id; // @[LoopConv.scala:235:7, :258:16]
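  // Per-mvin transfer limit: at most 16 input channels (or 16 batches when
  // req_trans_input_3120 is set) are moved in by a single mvin command.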
wire _max_ichs_per_mvin_T = req_derived_params_ichs < 16'h10; // @[LoopConv.scala:258:16, :266:36]
wire [15:0] _max_ichs_per_mvin_T_1 = _max_ichs_per_mvin_T ? req_derived_params_ichs : 16'h10; // @[LoopConv.scala:258:16, :266:{30,36}]
wire [16:0] max_ichs_per_mvin = {1'h0, _max_ichs_per_mvin_T_1}; // @[LoopConv.scala:266:{30,108}]
wire _max_batches_per_mvin_T = req_inner_bounds_batches < 16'h10; // @[LoopConv.scala:258:16, :266:36, :267:42]
wire [15:0] _max_batches_per_mvin_T_1 = _max_batches_per_mvin_T ? req_inner_bounds_batches : 16'h10; // @[LoopConv.scala:258:16, :266:36, :267:{33,42}]
wire [16:0] max_batches_per_mvin = {1'h0, _max_batches_per_mvin_T_1}; // @[LoopConv.scala:267:{33,117}]
wire [16:0] max_chs_per_mvin = req_trans_input_3120 ? max_batches_per_mvin : max_ichs_per_mvin; // @[LoopConv.scala:258:16, :266:108, :267:117, :268:29]
wire [16:0] _b_it_T = max_chs_per_mvin; // @[LoopConv.scala:268:29, :370:61]
wire [16:0] _ich_it_T = max_chs_per_mvin; // @[LoopConv.scala:268:29, :371:68]
reg [15:0] b; // @[LoopConv.scala:271:14]
reg [15:0] irow; // @[LoopConv.scala:272:17]
reg [15:0] icol; // @[LoopConv.scala:273:17]
reg [15:0] ich; // @[LoopConv.scala:274:16]
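  // Padded coordinates and zero detection: irow_padded/icol_padded offset the
  // iterators by the upper/left padding (adjusted for input dilation), and
  // is_zeros flags positions outside the unpadded input extent, for which zeros
  // rather than DRAM data are moved in.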
wire [16:0] _GEN = {16'h0, req_input_dilated}; // @[LoopConv.scala:258:16, :263:37, :279:23]
wire [16:0] _GEN_0 = {1'h0, req_inner_bounds_upad} + _GEN; // @[LoopConv.scala:258:16, :263:37]
wire [16:0] _irow_padded_T; // @[LoopConv.scala:263:37]
assign _irow_padded_T = _GEN_0; // @[LoopConv.scala:263:37]
wire [16:0] _next_irow_T_5; // @[LoopConv.scala:263:37]
assign _next_irow_T_5 = _GEN_0; // @[LoopConv.scala:263:37]
wire [16:0] _next_b_T_1; // @[LoopConv.scala:263:37]
assign _next_b_T_1 = _GEN_0; // @[LoopConv.scala:263:37]
wire [16:0] _state_T_1; // @[LoopConv.scala:263:37]
assign _state_T_1 = _GEN_0; // @[LoopConv.scala:263:37]
wire [16:0] _irow_padded_T_1 = _irow_padded_T >> _GEN; // @[LoopConv.scala:263:{37,59}]
wire [17:0] _irow_padded_T_2 = {1'h0, _irow_padded_T_1}; // @[LoopConv.scala:263:59, :277:45]
wire [18:0] _GEN_1 = {{3{irow[15]}}, irow}; // @[LoopConv.scala:272:17, :277:26]
wire [18:0] irow_padded = _GEN_1 + {_irow_padded_T_2[17], _irow_padded_T_2}; // @[LoopConv.scala:277:{26,45}]
wire [16:0] _GEN_2 = {1'h0, req_inner_bounds_lpad} + _GEN; // @[LoopConv.scala:258:16, :263:37]
wire [16:0] _icol_padded_T; // @[LoopConv.scala:263:37]
assign _icol_padded_T = _GEN_2; // @[LoopConv.scala:263:37]
wire [16:0] _next_icol_T_5; // @[LoopConv.scala:263:37]
assign _next_icol_T_5 = _GEN_2; // @[LoopConv.scala:263:37]
wire [16:0] _next_irow_T_9; // @[LoopConv.scala:263:37]
assign _next_irow_T_9 = _GEN_2; // @[LoopConv.scala:263:37]
wire [16:0] _next_b_T_6; // @[LoopConv.scala:263:37]
assign _next_b_T_6 = _GEN_2; // @[LoopConv.scala:263:37]
wire [16:0] _state_T_7; // @[LoopConv.scala:263:37]
assign _state_T_7 = _GEN_2; // @[LoopConv.scala:263:37]
wire [16:0] _icol_padded_T_1 = _icol_padded_T >> _GEN; // @[LoopConv.scala:263:{37,59}]
wire [17:0] _icol_padded_T_2 = {1'h0, _icol_padded_T_1}; // @[LoopConv.scala:263:59, :278:45]
wire [18:0] icol_padded = {{3{icol[15]}}, icol} + {_icol_padded_T_2[17], _icol_padded_T_2}; // @[LoopConv.scala:273:17, :278:{26,45}]
wire _is_zeros_T = $signed(irow) < 16'sh0; // @[LoopConv.scala:272:17, :279:23]
wire [16:0] _is_zeros_T_1 = {1'h0, req_derived_params_irows_unpadded}; // @[LoopConv.scala:258:16, :279:55]
wire [16:0] _GEN_3 = {irow[15], irow}; // @[LoopConv.scala:272:17, :277:26, :279:37]
wire _is_zeros_T_2 = $signed(_GEN_3) >= $signed(_is_zeros_T_1); // @[LoopConv.scala:279:{37,55}]
wire _is_zeros_T_3 = _is_zeros_T | _is_zeros_T_2; // @[LoopConv.scala:279:{23,29,37}]
wire _GEN_4 = $signed(icol) < 16'sh0; // @[LoopConv.scala:273:17, :279:{23,68}]
wire _is_zeros_T_4; // @[LoopConv.scala:279:68]
assign _is_zeros_T_4 = _GEN_4; // @[LoopConv.scala:279:68]
wire _I_T_6; // @[LoopConv.scala:298:13]
assign _I_T_6 = _GEN_4; // @[LoopConv.scala:279:68, :298:13]
wire _is_zeros_T_5 = _is_zeros_T_3 | _is_zeros_T_4; // @[LoopConv.scala:279:{29,60,68}]
wire [16:0] _GEN_5 = {1'h0, req_derived_params_icols_unpadded}; // @[LoopConv.scala:258:16, :279:100]
wire [16:0] _is_zeros_T_6; // @[LoopConv.scala:279:100]
assign _is_zeros_T_6 = _GEN_5; // @[LoopConv.scala:279:100]
wire [16:0] _I_T; // @[LoopConv.scala:296:24]
assign _I_T = _GEN_5; // @[LoopConv.scala:279:100, :296:24]
wire [16:0] _I_T_3; // @[LoopConv.scala:296:102]
assign _I_T_3 = _GEN_5; // @[LoopConv.scala:279:100, :296:102]
wire [16:0] _I_T_11; // @[LoopConv.scala:299:31]
assign _I_T_11 = _GEN_5; // @[LoopConv.scala:279:100, :299:31]
wire [16:0] _I_T_13; // @[LoopConv.scala:299:59]
assign _I_T_13 = _GEN_5; // @[LoopConv.scala:279:100, :299:59]
wire [16:0] _I_T_20; // @[LoopConv.scala:299:141]
assign _I_T_20 = _GEN_5; // @[LoopConv.scala:279:100, :299:141]
wire [16:0] _GEN_6 = {icol[15], icol}; // @[LoopConv.scala:273:17, :278:26, :279:82]
wire _is_zeros_T_7 = $signed(_GEN_6) >= $signed(_is_zeros_T_6); // @[LoopConv.scala:279:{82,100}]
wire is_zeros = _is_zeros_T_5 | _is_zeros_T_7; // @[LoopConv.scala:279:{60,74,82}]
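  // DRAM address generation: dram_stride and dram_offset select between the
  // trans_input_3120 (batch-innermost) and default (channel-innermost) layouts,
  // with the trailing 2'h0 concatenation scaling the element offset by a
  // constant factor of 4; dram_addr is forced to zero for padding positions.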
wire [18:0] _dram_stride_T = {1'h0, req_outer_bounds_batch_size, 2'h0}; // @[LoopConv.scala:258:16, :281:58]
wire [18:0] _dram_stride_T_1 = {1'h0, req_outer_bounds_in_stride, 2'h0}; // @[LoopConv.scala:258:16, :281:85]
wire [18:0] dram_stride = req_trans_input_3120 ? _dram_stride_T : _dram_stride_T_1; // @[LoopConv.scala:258:16, :281:{24,58,85}]
wire [16:0] _GEN_7 = {1'h0, req_outer_bounds_in_col_dim}; // @[LoopConv.scala:258:16, :284:54]
wire [16:0] _dram_offset_T; // @[LoopConv.scala:284:54]
assign _dram_offset_T = _GEN_7; // @[LoopConv.scala:284:54]
wire [16:0] _dram_offset_T_8; // @[LoopConv.scala:284:87]
assign _dram_offset_T_8 = _GEN_7; // @[LoopConv.scala:284:{54,87}]
wire [16:0] _dram_offset_T_27; // @[LoopConv.scala:285:23]
assign _dram_offset_T_27 = _GEN_7; // @[LoopConv.scala:284:54, :285:23]
wire [16:0] _dram_offset_T_31; // @[LoopConv.scala:285:43]
assign _dram_offset_T_31 = _GEN_7; // @[LoopConv.scala:284:54, :285:43]
wire [32:0] _GEN_8 = {{17{ich[15]}}, ich}; // @[LoopConv.scala:274:16, :284:54]
wire [32:0] _dram_offset_T_1 = _GEN_8 * {{16{_dram_offset_T[16]}}, _dram_offset_T}; // @[LoopConv.scala:284:54]
wire [31:0] _dram_offset_T_2 = _dram_offset_T_1[31:0]; // @[LoopConv.scala:284:54]
wire [31:0] _dram_offset_T_3 = _dram_offset_T_2; // @[LoopConv.scala:284:54]
wire [16:0] _GEN_9 = {1'h0, req_outer_bounds_in_row_dim}; // @[LoopConv.scala:258:16, :284:67]
wire [16:0] _dram_offset_T_4; // @[LoopConv.scala:284:67]
assign _dram_offset_T_4 = _GEN_9; // @[LoopConv.scala:284:67]
wire [16:0] _dram_offset_T_23; // @[LoopConv.scala:285:10]
assign _dram_offset_T_23 = _GEN_9; // @[LoopConv.scala:284:67, :285:10]
wire [48:0] _dram_offset_T_5 = {{17{_dram_offset_T_3[31]}}, _dram_offset_T_3} * {{32{_dram_offset_T_4[16]}}, _dram_offset_T_4}; // @[LoopConv.scala:284:{54,67}]
wire [47:0] _dram_offset_T_6 = _dram_offset_T_5[47:0]; // @[LoopConv.scala:284:67]
wire [47:0] _dram_offset_T_7 = _dram_offset_T_6; // @[LoopConv.scala:284:67]
wire [32:0] _GEN_10 = {{17{irow[15]}}, irow}; // @[LoopConv.scala:272:17, :277:26, :284:87]
wire [32:0] _dram_offset_T_9 = _GEN_10 * {{16{_dram_offset_T_8[16]}}, _dram_offset_T_8}; // @[LoopConv.scala:284:87]
wire [31:0] _dram_offset_T_10 = _dram_offset_T_9[31:0]; // @[LoopConv.scala:284:87]
wire [31:0] _dram_offset_T_11 = _dram_offset_T_10; // @[LoopConv.scala:284:87]
wire [48:0] _dram_offset_T_12 = {_dram_offset_T_7[47], _dram_offset_T_7} + {{17{_dram_offset_T_11[31]}}, _dram_offset_T_11}; // @[LoopConv.scala:284:{67,80,87}]
wire [49:0] _GEN_11 = {{34{icol[15]}}, icol}; // @[LoopConv.scala:273:17, :278:26, :284:99]
wire [49:0] _dram_offset_T_13 = {_dram_offset_T_12[48], _dram_offset_T_12} + _GEN_11; // @[LoopConv.scala:284:{80,99}]
wire [16:0] _GEN_12 = {1'h0, req_inner_bounds_batches}; // @[LoopConv.scala:258:16, :284:108]
wire [16:0] _dram_offset_T_14; // @[LoopConv.scala:284:108]
assign _dram_offset_T_14 = _GEN_12; // @[LoopConv.scala:284:108]
wire [16:0] _K_T; // @[LoopConv.scala:303:17]
assign _K_T = _GEN_12; // @[LoopConv.scala:284:108, :303:17]
wire [16:0] _K_T_3; // @[LoopConv.scala:303:73]
assign _K_T_3 = _GEN_12; // @[LoopConv.scala:284:108, :303:73]
wire [16:0] _next_b_T; // @[LoopConv.scala:378:47]
assign _next_b_T = _GEN_12; // @[LoopConv.scala:284:108, :378:47]
wire [66:0] _dram_offset_T_15 = {{17{_dram_offset_T_13[49]}}, _dram_offset_T_13} * {{50{_dram_offset_T_14[16]}}, _dram_offset_T_14}; // @[LoopConv.scala:284:{99,108}]
wire [65:0] _dram_offset_T_16 = _dram_offset_T_15[65:0]; // @[LoopConv.scala:284:108]
wire [65:0] _dram_offset_T_17 = _dram_offset_T_16; // @[LoopConv.scala:284:108]
wire [66:0] _dram_offset_T_18 = {_dram_offset_T_17[65], _dram_offset_T_17} + {{51{b[15]}}, b}; // @[LoopConv.scala:271:14, :284:{108,118}]
wire [70:0] _dram_offset_T_19 = {{2{_dram_offset_T_18[66]}}, _dram_offset_T_18, 2'h0}; // @[LoopConv.scala:284:{118,124}]
wire [69:0] _dram_offset_T_20 = _dram_offset_T_19[69:0]; // @[LoopConv.scala:284:124]
wire [69:0] _dram_offset_T_21 = _dram_offset_T_20; // @[LoopConv.scala:284:124]
wire [69:0] _dram_offset_T_22 = _dram_offset_T_21; // @[LoopConv.scala:284:{124,141}]
wire [32:0] _GEN_13 = {{17{b[15]}}, b}; // @[LoopConv.scala:271:14, :284:118, :285:10]
wire [32:0] _dram_offset_T_24 = _GEN_13 * {{16{_dram_offset_T_23[16]}}, _dram_offset_T_23}; // @[LoopConv.scala:285:10]
wire [31:0] _dram_offset_T_25 = _dram_offset_T_24[31:0]; // @[LoopConv.scala:285:10]
wire [31:0] _dram_offset_T_26 = _dram_offset_T_25; // @[LoopConv.scala:285:10]
wire [48:0] _dram_offset_T_28 = {{17{_dram_offset_T_26[31]}}, _dram_offset_T_26} * {{32{_dram_offset_T_27[16]}}, _dram_offset_T_27}; // @[LoopConv.scala:285:{10,23}]
wire [47:0] _dram_offset_T_29 = _dram_offset_T_28[47:0]; // @[LoopConv.scala:285:23]
wire [47:0] _dram_offset_T_30 = _dram_offset_T_29; // @[LoopConv.scala:285:23]
wire [32:0] _dram_offset_T_32 = _GEN_10 * {{16{_dram_offset_T_31[16]}}, _dram_offset_T_31}; // @[LoopConv.scala:284:87, :285:43]
wire [31:0] _dram_offset_T_33 = _dram_offset_T_32[31:0]; // @[LoopConv.scala:285:43]
wire [31:0] _dram_offset_T_34 = _dram_offset_T_33; // @[LoopConv.scala:285:43]
wire [48:0] _dram_offset_T_35 = {_dram_offset_T_30[47], _dram_offset_T_30} + {{17{_dram_offset_T_34[31]}}, _dram_offset_T_34}; // @[LoopConv.scala:285:{23,36,43}]
wire [49:0] _dram_offset_T_36 = {_dram_offset_T_35[48], _dram_offset_T_35} + _GEN_11; // @[LoopConv.scala:284:99, :285:{36,55}]
wire [16:0] _dram_offset_T_37 = {1'h0, req_outer_bounds_in_stride}; // @[LoopConv.scala:258:16, :285:64]
wire [66:0] _dram_offset_T_38 = {{17{_dram_offset_T_36[49]}}, _dram_offset_T_36} * {{50{_dram_offset_T_37[16]}}, _dram_offset_T_37}; // @[LoopConv.scala:285:{55,64}]
wire [65:0] _dram_offset_T_39 = _dram_offset_T_38[65:0]; // @[LoopConv.scala:285:64]
wire [65:0] _dram_offset_T_40 = _dram_offset_T_39; // @[LoopConv.scala:285:64]
wire [66:0] _dram_offset_T_41 = {_dram_offset_T_40[65], _dram_offset_T_40} + {{51{ich[15]}}, ich}; // @[LoopConv.scala:274:16, :284:54, :285:{64,76}]
wire [70:0] _dram_offset_T_42 = {{2{_dram_offset_T_41[66]}}, _dram_offset_T_41, 2'h0}; // @[LoopConv.scala:285:{76,84}]
wire [69:0] _dram_offset_T_43 = _dram_offset_T_42[69:0]; // @[LoopConv.scala:285:84]
wire [69:0] _dram_offset_T_44 = _dram_offset_T_43; // @[LoopConv.scala:285:84]
wire [69:0] _dram_offset_T_45 = _dram_offset_T_44; // @[LoopConv.scala:285:{84,101}]
wire [69:0] dram_offset = req_trans_input_3120 ? _dram_offset_T_22 : _dram_offset_T_45; // @[LoopConv.scala:258:16, :284:{24,141}, :285:101]
wire [69:0] _dram_addr_T = {38'h0, dram_offset[31:0]}; // @[LoopConv.scala:284:24, :1556:17]
wire [70:0] _dram_addr_T_1 = {31'h0, req_dram_addr} + {1'h0, _dram_addr_T}; // @[LoopConv.scala:258:16, :286:52, :1556:17]
wire [69:0] _dram_addr_T_2 = _dram_addr_T_1[69:0]; // @[LoopConv.scala:286:52]
wire [69:0] dram_addr = is_zeros ? 70'h0 : _dram_addr_T_2; // @[LoopConv.scala:279:74, :286:{22,52}]
wire [14:0] _GEN_14 = {1'h0, req_addr_start}; // @[LoopConv.scala:258:16, :289:20]
wire [14:0] _spad_addr_T; // @[LoopConv.scala:289:20]
assign _spad_addr_T = _GEN_14; // @[LoopConv.scala:289:20]
wire [14:0] _spad_addr_T_27; // @[LoopConv.scala:290:20]
assign _spad_addr_T_27 = _GEN_14; // @[LoopConv.scala:289:20, :290:20]
wire [13:0] _spad_addr_T_1 = b[15:2]; // @[LoopConv.scala:271:14, :289:31]
wire [16:0] _GEN_15 = {1'h0, req_derived_params_input_spad_stride}; // @[LoopConv.scala:258:16, :289:54]
wire [16:0] _spad_addr_T_2; // @[LoopConv.scala:289:54]
assign _spad_addr_T_2 = _GEN_15; // @[LoopConv.scala:289:54]
wire [16:0] _spad_addr_T_29; // @[LoopConv.scala:290:56]
assign _spad_addr_T_29 = _GEN_15; // @[LoopConv.scala:289:54, :290:56]
wire [30:0] _spad_addr_T_3 = {{17{_spad_addr_T_1[13]}}, _spad_addr_T_1} * {{14{_spad_addr_T_2[16]}}, _spad_addr_T_2}; // @[LoopConv.scala:289:{31,54}]
wire [29:0] _spad_addr_T_4 = _spad_addr_T_3[29:0]; // @[LoopConv.scala:289:54]
wire [29:0] _spad_addr_T_5 = _spad_addr_T_4; // @[LoopConv.scala:289:54]
wire [30:0] _spad_addr_T_6 = {{16{_spad_addr_T[14]}}, _spad_addr_T} + {_spad_addr_T_5[29], _spad_addr_T_5}; // @[LoopConv.scala:289:{20,25,54}]
wire [15:0] _GEN_16 = {15'h0, req_downsample}; // @[LoopConv.scala:258:16, :289:90]
wire [15:0] _GEN_17 = req_derived_params_irows >> _GEN_16; // @[LoopConv.scala:258:16, :289:90]
wire [15:0] _spad_addr_T_7; // @[LoopConv.scala:289:90]
assign _spad_addr_T_7 = _GEN_17; // @[LoopConv.scala:289:90]
wire [15:0] _spad_addr_T_34; // @[LoopConv.scala:290:90]
assign _spad_addr_T_34 = _GEN_17; // @[LoopConv.scala:289:90, :290:90]
wire [16:0] _spad_addr_T_8 = {1'h0, _spad_addr_T_7}; // @[LoopConv.scala:289:{81,90}]
wire [32:0] _spad_addr_T_9 = _GEN_8 * {{16{_spad_addr_T_8[16]}}, _spad_addr_T_8}; // @[LoopConv.scala:284:54, :289:81]
wire [31:0] _spad_addr_T_10 = _spad_addr_T_9[31:0]; // @[LoopConv.scala:289:81]
wire [31:0] _spad_addr_T_11 = _spad_addr_T_10; // @[LoopConv.scala:289:81]
wire [15:0] _GEN_18 = req_derived_params_icols >> _GEN_16; // @[LoopConv.scala:258:16, :289:{90,118}]
wire [15:0] _spad_addr_T_12; // @[LoopConv.scala:289:118]
assign _spad_addr_T_12 = _GEN_18; // @[LoopConv.scala:289:118]
wire [15:0] _spad_addr_T_19; // @[LoopConv.scala:289:181]
assign _spad_addr_T_19 = _GEN_18; // @[LoopConv.scala:289:{118,181}]
wire [15:0] _spad_addr_T_39; // @[LoopConv.scala:290:118]
assign _spad_addr_T_39 = _GEN_18; // @[LoopConv.scala:289:118, :290:118]
wire [15:0] _spad_addr_T_46; // @[LoopConv.scala:290:181]
assign _spad_addr_T_46 = _GEN_18; // @[LoopConv.scala:289:118, :290:181]
wire [16:0] _spad_addr_T_13 = {1'h0, _spad_addr_T_12}; // @[LoopConv.scala:289:{109,118}]
wire [48:0] _spad_addr_T_14 = {{17{_spad_addr_T_11[31]}}, _spad_addr_T_11} * {{32{_spad_addr_T_13[16]}}, _spad_addr_T_13}; // @[LoopConv.scala:289:{81,109}]
wire [47:0] _spad_addr_T_15 = _spad_addr_T_14[47:0]; // @[LoopConv.scala:289:109]
wire [47:0] _spad_addr_T_16 = _spad_addr_T_15; // @[LoopConv.scala:289:109]
wire [48:0] _spad_addr_T_17 = {{18{_spad_addr_T_6[30]}}, _spad_addr_T_6} + {_spad_addr_T_16[47], _spad_addr_T_16}; // @[LoopConv.scala:289:{25,74,109}]
wire [18:0] _GEN_19 = {18'h0, req_downsample}; // @[LoopConv.scala:258:16, :289:153]
wire [18:0] _GEN_20 = $signed($signed(irow_padded) >>> _GEN_19); // @[LoopConv.scala:277:26, :289:153]
wire [18:0] _spad_addr_T_18; // @[LoopConv.scala:289:153]
assign _spad_addr_T_18 = _GEN_20; // @[LoopConv.scala:289:153]
wire [18:0] _spad_addr_T_45; // @[LoopConv.scala:290:153]
assign _spad_addr_T_45 = _GEN_20; // @[LoopConv.scala:289:153, :290:153]
wire [16:0] _spad_addr_T_20 = {1'h0, _spad_addr_T_19}; // @[LoopConv.scala:289:{172,181}]
wire [35:0] _spad_addr_T_21 = {{17{_spad_addr_T_18[18]}}, _spad_addr_T_18} * {{19{_spad_addr_T_20[16]}}, _spad_addr_T_20}; // @[LoopConv.scala:289:{153,172}]
wire [34:0] _spad_addr_T_22 = _spad_addr_T_21[34:0]; // @[LoopConv.scala:289:172]
wire [34:0] _spad_addr_T_23 = _spad_addr_T_22; // @[LoopConv.scala:289:172]
wire [49:0] _spad_addr_T_24 = {_spad_addr_T_17[48], _spad_addr_T_17} + {{15{_spad_addr_T_23[34]}}, _spad_addr_T_23}; // @[LoopConv.scala:289:{74,137,172}]
wire [18:0] _GEN_21 = $signed($signed(icol_padded) >>> _GEN_19); // @[LoopConv.scala:278:26, :289:{153,216}]
wire [18:0] _spad_addr_T_25; // @[LoopConv.scala:289:216]
assign _spad_addr_T_25 = _GEN_21; // @[LoopConv.scala:289:216]
wire [18:0] _spad_addr_T_52; // @[LoopConv.scala:290:216]
assign _spad_addr_T_52 = _GEN_21; // @[LoopConv.scala:289:216, :290:216]
wire [50:0] _spad_addr_T_26 = {_spad_addr_T_24[49], _spad_addr_T_24} + {{32{_spad_addr_T_25[18]}}, _spad_addr_T_25}; // @[LoopConv.scala:289:{137,200,216}]
wire [13:0] _spad_addr_T_28 = ich[15:2]; // @[LoopConv.scala:274:16, :290:33]
wire [30:0] _spad_addr_T_30 = {{17{_spad_addr_T_28[13]}}, _spad_addr_T_28} * {{14{_spad_addr_T_29[16]}}, _spad_addr_T_29}; // @[LoopConv.scala:290:{33,56}]
wire [29:0] _spad_addr_T_31 = _spad_addr_T_30[29:0]; // @[LoopConv.scala:290:56]
wire [29:0] _spad_addr_T_32 = _spad_addr_T_31; // @[LoopConv.scala:290:56]
wire [30:0] _spad_addr_T_33 = {{16{_spad_addr_T_27[14]}}, _spad_addr_T_27} + {_spad_addr_T_32[29], _spad_addr_T_32}; // @[LoopConv.scala:290:{20,25,56}]
wire [16:0] _spad_addr_T_35 = {1'h0, _spad_addr_T_34}; // @[LoopConv.scala:290:{81,90}]
wire [32:0] _spad_addr_T_36 = _GEN_13 * {{16{_spad_addr_T_35[16]}}, _spad_addr_T_35}; // @[LoopConv.scala:285:10, :290:81]
wire [31:0] _spad_addr_T_37 = _spad_addr_T_36[31:0]; // @[LoopConv.scala:290:81]
wire [31:0] _spad_addr_T_38 = _spad_addr_T_37; // @[LoopConv.scala:290:81]
wire [16:0] _spad_addr_T_40 = {1'h0, _spad_addr_T_39}; // @[LoopConv.scala:290:{109,118}]
wire [48:0] _spad_addr_T_41 = {{17{_spad_addr_T_38[31]}}, _spad_addr_T_38} * {{32{_spad_addr_T_40[16]}}, _spad_addr_T_40}; // @[LoopConv.scala:290:{81,109}]
wire [47:0] _spad_addr_T_42 = _spad_addr_T_41[47:0]; // @[LoopConv.scala:290:109]
wire [47:0] _spad_addr_T_43 = _spad_addr_T_42; // @[LoopConv.scala:290:109]
wire [48:0] _spad_addr_T_44 = {{18{_spad_addr_T_33[30]}}, _spad_addr_T_33} + {_spad_addr_T_43[47], _spad_addr_T_43}; // @[LoopConv.scala:290:{25,76,109}]
wire [16:0] _spad_addr_T_47 = {1'h0, _spad_addr_T_46}; // @[LoopConv.scala:290:{172,181}]
wire [35:0] _spad_addr_T_48 = {{17{_spad_addr_T_45[18]}}, _spad_addr_T_45} * {{19{_spad_addr_T_47[16]}}, _spad_addr_T_47}; // @[LoopConv.scala:290:{153,172}]
wire [34:0] _spad_addr_T_49 = _spad_addr_T_48[34:0]; // @[LoopConv.scala:290:172]
wire [34:0] _spad_addr_T_50 = _spad_addr_T_49; // @[LoopConv.scala:290:172]
wire [49:0] _spad_addr_T_51 = {_spad_addr_T_44[48], _spad_addr_T_44} + {{15{_spad_addr_T_50[34]}}, _spad_addr_T_50}; // @[LoopConv.scala:290:{76,137,172}]
wire [50:0] _spad_addr_T_53 = {_spad_addr_T_51[49], _spad_addr_T_51} + {{32{_spad_addr_T_52[18]}}, _spad_addr_T_52}; // @[LoopConv.scala:290:{137,200,216}]
wire [50:0] spad_addr = req_trans_input_3120 ? _spad_addr_T_26 : _spad_addr_T_53; // @[LoopConv.scala:258:16, :287:22, :289:200, :290:200]
wire [3:0] _block_size_downsampled_T = 4'h4 << req_downsample; // @[LoopConv.scala:258:16, :293:46]
wire [4:0] block_size_downsampled = {1'h0, _block_size_downsampled_T}; // @[LoopConv.scala:293:{46,72}]
wire [17:0] _GEN_22 = {{2{icol[15]}}, icol}; // @[LoopConv.scala:273:17, :278:26, :296:29]
wire [17:0] _I_T_1 = {_I_T[16], _I_T} - _GEN_22; // @[LoopConv.scala:296:{24,29}]
wire [17:0] _GEN_23 = {{13{block_size_downsampled[4]}}, block_size_downsampled}; // @[LoopConv.scala:293:72, :296:37]
wire _I_T_2 = $signed(_I_T_1) > $signed(_GEN_23); // @[LoopConv.scala:296:{29,37}]
wire [17:0] _I_T_4 = {_I_T_3[16], _I_T_3} - _GEN_22; // @[LoopConv.scala:296:{29,102,107}]
wire [17:0] _I_T_5 = _I_T_2 ? _GEN_23 : _I_T_4; // @[LoopConv.scala:296:{8,37,107}]
wire [16:0] _GEN_24 = 17'h0 - _GEN_6; // @[LoopConv.scala:279:{23,82}, :298:31]
wire [16:0] _I_T_7; // @[LoopConv.scala:298:31]
assign _I_T_7 = _GEN_24; // @[LoopConv.scala:279:23, :298:31]
wire [16:0] _I_T_9; // @[LoopConv.scala:298:72]
assign _I_T_9 = _GEN_24; // @[LoopConv.scala:279:23, :298:{31,72}]
wire _I_T_8 = $signed(_I_T_7) > 17'sh4; // @[LoopConv.scala:298:{31,39}]
wire [16:0] _I_T_10 = _I_T_8 ? 17'h4 : _I_T_9; // @[LoopConv.scala:298:{26,39,72}]
wire _I_T_12 = $signed(_GEN_6) >= $signed(_I_T_11); // @[LoopConv.scala:279:82, :299:{13,31}]
wire [16:0] _GEN_25 = {1'h0, req_inner_bounds_rpad} + _GEN; // @[LoopConv.scala:258:16, :263:37]
wire [16:0] _I_T_14; // @[LoopConv.scala:263:37]
assign _I_T_14 = _GEN_25; // @[LoopConv.scala:263:37]
wire [16:0] _I_T_21; // @[LoopConv.scala:263:37]
assign _I_T_21 = _GEN_25; // @[LoopConv.scala:263:37]
wire [16:0] _next_icol_T_1; // @[LoopConv.scala:263:37]
assign _next_icol_T_1 = _GEN_25; // @[LoopConv.scala:263:37]
wire [16:0] _I_T_15 = _I_T_14 >> _GEN; // @[LoopConv.scala:263:{37,59}]
wire [17:0] _I_T_16 = {1'h0, _I_T_15}; // @[LoopConv.scala:263:59, :299:83]
wire [18:0] _I_T_17 = {{2{_I_T_13[16]}}, _I_T_13} + {_I_T_16[17], _I_T_16}; // @[LoopConv.scala:299:{59,64,83}]
wire [19:0] _GEN_26 = {{4{icol[15]}}, icol}; // @[LoopConv.scala:273:17, :278:26, :299:88]
wire [19:0] _I_T_18 = {_I_T_17[18], _I_T_17} - _GEN_26; // @[LoopConv.scala:299:{64,88}]
wire _I_T_19 = $signed(_I_T_18) > 20'sh4; // @[LoopConv.scala:299:{88,96}]
wire [16:0] _I_T_22 = _I_T_21 >> _GEN; // @[LoopConv.scala:263:{37,59}]
wire [17:0] _I_T_23 = {1'h0, _I_T_22}; // @[LoopConv.scala:263:59, :299:165]
wire [18:0] _I_T_24 = {{2{_I_T_20[16]}}, _I_T_20} + {_I_T_23[17], _I_T_23}; // @[LoopConv.scala:299:{141,146,165}]
wire [19:0] _I_T_25 = {_I_T_24[18], _I_T_24} - _GEN_26; // @[LoopConv.scala:299:{88,146,170}]
wire [19:0] _I_T_26 = _I_T_19 ? 20'h4 : _I_T_25; // @[LoopConv.scala:299:{43,96,170}]
wire [19:0] _I_T_27 = _I_T_12 ? _I_T_26 : {{2{_I_T_5[17]}}, _I_T_5}; // @[Mux.scala:126:16]
wire [19:0] I = _I_T_6 ? {{3{_I_T_10[16]}}, _I_T_10} : _I_T_27; // @[Mux.scala:126:16]
wire [19:0] _next_icol_T = I; // @[Mux.scala:126:16]
wire [17:0] _GEN_27 = {{2{b[15]}}, b}; // @[LoopConv.scala:271:14, :284:118, :303:22]
wire [17:0] _K_T_1 = {_K_T[16], _K_T} - _GEN_27; // @[LoopConv.scala:303:{17,22}]
wire [17:0] _GEN_28 = {max_chs_per_mvin[16], max_chs_per_mvin}; // @[LoopConv.scala:268:29, :303:27]
wire _K_T_2 = $signed(_K_T_1) > $signed(_GEN_28); // @[LoopConv.scala:303:{22,27}]
wire [17:0] _K_T_4 = {_K_T_3[16], _K_T_3} - _GEN_27; // @[LoopConv.scala:303:{22,73,78}]
wire [17:0] _K_T_5 = _K_T_2 ? _GEN_28 : _K_T_4; // @[LoopConv.scala:303:{8,27,78}]
wire [16:0] _GEN_29 = {1'h0, req_derived_params_ichs}; // @[LoopConv.scala:258:16, :304:14]
wire [16:0] _K_T_6; // @[LoopConv.scala:304:14]
assign _K_T_6 = _GEN_29; // @[LoopConv.scala:304:14]
wire [16:0] _K_T_9; // @[LoopConv.scala:304:69]
assign _K_T_9 = _GEN_29; // @[LoopConv.scala:304:{14,69}]
wire [16:0] _next_ich_T; // @[LoopConv.scala:373:50]
assign _next_ich_T = _GEN_29; // @[LoopConv.scala:304:14, :373:50]
wire [17:0] _GEN_30 = {{2{ich[15]}}, ich}; // @[LoopConv.scala:274:16, :284:54, :304:19]
wire [17:0] _K_T_7 = {_K_T_6[16], _K_T_6} - _GEN_30; // @[LoopConv.scala:304:{14,19}]
wire _K_T_8 = $signed(_K_T_7) > $signed(_GEN_28); // @[LoopConv.scala:303:27, :304:{19,26}]
wire [17:0] _K_T_10 = {_K_T_9[16], _K_T_9} - _GEN_30; // @[LoopConv.scala:304:{19,69,74}]
wire [17:0] _K_T_11 = _K_T_8 ? _GEN_28 : _K_T_10; // @[LoopConv.scala:303:27, :304:{8,26,74}]
wire [17:0] K = req_trans_input_3120 ? _K_T_5 : _K_T_11; // @[LoopConv.scala:258:16, :302:14, :303:8, :304:8]
wire [63:0] _config_cmd_rs1_T; // @[LoopConv.scala:327:36]
wire [63:0] config_cmd_rs1; // @[LoopConv.scala:315:24]
wire [63:0] config_cmd_rs2; // @[LoopConv.scala:315:24]
wire [13:0] config_cmd_rs1_stride; // @[LoopConv.scala:319:28]
wire [2:0] config_cmd_rs1_pixel_repeats; // @[LoopConv.scala:319:28]
assign config_cmd_rs1_stride = req_derived_params_input_spad_stride[13:0]; // @[LoopConv.scala:258:16, :319:28, :322:25]
assign config_cmd_rs1_pixel_repeats = req_max_pixels_per_row[2:0]; // @[LoopConv.scala:258:16, :319:28, :323:32]
wire [5:0] config_cmd_rs1_lo_hi_hi = {config_cmd_rs1_pixel_repeats, 3'h0}; // @[LoopConv.scala:319:28, :327:36]
wire [7:0] config_cmd_rs1_lo_hi = {config_cmd_rs1_lo_hi_hi, 2'h0}; // @[LoopConv.scala:327:36]
wire [10:0] config_cmd_rs1_lo = {config_cmd_rs1_lo_hi, 3'h1}; // @[LoopConv.scala:327:36]
wire [18:0] config_cmd_rs1_hi_lo = {config_cmd_rs1_stride, 5'h0}; // @[LoopConv.scala:319:28, :327:36]
wire [52:0] config_cmd_rs1_hi = {34'hFE000000, config_cmd_rs1_hi_lo}; // @[LoopConv.scala:327:36]
assign _config_cmd_rs1_T = {config_cmd_rs1_hi, config_cmd_rs1_lo}; // @[LoopConv.scala:327:36]
assign config_cmd_rs1 = _config_cmd_rs1_T; // @[LoopConv.scala:315:24, :327:36]
wire [19:0] _GEN_31 = {19'h0, req_downsample}; // @[LoopConv.scala:258:16, :329:33]
wire [19:0] _config_cmd_rs2_T = {1'h0, dram_stride} << _GEN_31; // @[LoopConv.scala:281:24, :329:33]
assign config_cmd_rs2 = {44'h0, _config_cmd_rs2_T}; // @[LoopConv.scala:315:24, :329:{18,33}]
wire _io_req_ready_T = ~(|state); // @[LoopConv.scala:256:22, :338:25]
wire _io_req_ready_T_1 = ~_command_p_io_busy; // @[LoopConv.scala:313:25, :338:37]
assign _io_req_ready_T_2 = _io_req_ready_T & _io_req_ready_T_1; // @[LoopConv.scala:338:{25,34,37}]
assign io_req_ready_0 = _io_req_ready_T_2; // @[LoopConv.scala:235:7, :338:34]
wire _io_idle_T = ~(|state); // @[LoopConv.scala:256:22, :338:25, :339:20]
wire _io_idle_T_1 = ~_command_p_io_busy; // @[LoopConv.scala:313:25, :338:37, :339:32]
assign _io_idle_T_2 = _io_idle_T & _io_idle_T_1; // @[LoopConv.scala:339:{20,29,32}]
assign io_idle_0 = _io_idle_T_2; // @[LoopConv.scala:235:7, :339:29]
wire _command_p_io_in_valid_T = |state; // @[LoopConv.scala:256:22, :338:25, :342:34]
wire _command_p_io_in_valid_T_1 = ~io_wait_for_prev_loop_0; // @[LoopConv.scala:235:7, :342:46]
wire _command_p_io_in_valid_T_2 = _command_p_io_in_valid_T & _command_p_io_in_valid_T_1; // @[LoopConv.scala:342:{34,43,46}]
wire _command_p_io_in_valid_T_3 = |req_dram_addr; // @[LoopConv.scala:258:16, :342:87]
wire _command_p_io_in_valid_T_4 = _command_p_io_in_valid_T_2 & _command_p_io_in_valid_T_3; // @[LoopConv.scala:342:{43,69,87}]
wire _command_p_io_in_bits_cmd_T = state == 2'h1; // @[LoopConv.scala:256:22, :343:41]
wire [6:0] _command_p_io_in_bits_cmd_T_1_inst_funct = {5'h0, ~_command_p_io_in_bits_cmd_T, 1'h0}; // @[LoopConv.scala:343:{34,41}]
wire [63:0] _command_p_io_in_bits_cmd_T_1_rs1 = _command_p_io_in_bits_cmd_T ? config_cmd_rs1 : 64'h0; // @[LoopConv.scala:315:24, :343:{34,41}]
wire [63:0] _command_p_io_in_bits_cmd_T_1_rs2 = _command_p_io_in_bits_cmd_T ? config_cmd_rs2 : 64'h0; // @[LoopConv.scala:315:24, :343:{34,41}]
wire _command_p_io_out_ready_T = ~io_rob_overloaded_0; // @[LoopConv.scala:235:7, :349:45]
wire _command_p_io_out_ready_T_1 = io_cmd_ready_0 & _command_p_io_out_ready_T; // @[LoopConv.scala:235:7, :349:{42,45}]
wire _io_cmd_valid_T = ~io_rob_overloaded_0; // @[LoopConv.scala:235:7, :349:45, :350:45]
assign _io_cmd_valid_T_1 = _command_p_io_out_valid & _io_cmd_valid_T; // @[LoopConv.scala:313:25, :350:{42,45}]
assign io_cmd_valid_0 = _io_cmd_valid_T_1; // @[LoopConv.scala:235:7, :350:42]
wire _T = _command_p_io_out_bits_cmd_inst_funct == 7'h2; // @[LoopConv.scala:313:25, :352:46]
assign io_cmd_bits_rs1_0 = _T ? _command_p_io_out_bits_dram_addr[63:0] : _command_p_io_out_bits_cmd_rs1; // @[LoopConv.scala:235:7, :313:25, :351:15, :352:{46,60}, :354:21]
wire [4:0] io_cmd_bits_rs2_lo_hi_1 = mvin_cmd_rs2_num_cols; // @[LoopConv.scala:355:28, :360:37]
wire [2:0] mvin_cmd_rs2_local_addr_result_norm_cmd; // @[LocalAddr.scala:116:26]
wire [2:0] _io_cmd_bits_rs2_T = mvin_cmd_rs2_local_addr_norm_cmd; // @[LoopConv.scala:355:28, :360:37]
wire mvin_cmd_rs2_local_addr_result_garbage_bit; // @[LocalAddr.scala:116:26]
wire [13:0] mvin_cmd_rs2_local_addr_result_data; // @[LocalAddr.scala:116:26]
wire mvin_cmd_rs2_local_addr_garbage_bit; // @[LoopConv.scala:355:28]
wire [13:0] mvin_cmd_rs2_local_addr_data; // @[LoopConv.scala:355:28]
wire [2:0] mvin_cmd_rs2_num_rows; // @[LoopConv.scala:355:28]
wire [19:0] _mvin_cmd_rs2_num_rows_T = $signed($signed(_command_p_io_out_bits_I) >>> _GEN_31); // @[LoopConv.scala:313:25, :329:33, :357:35]
wire [19:0] _mvin_cmd_rs2_num_rows_T_1 = _mvin_cmd_rs2_num_rows_T; // @[LoopConv.scala:357:{35,54}]
assign mvin_cmd_rs2_num_rows = _mvin_cmd_rs2_num_rows_T_1[2:0]; // @[LoopConv.scala:355:28, :357:{27,54}]
wire [17:0] _mvin_cmd_rs2_num_cols_T; // @[LoopConv.scala:358:34]
assign mvin_cmd_rs2_num_cols = _mvin_cmd_rs2_num_cols_T[4:0]; // @[LoopConv.scala:355:28, :358:{27,34}]
wire _mvin_cmd_rs2_local_addr_result_result_T_7; // @[LocalAddr.scala:108:37]
wire _mvin_cmd_rs2_local_addr_result_result_T_6; // @[LocalAddr.scala:108:37]
wire mvin_cmd_rs2_local_addr_result_result_is_acc_addr = _mvin_cmd_rs2_local_addr_result_result_WIRE_is_acc_addr; // @[LocalAddr.scala:108:{26,37}]
wire _mvin_cmd_rs2_local_addr_result_result_T_5; // @[LocalAddr.scala:108:37]
wire mvin_cmd_rs2_local_addr_result_result_accumulate = _mvin_cmd_rs2_local_addr_result_result_WIRE_accumulate; // @[LocalAddr.scala:108:{26,37}]
wire [2:0] _mvin_cmd_rs2_local_addr_result_result_WIRE_3; // @[LocalAddr.scala:108:37]
wire mvin_cmd_rs2_local_addr_result_result_read_full_acc_row = _mvin_cmd_rs2_local_addr_result_result_WIRE_read_full_acc_row; // @[LocalAddr.scala:108:{26,37}]
wire [10:0] _mvin_cmd_rs2_local_addr_result_result_T_3; // @[LocalAddr.scala:108:37]
wire [2:0] mvin_cmd_rs2_local_addr_result_result_norm_cmd = _mvin_cmd_rs2_local_addr_result_result_WIRE_norm_cmd; // @[LocalAddr.scala:108:{26,37}]
wire _mvin_cmd_rs2_local_addr_result_result_T_2; // @[LocalAddr.scala:108:37]
wire [13:0] _mvin_cmd_rs2_local_addr_result_result_T_1; // @[LocalAddr.scala:108:37]
wire mvin_cmd_rs2_local_addr_result_result_garbage_bit = _mvin_cmd_rs2_local_addr_result_result_WIRE_garbage_bit; // @[LocalAddr.scala:108:{26,37}]
wire [13:0] mvin_cmd_rs2_local_addr_result_result_data = _mvin_cmd_rs2_local_addr_result_result_WIRE_data; // @[LocalAddr.scala:108:{26,37}]
wire [50:0] _mvin_cmd_rs2_local_addr_result_result_T; // @[LocalAddr.scala:108:37]
wire [31:0] _mvin_cmd_rs2_local_addr_result_result_WIRE_1 = _mvin_cmd_rs2_local_addr_result_result_T[31:0]; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_T_1 = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[13:0]; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_data = _mvin_cmd_rs2_local_addr_result_result_T_1; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_T_2 = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[14]; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_garbage_bit = _mvin_cmd_rs2_local_addr_result_result_T_2; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_T_3 = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[25:15]; // @[LocalAddr.scala:108:37]
wire [10:0] _mvin_cmd_rs2_local_addr_result_result_WIRE_garbage = _mvin_cmd_rs2_local_addr_result_result_T_3; // @[LocalAddr.scala:108:37]
wire [2:0] _mvin_cmd_rs2_local_addr_result_result_T_4 = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[28:26]; // @[LocalAddr.scala:108:37]
wire [2:0] _mvin_cmd_rs2_local_addr_result_result_WIRE_2 = _mvin_cmd_rs2_local_addr_result_result_T_4; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_3 = _mvin_cmd_rs2_local_addr_result_result_WIRE_2; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_norm_cmd = _mvin_cmd_rs2_local_addr_result_result_WIRE_3; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_T_5 = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[29]; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_read_full_acc_row = _mvin_cmd_rs2_local_addr_result_result_T_5; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_T_6 = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[30]; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_accumulate = _mvin_cmd_rs2_local_addr_result_result_T_6; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_T_7 = _mvin_cmd_rs2_local_addr_result_result_WIRE_1[31]; // @[LocalAddr.scala:108:37]
assign _mvin_cmd_rs2_local_addr_result_result_WIRE_is_acc_addr = _mvin_cmd_rs2_local_addr_result_result_T_7; // @[LocalAddr.scala:108:37]
assign mvin_cmd_rs2_local_addr_result_norm_cmd = mvin_cmd_rs2_local_addr_result_result_norm_cmd; // @[LocalAddr.scala:108:26, :116:26]
assign mvin_cmd_rs2_local_addr_result_garbage_bit = mvin_cmd_rs2_local_addr_result_result_garbage_bit; // @[LocalAddr.scala:108:26, :116:26]
assign mvin_cmd_rs2_local_addr_result_data = mvin_cmd_rs2_local_addr_result_result_data; // @[LocalAddr.scala:108:26, :116:26]
assign mvin_cmd_rs2_local_addr_norm_cmd = mvin_cmd_rs2_local_addr_result_norm_cmd; // @[LoopConv.scala:355:28]
assign mvin_cmd_rs2_local_addr_garbage_bit = mvin_cmd_rs2_local_addr_result_garbage_bit; // @[LoopConv.scala:355:28]
assign mvin_cmd_rs2_local_addr_data = mvin_cmd_rs2_local_addr_result_data; // @[LoopConv.scala:355:28]
wire [11:0] io_cmd_bits_rs2_lo_hi = {11'h0, mvin_cmd_rs2_local_addr_garbage_bit}; // @[LoopConv.scala:355:28, :360:37]
wire [25:0] io_cmd_bits_rs2_lo = {io_cmd_bits_rs2_lo_hi, mvin_cmd_rs2_local_addr_data}; // @[LoopConv.scala:355:28, :360:37]
wire [3:0] io_cmd_bits_rs2_hi_lo = {1'h0, _io_cmd_bits_rs2_T}; // @[LoopConv.scala:360:37]
wire [5:0] io_cmd_bits_rs2_hi = {2'h0, io_cmd_bits_rs2_hi_lo}; // @[LoopConv.scala:360:37]
wire [31:0] _io_cmd_bits_rs2_T_1 = {io_cmd_bits_rs2_hi, io_cmd_bits_rs2_lo}; // @[LoopConv.scala:360:37]
wire [36:0] io_cmd_bits_rs2_lo_1 = {io_cmd_bits_rs2_lo_hi_1, _io_cmd_bits_rs2_T_1}; // @[LoopConv.scala:360:37]
wire [15:0] io_cmd_bits_rs2_hi_hi_1 = {13'h0, mvin_cmd_rs2_num_rows}; // @[LoopConv.scala:355:28, :360:37]
wire [26:0] io_cmd_bits_rs2_hi_1 = {io_cmd_bits_rs2_hi_hi_1, 11'h0}; // @[LoopConv.scala:360:37]
wire [63:0] _io_cmd_bits_rs2_T_2 = {io_cmd_bits_rs2_hi_1, io_cmd_bits_rs2_lo_1}; // @[LoopConv.scala:360:37]
assign io_cmd_bits_rs2_0 = _T ? _io_cmd_bits_rs2_T_2 : _command_p_io_out_bits_cmd_rs2; // @[LoopConv.scala:235:7, :313:25, :351:15, :352:{46,60}, :360:{21,37}]
wire [16:0] b_it = req_trans_input_3120 ? _b_it_T : 17'h1; // @[LoopConv.scala:258:16, :370:{21,61}]
wire [16:0] ich_it = req_trans_input_3120 ? 17'h1 : _ich_it_T; // @[LoopConv.scala:258:16, :371:{23,68}]
wire [17:0] _next_ich_max_T = {_next_ich_T[16], _next_ich_T} - 18'h1; // @[Util.scala:48:28]
wire [16:0] _next_ich_max_T_1 = _next_ich_max_T[16:0]; // @[Util.scala:48:28]
wire [16:0] next_ich_max = _next_ich_max_T_1; // @[Util.scala:48:28]
wire [17:0] _GEN_32 = {1'h0, ich_it}; // @[Util.scala:50:19]
wire [17:0] _next_ich_T_1; // @[Util.scala:50:19]
assign _next_ich_T_1 = _GEN_32; // @[Util.scala:50:19]
wire [17:0] _next_ich_T_6; // @[Util.scala:52:16]
assign _next_ich_T_6 = _GEN_32; // @[Util.scala:50:19, :52:16]
wire [18:0] _GEN_33 = {{3{ich[15]}}, ich}; // @[Util.scala:50:15]
wire [18:0] _next_ich_T_2 = _GEN_33 + {_next_ich_T_1[17], _next_ich_T_1}; // @[Util.scala:50:{15,19}]
wire [17:0] _next_ich_T_3 = _next_ich_T_2[17:0]; // @[Util.scala:50:15]
wire [17:0] _next_ich_T_4 = _next_ich_T_3; // @[Util.scala:50:15]
wire [18:0] _next_ich_T_7 = _GEN_33 + {_next_ich_T_6[17], _next_ich_T_6}; // @[Util.scala:50:15, :52:{11,16}]
wire _next_ich_T_8 = $signed(_next_ich_T_7) > $signed({{2{next_ich_max[16]}}, next_ich_max}); // @[Util.scala:48:28, :52:{11,22}]
wire [17:0] _next_ich_T_9 = _next_ich_T_8 ? 18'h0 : _next_ich_T_4; // @[Mux.scala:126:16]
wire [17:0] next_ich = _next_ich_T_9; // @[Mux.scala:126:16]
wire [16:0] _next_icol_T_2 = _next_icol_T_1 >> _GEN; // @[LoopConv.scala:263:{37,59}]
wire [17:0] _next_icol_T_3 = {2'h0, req_derived_params_icols_unpadded} + {1'h0, _next_icol_T_2}; // @[LoopConv.scala:258:16, :263:59, :374:65]
wire [18:0] _next_icol_T_4 = {1'h0, _next_icol_T_3}; // @[LoopConv.scala:374:{65,85}]
wire [16:0] _next_icol_T_6 = _next_icol_T_5 >> _GEN; // @[LoopConv.scala:263:{37,59}]
wire [17:0] _next_icol_T_7 = {1'h0, _next_icol_T_6}; // @[LoopConv.scala:263:59, :374:112]
wire [18:0] _next_icol_T_8 = 19'h0 - {_next_icol_T_7[17], _next_icol_T_7}; // @[LoopConv.scala:279:23, :329:33, :374:{94,112}]
wire _GEN_34 = next_ich == 18'h0; // @[Mux.scala:126:16]
wire _next_icol_T_9; // @[LoopConv.scala:375:18]
assign _next_icol_T_9 = _GEN_34; // @[LoopConv.scala:375:18]
wire _next_irow_T_14; // @[LoopConv.scala:377:61]
assign _next_irow_T_14 = _GEN_34; // @[LoopConv.scala:375:18, :377:61]
wire _next_b_T_12; // @[LoopConv.scala:379:104]
assign _next_b_T_12 = _GEN_34; // @[LoopConv.scala:375:18, :379:104]
wire _state_T_13; // @[LoopConv.scala:386:133]
assign _state_T_13 = _GEN_34; // @[LoopConv.scala:375:18, :386:133]
wire [19:0] _next_icol_max_T = {_next_icol_T_4[18], _next_icol_T_4} - 20'h1; // @[Util.scala:48:28]
wire [18:0] _next_icol_max_T_1 = _next_icol_max_T[18:0]; // @[Util.scala:48:28]
wire [18:0] next_icol_max = _next_icol_max_T_1; // @[Util.scala:48:28]
wire [20:0] _GEN_35 = {1'h0, _next_icol_T}; // @[Util.scala:50:19]
wire [20:0] _next_icol_T_10; // @[Util.scala:50:19]
assign _next_icol_T_10 = _GEN_35; // @[Util.scala:50:19]
wire [20:0] _next_icol_T_15; // @[Util.scala:52:16]
assign _next_icol_T_15 = _GEN_35; // @[Util.scala:50:19, :52:16]
wire [21:0] _GEN_36 = {{6{icol[15]}}, icol}; // @[Util.scala:50:15]
wire [21:0] _next_icol_T_11 = _GEN_36 + {_next_icol_T_10[20], _next_icol_T_10}; // @[Util.scala:50:{15,19}]
wire [20:0] _next_icol_T_12 = _next_icol_T_11[20:0]; // @[Util.scala:50:15]
wire [20:0] _next_icol_T_13 = _next_icol_T_12; // @[Util.scala:50:15]
wire _next_icol_T_14 = ~_next_icol_T_9; // @[Util.scala:51:8]
wire [21:0] _next_icol_T_16 = _GEN_36 + {_next_icol_T_15[20], _next_icol_T_15}; // @[Util.scala:50:15, :52:{11,16}]
wire _next_icol_T_17 = $signed(_next_icol_T_16) > $signed({{3{next_icol_max[18]}}, next_icol_max}); // @[Util.scala:48:28, :52:{11,22}]
wire [20:0] _next_icol_T_18 = _next_icol_T_17 ? {{2{_next_icol_T_8[18]}}, _next_icol_T_8} : _next_icol_T_13; // @[Mux.scala:126:16]
wire [20:0] next_icol = _next_icol_T_14 ? {{5{icol[15]}}, icol} : _next_icol_T_18; // @[Mux.scala:126:16]
wire [1:0] _next_irow_T = 2'h1 << req_downsample; // @[LoopConv.scala:258:16, :343:41, :376:43]
wire [16:0] _next_irow_T_1 = {1'h0, req_inner_bounds_dpad} + _GEN; // @[LoopConv.scala:258:16, :263:37]
wire [16:0] _next_irow_T_2 = _next_irow_T_1 >> _GEN; // @[LoopConv.scala:263:{37,59}]
wire [17:0] _next_irow_T_3 = {2'h0, req_derived_params_irows_unpadded} + {1'h0, _next_irow_T_2}; // @[LoopConv.scala:258:16, :263:59, :376:78]
wire [18:0] _next_irow_T_4 = {1'h0, _next_irow_T_3}; // @[LoopConv.scala:376:{78,98}]
wire [16:0] _next_irow_T_6 = _next_irow_T_5 >> _GEN; // @[LoopConv.scala:263:{37,59}]
wire [17:0] _next_irow_T_7 = {1'h0, _next_irow_T_6}; // @[LoopConv.scala:263:59, :376:125]
wire [18:0] _next_irow_T_8 = 19'h0 - {_next_irow_T_7[17], _next_irow_T_7}; // @[LoopConv.scala:279:23, :329:33, :376:{107,125}]
wire [16:0] _next_irow_T_10 = _next_irow_T_9 >> _GEN; // @[LoopConv.scala:263:{37,59}]
wire [17:0] _next_irow_T_11 = {1'h0, _next_irow_T_10}; // @[LoopConv.scala:263:59, :377:44]
wire [18:0] _next_irow_T_12 = 19'h0 - {_next_irow_T_11[17], _next_irow_T_11}; // @[LoopConv.scala:279:23, :329:33, :377:{26,44}]
wire _next_irow_T_13 = next_icol == {{2{_next_irow_T_12[18]}}, _next_irow_T_12}; // @[Mux.scala:126:16]
wire _next_irow_T_15 = _next_irow_T_13 & _next_irow_T_14; // @[LoopConv.scala:377:{19,49,61}]
wire [19:0] _next_irow_max_T = {_next_irow_T_4[18], _next_irow_T_4} - 20'h1; // @[Util.scala:48:28]
wire [18:0] _next_irow_max_T_1 = _next_irow_max_T[18:0]; // @[Util.scala:48:28]
wire [18:0] next_irow_max = _next_irow_max_T_1; // @[Util.scala:48:28]
wire [2:0] _GEN_37 = {1'h0, _next_irow_T}; // @[Util.scala:50:19]
wire [2:0] _next_irow_T_16; // @[Util.scala:50:19]
assign _next_irow_T_16 = _GEN_37; // @[Util.scala:50:19]
wire [2:0] _next_irow_T_21; // @[Util.scala:52:16]
assign _next_irow_T_21 = _GEN_37; // @[Util.scala:50:19, :52:16]
wire [16:0] _next_irow_T_17 = _GEN_3 + {{14{_next_irow_T_16[2]}}, _next_irow_T_16}; // @[Util.scala:50:{15,19}]
wire [15:0] _next_irow_T_18 = _next_irow_T_17[15:0]; // @[Util.scala:50:15]
wire [15:0] _next_irow_T_19 = _next_irow_T_18; // @[Util.scala:50:15]
wire _next_irow_T_20 = ~_next_irow_T_15; // @[Util.scala:51:8]
wire [16:0] _next_irow_T_22 = _GEN_3 + {{14{_next_irow_T_21[2]}}, _next_irow_T_21}; // @[Util.scala:52:{11,16}]
wire _next_irow_T_23 = $signed({{2{_next_irow_T_22[16]}}, _next_irow_T_22}) > $signed(next_irow_max); // @[Util.scala:48:28, :52:{11,22}]
wire [18:0] _next_irow_T_24 = _next_irow_T_23 ? _next_irow_T_8 : {{3{_next_irow_T_19[15]}}, _next_irow_T_19}; // @[Mux.scala:126:16]
wire [18:0] next_irow = _next_irow_T_20 ? _GEN_1 : _next_irow_T_24; // @[Mux.scala:126:16]
wire [16:0] _next_b_T_2 = _next_b_T_1 >> _GEN; // @[LoopConv.scala:263:{37,59}]
wire [17:0] _next_b_T_3 = {1'h0, _next_b_T_2}; // @[LoopConv.scala:263:59, :379:44]
wire [18:0] _next_b_T_4 = 19'h0 - {_next_b_T_3[17], _next_b_T_3}; // @[LoopConv.scala:279:23, :329:33, :379:{26,44}]
wire _next_b_T_5 = next_irow == _next_b_T_4; // @[Mux.scala:126:16]
wire [16:0] _next_b_T_7 = _next_b_T_6 >> _GEN; // @[LoopConv.scala:263:{37,59}]
wire [17:0] _next_b_T_8 = {1'h0, _next_b_T_7}; // @[LoopConv.scala:263:59, :379:87]
wire [18:0] _next_b_T_9 = 19'h0 - {_next_b_T_8[17], _next_b_T_8}; // @[LoopConv.scala:279:23, :329:33, :379:{69,87}]
wire _next_b_T_10 = next_icol == {{2{_next_b_T_9[18]}}, _next_b_T_9}; // @[Mux.scala:126:16]
wire _next_b_T_11 = _next_b_T_5 & _next_b_T_10; // @[LoopConv.scala:379:{19,49,62}]
wire _next_b_T_13 = _next_b_T_11 & _next_b_T_12; // @[LoopConv.scala:379:{49,92,104}]
wire [17:0] _next_b_max_T = {_next_b_T[16], _next_b_T} - 18'h1; // @[Util.scala:48:28]
wire [16:0] _next_b_max_T_1 = _next_b_max_T[16:0]; // @[Util.scala:48:28]
wire [16:0] next_b_max = _next_b_max_T_1; // @[Util.scala:48:28]
wire [17:0] _GEN_38 = {1'h0, b_it}; // @[Util.scala:50:19]
wire [17:0] _next_b_T_14; // @[Util.scala:50:19]
assign _next_b_T_14 = _GEN_38; // @[Util.scala:50:19]
wire [17:0] _next_b_T_19; // @[Util.scala:52:16]
assign _next_b_T_19 = _GEN_38; // @[Util.scala:50:19, :52:16]
wire [18:0] _GEN_39 = {{3{b[15]}}, b}; // @[Util.scala:50:15]
wire [18:0] _next_b_T_15 = _GEN_39 + {_next_b_T_14[17], _next_b_T_14}; // @[Util.scala:50:{15,19}]
wire [17:0] _next_b_T_16 = _next_b_T_15[17:0]; // @[Util.scala:50:15]
wire [17:0] _next_b_T_17 = _next_b_T_16; // @[Util.scala:50:15]
wire _next_b_T_18 = ~_next_b_T_13; // @[Util.scala:51:8]
wire [18:0] _next_b_T_20 = _GEN_39 + {_next_b_T_19[17], _next_b_T_19}; // @[Util.scala:50:15, :52:{11,16}]
wire _next_b_T_21 = $signed(_next_b_T_20) > $signed({{2{next_b_max[16]}}, next_b_max}); // @[Util.scala:48:28, :52:{11,22}]
wire [17:0] _next_b_T_22 = _next_b_T_21 ? 18'h0 : _next_b_T_17; // @[Mux.scala:126:16]
wire [17:0] next_b = _next_b_T_18 ? _GEN_27 : _next_b_T_22; // @[Mux.scala:126:16]
wire _state_T = next_b == 18'h0; // @[Mux.scala:126:16]
wire [16:0] _state_T_2 = _state_T_1 >> _GEN; // @[LoopConv.scala:263:{37,59}]
wire [17:0] _state_T_3 = {1'h0, _state_T_2}; // @[LoopConv.scala:263:59, :386:73]
wire [18:0] _state_T_4 = 19'h0 - {_state_T_3[17], _state_T_3}; // @[LoopConv.scala:279:23, :329:33, :386:{55,73}]
wire _state_T_5 = next_irow == _state_T_4; // @[Mux.scala:126:16]
wire _state_T_6 = _state_T & _state_T_5; // @[LoopConv.scala:386:{27,35,48}]
wire [16:0] _state_T_8 = _state_T_7 >> _GEN; // @[LoopConv.scala:263:{37,59}]
wire [17:0] _state_T_9 = {1'h0, _state_T_8}; // @[LoopConv.scala:263:59, :386:116]
wire [18:0] _state_T_10 = 19'h0 - {_state_T_9[17], _state_T_9}; // @[LoopConv.scala:279:23, :329:33, :386:{98,116}]
wire _state_T_11 = next_icol == {{2{_state_T_10[18]}}, _state_T_10}; // @[Mux.scala:126:16]
wire _state_T_12 = _state_T_6 & _state_T_11; // @[LoopConv.scala:386:{35,78,91}]
wire _state_T_14 = _state_T_12 & _state_T_13; // @[LoopConv.scala:386:{78,121,133}]
wire [1:0] _state_T_15 = {~_state_T_14, 1'h0}; // @[LoopConv.scala:386:{19,121}]
wire [16:0] _GEN_40 = {16'h0, io_req_bits_input_dilated_0}; // @[LoopConv.scala:235:7, :279:23, :396:52]
wire [16:0] _irow_T = {1'h0, io_req_bits_inner_bounds_upad_0} + _GEN_40; // @[LoopConv.scala:235:7, :396:52]
wire [16:0] _irow_T_1 = _irow_T >> _GEN_40; // @[LoopConv.scala:396:{52,82}]
wire [17:0] _irow_T_2 = {1'h0, _irow_T_1}; // @[LoopConv.scala:396:{82,112}]
wire [18:0] _irow_T_3 = 19'h0 - {_irow_T_2[17], _irow_T_2}; // @[LoopConv.scala:279:23, :329:33, :396:{17,112}]
wire [16:0] _icol_T = {1'h0, io_req_bits_inner_bounds_lpad_0} + _GEN_40; // @[LoopConv.scala:235:7, :396:52, :397:52]
wire [16:0] _icol_T_1 = _icol_T >> _GEN_40; // @[LoopConv.scala:396:52, :397:{52,82}]
wire [17:0] _icol_T_2 = {1'h0, _icol_T_1}; // @[LoopConv.scala:397:{82,112}]
wire [18:0] _icol_T_3 = 19'h0 - {_icol_T_2[17], _icol_T_2}; // @[LoopConv.scala:279:23, :329:33, :397:{17,112}]
wire _T_2 = _command_p_io_in_ready & _command_p_io_in_valid_T_4; // @[Decoupled.scala:51:35]
wire _T_4 = io_req_ready_0 & io_req_valid_0; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[LoopConv.scala:235:7]
if (reset) // @[LoopConv.scala:235:7]
state <= 2'h0; // @[LoopConv.scala:256:22]
else if (_T_4) // @[Decoupled.scala:51:35]
state <= 2'h1; // @[LoopConv.scala:256:22, :343:41]
else if (|req_dram_addr) begin // @[LoopConv.scala:258:16, :342:87]
if (_T_2) // @[Decoupled.scala:51:35]
state <= _command_p_io_in_bits_cmd_T ? 2'h2 : _state_T_15; // @[LoopConv.scala:256:22, :343:41, :367:29, :368:13, :386:{13,19}]
end
else // @[LoopConv.scala:342:87]
state <= 2'h0; // @[LoopConv.scala:256:22]
if (_T_4) begin // @[Decoupled.scala:51:35]
req_outer_bounds_batch_size <= io_req_bits_outer_bounds_batch_size_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_in_row_dim <= io_req_bits_outer_bounds_in_row_dim_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_in_col_dim <= io_req_bits_outer_bounds_in_col_dim_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_in_channels <= io_req_bits_outer_bounds_in_channels_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_out_channels <= io_req_bits_outer_bounds_out_channels_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_out_col_dim <= io_req_bits_outer_bounds_out_col_dim_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_out_row_dim <= io_req_bits_outer_bounds_out_row_dim_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_out_stride <= io_req_bits_outer_bounds_out_stride_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_in_stride <= io_req_bits_outer_bounds_in_stride_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_weight_stride <= io_req_bits_outer_bounds_weight_stride_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_pool_out_row_dim <= io_req_bits_outer_bounds_pool_out_row_dim_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_pool_out_col_dim <= io_req_bits_outer_bounds_pool_out_col_dim_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_stride <= io_req_bits_outer_bounds_stride_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_padding <= io_req_bits_outer_bounds_padding_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_kernel_dim <= io_req_bits_outer_bounds_kernel_dim_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_kernel_dilation <= io_req_bits_outer_bounds_kernel_dilation_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_pool_size <= io_req_bits_outer_bounds_pool_size_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_pool_stride <= io_req_bits_outer_bounds_pool_stride_0; // @[LoopConv.scala:235:7, :258:16]
req_outer_bounds_pool_padding <= io_req_bits_outer_bounds_pool_padding_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_batches <= io_req_bits_inner_bounds_batches_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_porows <= io_req_bits_inner_bounds_porows_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_pocols <= io_req_bits_inner_bounds_pocols_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_pochs <= io_req_bits_inner_bounds_pochs_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_krows <= io_req_bits_inner_bounds_krows_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_kcols <= io_req_bits_inner_bounds_kcols_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_kchs <= io_req_bits_inner_bounds_kchs_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_lpad <= io_req_bits_inner_bounds_lpad_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_rpad <= io_req_bits_inner_bounds_rpad_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_upad <= io_req_bits_inner_bounds_upad_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_dpad <= io_req_bits_inner_bounds_dpad_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_plpad <= io_req_bits_inner_bounds_plpad_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_prad <= io_req_bits_inner_bounds_prad_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_pupad <= io_req_bits_inner_bounds_pupad_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_pdpad <= io_req_bits_inner_bounds_pdpad_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_orows <= io_req_bits_inner_bounds_orows_0; // @[LoopConv.scala:235:7, :258:16]
req_inner_bounds_ocols <= io_req_bits_inner_bounds_ocols_0; // @[LoopConv.scala:235:7, :258:16]
req_derived_params_ochs <= io_req_bits_derived_params_ochs_0; // @[LoopConv.scala:235:7, :258:16]
req_derived_params_irows <= io_req_bits_derived_params_irows_0; // @[LoopConv.scala:235:7, :258:16]
req_derived_params_icols <= io_req_bits_derived_params_icols_0; // @[LoopConv.scala:235:7, :258:16]
req_derived_params_irows_unpadded <= io_req_bits_derived_params_irows_unpadded_0; // @[LoopConv.scala:235:7, :258:16]
req_derived_params_icols_unpadded <= io_req_bits_derived_params_icols_unpadded_0; // @[LoopConv.scala:235:7, :258:16]
req_derived_params_ichs <= io_req_bits_derived_params_ichs_0; // @[LoopConv.scala:235:7, :258:16]
req_derived_params_out_channels_per_bank <= io_req_bits_derived_params_out_channels_per_bank_0; // @[LoopConv.scala:235:7, :258:16]
req_derived_params_in_channels_per_bank <= io_req_bits_derived_params_in_channels_per_bank_0; // @[LoopConv.scala:235:7, :258:16]
req_derived_params_bias_spad_stride <= io_req_bits_derived_params_bias_spad_stride_0; // @[LoopConv.scala:235:7, :258:16]
req_derived_params_input_spad_stride <= io_req_bits_derived_params_input_spad_stride_0; // @[LoopConv.scala:235:7, :258:16]
req_derived_params_weight_spad_stride <= io_req_bits_derived_params_weight_spad_stride_0; // @[LoopConv.scala:235:7, :258:16]
req_addr_start <= io_req_bits_addr_start_0; // @[LoopConv.scala:235:7, :258:16]
req_dram_addr <= io_req_bits_dram_addr_0; // @[LoopConv.scala:235:7, :258:16]
req_downsample <= io_req_bits_downsample_0; // @[LoopConv.scala:235:7, :258:16]
req_max_pixels_per_row <= io_req_bits_max_pixels_per_row_0; // @[LoopConv.scala:235:7, :258:16]
req_input_dilated <= io_req_bits_input_dilated_0; // @[LoopConv.scala:235:7, :258:16]
req_trans_input_3120 <= io_req_bits_trans_input_3120_0; // @[LoopConv.scala:235:7, :258:16]
req_loop_id <= io_req_bits_loop_id_0; // @[LoopConv.scala:235:7, :258:16]
b <= 16'h0; // @[LoopConv.scala:271:14, :279:23]
irow <= _irow_T_3[15:0]; // @[LoopConv.scala:272:17, :396:{10,17}]
icol <= _icol_T_3[15:0]; // @[LoopConv.scala:273:17, :397:{10,17}]
ich <= 16'h0; // @[LoopConv.scala:274:16, :279:23]
end
else if (~(|req_dram_addr) | ~_T_2 | _command_p_io_in_bits_cmd_T) begin // @[Decoupled.scala:51:35]
end
else begin // @[LoopConv.scala:274:16, :364:30, :366:36, :367:29]
b <= next_b[15:0]; // @[Mux.scala:126:16]
irow <= next_irow[15:0]; // @[Mux.scala:126:16]
icol <= next_icol[15:0]; // @[Mux.scala:126:16]
ich <= next_ich[15:0]; // @[Mux.scala:126:16]
end
  end // @[LoopConv.scala:235:7]
Pipeline_12 command_p ( // @[LoopConv.scala:313:25]
.clock (clock),
.reset (reset),
.io_in_ready (_command_p_io_in_ready),
.io_in_valid (_command_p_io_in_valid_T_4), // @[LoopConv.scala:342:69]
.io_in_bits_cmd_inst_funct (_command_p_io_in_bits_cmd_T_1_inst_funct), // @[LoopConv.scala:343:34]
.io_in_bits_cmd_rs1 (_command_p_io_in_bits_cmd_T_1_rs1), // @[LoopConv.scala:343:34]
.io_in_bits_cmd_rs2 (_command_p_io_in_bits_cmd_T_1_rs2), // @[LoopConv.scala:343:34]
.io_in_bits_dram_addr (dram_addr), // @[LoopConv.scala:286:22]
.io_in_bits_spad_addr (spad_addr), // @[LoopConv.scala:287:22]
.io_in_bits_I (I), // @[Mux.scala:126:16]
.io_in_bits_K (K), // @[LoopConv.scala:302:14]
.io_out_ready (_command_p_io_out_ready_T_1), // @[LoopConv.scala:349:42]
.io_out_valid (_command_p_io_out_valid),
.io_out_bits_cmd_inst_funct (_command_p_io_out_bits_cmd_inst_funct),
.io_out_bits_cmd_inst_rs2 (io_cmd_bits_inst_rs2_0),
.io_out_bits_cmd_inst_rs1 (io_cmd_bits_inst_rs1_0),
.io_out_bits_cmd_inst_xd (io_cmd_bits_inst_xd_0),
.io_out_bits_cmd_inst_xs1 (io_cmd_bits_inst_xs1_0),
.io_out_bits_cmd_inst_xs2 (io_cmd_bits_inst_xs2_0),
.io_out_bits_cmd_inst_rd (io_cmd_bits_inst_rd_0),
.io_out_bits_cmd_inst_opcode (io_cmd_bits_inst_opcode_0),
.io_out_bits_cmd_rs1 (_command_p_io_out_bits_cmd_rs1),
.io_out_bits_cmd_rs2 (_command_p_io_out_bits_cmd_rs2),
.io_out_bits_cmd_status_debug (io_cmd_bits_status_debug_0),
.io_out_bits_cmd_status_cease (io_cmd_bits_status_cease_0),
.io_out_bits_cmd_status_wfi (io_cmd_bits_status_wfi_0),
.io_out_bits_cmd_status_isa (io_cmd_bits_status_isa_0),
.io_out_bits_cmd_status_dprv (io_cmd_bits_status_dprv_0),
.io_out_bits_cmd_status_dv (io_cmd_bits_status_dv_0),
.io_out_bits_cmd_status_prv (io_cmd_bits_status_prv_0),
.io_out_bits_cmd_status_v (io_cmd_bits_status_v_0),
.io_out_bits_cmd_status_sd (io_cmd_bits_status_sd_0),
.io_out_bits_cmd_status_zero2 (io_cmd_bits_status_zero2_0),
.io_out_bits_cmd_status_mpv (io_cmd_bits_status_mpv_0),
.io_out_bits_cmd_status_gva (io_cmd_bits_status_gva_0),
.io_out_bits_cmd_status_mbe (io_cmd_bits_status_mbe_0),
.io_out_bits_cmd_status_sbe (io_cmd_bits_status_sbe_0),
.io_out_bits_cmd_status_sxl (io_cmd_bits_status_sxl_0),
.io_out_bits_cmd_status_uxl (io_cmd_bits_status_uxl_0),
.io_out_bits_cmd_status_sd_rv32 (io_cmd_bits_status_sd_rv32_0),
.io_out_bits_cmd_status_zero1 (io_cmd_bits_status_zero1_0),
.io_out_bits_cmd_status_tsr (io_cmd_bits_status_tsr_0),
.io_out_bits_cmd_status_tw (io_cmd_bits_status_tw_0),
.io_out_bits_cmd_status_tvm (io_cmd_bits_status_tvm_0),
.io_out_bits_cmd_status_mxr (io_cmd_bits_status_mxr_0),
.io_out_bits_cmd_status_sum (io_cmd_bits_status_sum_0),
.io_out_bits_cmd_status_mprv (io_cmd_bits_status_mprv_0),
.io_out_bits_cmd_status_xs (io_cmd_bits_status_xs_0),
.io_out_bits_cmd_status_fs (io_cmd_bits_status_fs_0),
.io_out_bits_cmd_status_mpp (io_cmd_bits_status_mpp_0),
.io_out_bits_cmd_status_vs (io_cmd_bits_status_vs_0),
.io_out_bits_cmd_status_spp (io_cmd_bits_status_spp_0),
.io_out_bits_cmd_status_mpie (io_cmd_bits_status_mpie_0),
.io_out_bits_cmd_status_ube (io_cmd_bits_status_ube_0),
.io_out_bits_cmd_status_spie (io_cmd_bits_status_spie_0),
.io_out_bits_cmd_status_upie (io_cmd_bits_status_upie_0),
.io_out_bits_cmd_status_mie (io_cmd_bits_status_mie_0),
.io_out_bits_cmd_status_hie (io_cmd_bits_status_hie_0),
.io_out_bits_cmd_status_sie (io_cmd_bits_status_sie_0),
.io_out_bits_cmd_status_uie (io_cmd_bits_status_uie_0),
.io_out_bits_dram_addr (_command_p_io_out_bits_dram_addr),
.io_out_bits_spad_addr (_mvin_cmd_rs2_local_addr_result_result_T),
.io_out_bits_I (_command_p_io_out_bits_I),
.io_out_bits_K (_mvin_cmd_rs2_num_cols_T),
.io_busy (_command_p_io_busy)
); // @[LoopConv.scala:313:25]
assign io_cmd_bits_inst_funct_0 = _command_p_io_out_bits_cmd_inst_funct; // @[LoopConv.scala:235:7, :313:25]
assign io_req_ready = io_req_ready_0; // @[LoopConv.scala:235:7]
assign io_cmd_valid = io_cmd_valid_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_inst_funct = io_cmd_bits_inst_funct_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_inst_rs2 = io_cmd_bits_inst_rs2_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_inst_rs1 = io_cmd_bits_inst_rs1_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_inst_xd = io_cmd_bits_inst_xd_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_inst_xs1 = io_cmd_bits_inst_xs1_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_inst_xs2 = io_cmd_bits_inst_xs2_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_inst_rd = io_cmd_bits_inst_rd_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_inst_opcode = io_cmd_bits_inst_opcode_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_rs1 = io_cmd_bits_rs1_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_rs2 = io_cmd_bits_rs2_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_debug = io_cmd_bits_status_debug_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_cease = io_cmd_bits_status_cease_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_wfi = io_cmd_bits_status_wfi_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_isa = io_cmd_bits_status_isa_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_dprv = io_cmd_bits_status_dprv_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_dv = io_cmd_bits_status_dv_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_prv = io_cmd_bits_status_prv_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_v = io_cmd_bits_status_v_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_sd = io_cmd_bits_status_sd_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_zero2 = io_cmd_bits_status_zero2_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_mpv = io_cmd_bits_status_mpv_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_gva = io_cmd_bits_status_gva_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_mbe = io_cmd_bits_status_mbe_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_sbe = io_cmd_bits_status_sbe_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_sxl = io_cmd_bits_status_sxl_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_uxl = io_cmd_bits_status_uxl_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_sd_rv32 = io_cmd_bits_status_sd_rv32_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_zero1 = io_cmd_bits_status_zero1_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_tsr = io_cmd_bits_status_tsr_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_tw = io_cmd_bits_status_tw_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_tvm = io_cmd_bits_status_tvm_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_mxr = io_cmd_bits_status_mxr_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_sum = io_cmd_bits_status_sum_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_mprv = io_cmd_bits_status_mprv_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_xs = io_cmd_bits_status_xs_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_fs = io_cmd_bits_status_fs_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_mpp = io_cmd_bits_status_mpp_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_vs = io_cmd_bits_status_vs_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_spp = io_cmd_bits_status_spp_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_mpie = io_cmd_bits_status_mpie_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_ube = io_cmd_bits_status_ube_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_spie = io_cmd_bits_status_spie_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_upie = io_cmd_bits_status_upie_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_mie = io_cmd_bits_status_mie_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_hie = io_cmd_bits_status_hie_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_sie = io_cmd_bits_status_sie_0; // @[LoopConv.scala:235:7]
assign io_cmd_bits_status_uie = io_cmd_bits_status_uie_0; // @[LoopConv.scala:235:7]
assign io_idle = io_idle_0; // @[LoopConv.scala:235:7]
assign io_loop_id = io_loop_id_0; // @[LoopConv.scala:235:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File fetch-buffer.scala:
//******************************************************************************
// Copyright (c) 2018 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Fetch Buffer
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Takes a FetchBundle and converts into a vector of MicroOps.
package boom.v3.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.rocket.{MStatus, BP, BreakpointUnit}
import boom.v3.common._
import boom.v3.util.{BoolToChar, MaskUpper}
/**
* Bundle that is made up of converted MicroOps from the Fetch Bundle
* input to the Fetch Buffer. This is handed to the Decode stage.
*/
class FetchBufferResp(implicit p: Parameters) extends BoomBundle
{
val uops = Vec(coreWidth, Valid(new MicroOp()))
}
/**
* Buffer to hold fetched packets and convert them into a vector of MicroOps
* to give the Decode stage
*
 * numFetchBufferEntries (set by the core parameters) is effectively the number of full-sized fetch packets we can hold.
*/
class FetchBuffer(implicit p: Parameters) extends BoomModule
with HasBoomCoreParameters
with HasBoomFrontendParameters
{
val numEntries = numFetchBufferEntries
val io = IO(new BoomBundle {
val enq = Flipped(Decoupled(new FetchBundle()))
val deq = new DecoupledIO(new FetchBufferResp())
// Was the pipeline redirected? Clear/reset the fetchbuffer.
val clear = Input(Bool())
})
require (numEntries > fetchWidth)
require (numEntries % coreWidth == 0)
val numRows = numEntries / coreWidth
val ram = Reg(Vec(numEntries, new MicroOp))
ram.suggestName("fb_uop_ram")
val deq_vec = Wire(Vec(numRows, Vec(coreWidth, new MicroOp)))
val head = RegInit(1.U(numRows.W))
val tail = RegInit(1.U(numEntries.W))
val maybe_full = RegInit(false.B)
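  // head is a one-hot pointer over rows and tail a one-hot pointer over
  // individual entries; maybe_full disambiguates the full case when the tail
  // has wrapped back onto the head row.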
//-------------------------------------------------------------
// **** Enqueue Uops ****
//-------------------------------------------------------------
// Step 1: Convert FetchPacket into a vector of MicroOps.
// Step 2: Generate one-hot write indices.
// Step 3: Write MicroOps into the RAM.
def rotateLeft(in: UInt, k: Int) = {
val n = in.getWidth
Cat(in(n-k-1,0), in(n-1, n-k))
}
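  // Block the enqueue if the incoming packet could wrap the tail onto the head
  // row (might_hit_head), or if the tail already sits on the head row while the
  // buffer may be full (at_head && maybe_full).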
val might_hit_head = (1 until fetchWidth).map(k => VecInit(rotateLeft(tail, k).asBools.zipWithIndex.filter
{case (e,i) => i % coreWidth == 0}.map {case (e,i) => e}).asUInt).map(tail => head & tail).reduce(_|_).orR
val at_head = (VecInit(tail.asBools.zipWithIndex.filter {case (e,i) => i % coreWidth == 0}
.map {case (e,i) => e}).asUInt & head).orR
val do_enq = !(at_head && maybe_full || might_hit_head)
io.enq.ready := do_enq
// Input microops.
val in_mask = Wire(Vec(fetchWidth, Bool()))
val in_uops = Wire(Vec(fetchWidth, new MicroOp()))
// Step 1: Convert FetchPacket into a vector of MicroOps.
for (b <- 0 until nBanks) {
for (w <- 0 until bankWidth) {
val i = (b * bankWidth) + w
val pc = (bankAlign(io.enq.bits.pc) + (i << 1).U)
in_uops(i) := DontCare
in_mask(i) := io.enq.valid && io.enq.bits.mask(i)
in_uops(i).edge_inst := false.B
in_uops(i).debug_pc := pc
in_uops(i).pc_lob := pc
in_uops(i).is_sfb := io.enq.bits.sfbs(i) || io.enq.bits.shadowed_mask(i)
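      // An edge_inst is an instruction whose first half lives in the previous
      // bank, so slot 0 of this bank records a debug PC two bytes before the
      // bank-aligned start address.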
if (w == 0) {
when (io.enq.bits.edge_inst(b)) {
in_uops(i).debug_pc := bankAlign(io.enq.bits.pc) + (b * bankBytes).U - 2.U
in_uops(i).pc_lob := bankAlign(io.enq.bits.pc) + (b * bankBytes).U
in_uops(i).edge_inst := true.B
}
}
in_uops(i).ftq_idx := io.enq.bits.ftq_idx
in_uops(i).inst := io.enq.bits.exp_insts(i)
in_uops(i).debug_inst := io.enq.bits.insts(i)
in_uops(i).is_rvc := io.enq.bits.insts(i)(1,0) =/= 3.U
in_uops(i).taken := io.enq.bits.cfi_idx.bits === i.U && io.enq.bits.cfi_idx.valid
in_uops(i).xcpt_pf_if := io.enq.bits.xcpt_pf_if
in_uops(i).xcpt_ae_if := io.enq.bits.xcpt_ae_if
in_uops(i).bp_debug_if := io.enq.bits.bp_debug_if_oh(i)
in_uops(i).bp_xcpt_if := io.enq.bits.bp_xcpt_if_oh(i)
in_uops(i).debug_fsrc := io.enq.bits.fsrc
}
}
// Step 2. Generate one-hot write indices.
val enq_idxs = Wire(Vec(fetchWidth, UInt(numEntries.W)))
def inc(ptr: UInt) = {
val n = ptr.getWidth
Cat(ptr(n-2,0), ptr(n-1))
}
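  // Chain inc() across the fetch packet: each valid uop claims the current
  // one-hot write index and rotates it forward one entry for the next slot.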
var enq_idx = tail
for (i <- 0 until fetchWidth) {
enq_idxs(i) := enq_idx
enq_idx = Mux(in_mask(i), inc(enq_idx), enq_idx)
}
// Step 3: Write MicroOps into the RAM.
for (i <- 0 until fetchWidth) {
for (j <- 0 until numEntries) {
when (do_enq && in_mask(i) && enq_idxs(i)(j)) {
ram(j) := in_uops(i)
}
}
}
//-------------------------------------------------------------
// **** Dequeue Uops ****
//-------------------------------------------------------------
val tail_collisions = VecInit((0 until numEntries).map(i =>
head(i/coreWidth) && (!maybe_full || (i % coreWidth != 0).B))).asUInt & tail
val slot_will_hit_tail = (0 until numRows).map(i => tail_collisions((i+1)*coreWidth-1, i*coreWidth)).reduce(_|_)
val will_hit_tail = slot_will_hit_tail.orR
val do_deq = io.deq.ready && !will_hit_tail
val deq_valids = (~MaskUpper(slot_will_hit_tail)).asBools
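  // Within the head row, the slot that collides with the tail (and every slot
  // above it, via MaskUpper) holds no uop yet, so deq_valids only marks the
  // leading, already-written uops as valid for Decode.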
// Generate vec for dequeue read port.
for (i <- 0 until numEntries) {
deq_vec(i/coreWidth)(i%coreWidth) := ram(i)
}
io.deq.bits.uops zip deq_valids map {case (d,v) => d.valid := v}
io.deq.bits.uops zip Mux1H(head, deq_vec) map {case (d,q) => d.bits := q}
io.deq.valid := deq_valids.reduce(_||_)
//-------------------------------------------------------------
// **** Update State ****
//-------------------------------------------------------------
when (do_enq) {
tail := enq_idx
when (in_mask.reduce(_||_)) {
maybe_full := true.B
}
}
when (do_deq) {
head := inc(head)
maybe_full := false.B
}
when (io.clear) {
head := 1.U
tail := 1.U
maybe_full := false.B
}
// TODO Is this necessary?
when (reset.asBool) {
io.deq.bits.uops map { u => u.valid := false.B }
}
}
| module FetchBuffer_1( // @[fetch-buffer.scala:40:7]
input clock, // @[fetch-buffer.scala:40:7]
input reset, // @[fetch-buffer.scala:40:7]
output io_enq_ready, // @[fetch-buffer.scala:45:14]
input io_enq_valid, // @[fetch-buffer.scala:45:14]
input [39:0] io_enq_bits_pc, // @[fetch-buffer.scala:45:14]
input [39:0] io_enq_bits_next_pc, // @[fetch-buffer.scala:45:14]
input io_enq_bits_edge_inst_0, // @[fetch-buffer.scala:45:14]
input io_enq_bits_edge_inst_1, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_insts_0, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_insts_1, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_insts_2, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_insts_3, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_insts_4, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_insts_5, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_insts_6, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_insts_7, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_exp_insts_0, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_exp_insts_1, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_exp_insts_2, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_exp_insts_3, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_exp_insts_4, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_exp_insts_5, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_exp_insts_6, // @[fetch-buffer.scala:45:14]
input [31:0] io_enq_bits_exp_insts_7, // @[fetch-buffer.scala:45:14]
input [15:0] io_enq_bits_sfb_masks_0, // @[fetch-buffer.scala:45:14]
input [15:0] io_enq_bits_sfb_masks_1, // @[fetch-buffer.scala:45:14]
input [15:0] io_enq_bits_sfb_masks_2, // @[fetch-buffer.scala:45:14]
input [15:0] io_enq_bits_sfb_masks_3, // @[fetch-buffer.scala:45:14]
input [15:0] io_enq_bits_sfb_masks_4, // @[fetch-buffer.scala:45:14]
input [15:0] io_enq_bits_sfb_masks_5, // @[fetch-buffer.scala:45:14]
input [15:0] io_enq_bits_sfb_masks_6, // @[fetch-buffer.scala:45:14]
input [15:0] io_enq_bits_sfb_masks_7, // @[fetch-buffer.scala:45:14]
input [4:0] io_enq_bits_sfb_dests_0, // @[fetch-buffer.scala:45:14]
input [4:0] io_enq_bits_sfb_dests_1, // @[fetch-buffer.scala:45:14]
input [4:0] io_enq_bits_sfb_dests_2, // @[fetch-buffer.scala:45:14]
input [4:0] io_enq_bits_sfb_dests_3, // @[fetch-buffer.scala:45:14]
input [4:0] io_enq_bits_sfb_dests_4, // @[fetch-buffer.scala:45:14]
input [4:0] io_enq_bits_sfb_dests_5, // @[fetch-buffer.scala:45:14]
input [4:0] io_enq_bits_sfb_dests_6, // @[fetch-buffer.scala:45:14]
input [4:0] io_enq_bits_sfb_dests_7, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowable_mask_0, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowable_mask_1, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowable_mask_2, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowable_mask_3, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowable_mask_4, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowable_mask_5, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowable_mask_6, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowable_mask_7, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowed_mask_0, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowed_mask_1, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowed_mask_2, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowed_mask_3, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowed_mask_4, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowed_mask_5, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowed_mask_6, // @[fetch-buffer.scala:45:14]
input io_enq_bits_shadowed_mask_7, // @[fetch-buffer.scala:45:14]
input io_enq_bits_cfi_idx_valid, // @[fetch-buffer.scala:45:14]
input [2:0] io_enq_bits_cfi_idx_bits, // @[fetch-buffer.scala:45:14]
input [2:0] io_enq_bits_cfi_type, // @[fetch-buffer.scala:45:14]
input io_enq_bits_cfi_is_call, // @[fetch-buffer.scala:45:14]
input io_enq_bits_cfi_is_ret, // @[fetch-buffer.scala:45:14]
input io_enq_bits_cfi_npc_plus4, // @[fetch-buffer.scala:45:14]
input [39:0] io_enq_bits_ras_top, // @[fetch-buffer.scala:45:14]
input [4:0] io_enq_bits_ftq_idx, // @[fetch-buffer.scala:45:14]
input [7:0] io_enq_bits_mask, // @[fetch-buffer.scala:45:14]
input [7:0] io_enq_bits_br_mask, // @[fetch-buffer.scala:45:14]
input [63:0] io_enq_bits_ghist_old_history, // @[fetch-buffer.scala:45:14]
input io_enq_bits_ghist_current_saw_branch_not_taken, // @[fetch-buffer.scala:45:14]
input io_enq_bits_ghist_new_saw_branch_not_taken, // @[fetch-buffer.scala:45:14]
input io_enq_bits_ghist_new_saw_branch_taken, // @[fetch-buffer.scala:45:14]
input [4:0] io_enq_bits_ghist_ras_idx, // @[fetch-buffer.scala:45:14]
input io_enq_bits_lhist_0, // @[fetch-buffer.scala:45:14]
input io_enq_bits_lhist_1, // @[fetch-buffer.scala:45:14]
input io_enq_bits_xcpt_pf_if, // @[fetch-buffer.scala:45:14]
input io_enq_bits_xcpt_ae_if, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_debug_if_oh_0, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_debug_if_oh_1, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_debug_if_oh_2, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_debug_if_oh_3, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_debug_if_oh_4, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_debug_if_oh_5, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_debug_if_oh_6, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_debug_if_oh_7, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_xcpt_if_oh_0, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_xcpt_if_oh_1, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_xcpt_if_oh_2, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_xcpt_if_oh_3, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_xcpt_if_oh_4, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_xcpt_if_oh_5, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_xcpt_if_oh_6, // @[fetch-buffer.scala:45:14]
input io_enq_bits_bp_xcpt_if_oh_7, // @[fetch-buffer.scala:45:14]
input io_enq_bits_end_half_valid, // @[fetch-buffer.scala:45:14]
input [15:0] io_enq_bits_end_half_bits, // @[fetch-buffer.scala:45:14]
input [119:0] io_enq_bits_bpd_meta_0, // @[fetch-buffer.scala:45:14]
input [119:0] io_enq_bits_bpd_meta_1, // @[fetch-buffer.scala:45:14]
input [1:0] io_enq_bits_fsrc, // @[fetch-buffer.scala:45:14]
input [1:0] io_enq_bits_tsrc, // @[fetch-buffer.scala:45:14]
input io_deq_ready, // @[fetch-buffer.scala:45:14]
output io_deq_valid, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_0_valid, // @[fetch-buffer.scala:45:14]
output [31:0] io_deq_bits_uops_0_bits_inst, // @[fetch-buffer.scala:45:14]
output [31:0] io_deq_bits_uops_0_bits_debug_inst, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_0_bits_is_rvc, // @[fetch-buffer.scala:45:14]
output [39:0] io_deq_bits_uops_0_bits_debug_pc, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_0_bits_is_sfb, // @[fetch-buffer.scala:45:14]
output [4:0] io_deq_bits_uops_0_bits_ftq_idx, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_0_bits_edge_inst, // @[fetch-buffer.scala:45:14]
output [5:0] io_deq_bits_uops_0_bits_pc_lob, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_0_bits_taken, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_0_bits_xcpt_pf_if, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_0_bits_xcpt_ae_if, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_0_bits_bp_debug_if, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_0_bits_bp_xcpt_if, // @[fetch-buffer.scala:45:14]
output [1:0] io_deq_bits_uops_0_bits_debug_fsrc, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_1_valid, // @[fetch-buffer.scala:45:14]
output [31:0] io_deq_bits_uops_1_bits_inst, // @[fetch-buffer.scala:45:14]
output [31:0] io_deq_bits_uops_1_bits_debug_inst, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_1_bits_is_rvc, // @[fetch-buffer.scala:45:14]
output [39:0] io_deq_bits_uops_1_bits_debug_pc, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_1_bits_is_sfb, // @[fetch-buffer.scala:45:14]
output [4:0] io_deq_bits_uops_1_bits_ftq_idx, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_1_bits_edge_inst, // @[fetch-buffer.scala:45:14]
output [5:0] io_deq_bits_uops_1_bits_pc_lob, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_1_bits_taken, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_1_bits_xcpt_pf_if, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_1_bits_xcpt_ae_if, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_1_bits_bp_debug_if, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_1_bits_bp_xcpt_if, // @[fetch-buffer.scala:45:14]
output [1:0] io_deq_bits_uops_1_bits_debug_fsrc, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_2_valid, // @[fetch-buffer.scala:45:14]
output [31:0] io_deq_bits_uops_2_bits_inst, // @[fetch-buffer.scala:45:14]
output [31:0] io_deq_bits_uops_2_bits_debug_inst, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_2_bits_is_rvc, // @[fetch-buffer.scala:45:14]
output [39:0] io_deq_bits_uops_2_bits_debug_pc, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_2_bits_is_sfb, // @[fetch-buffer.scala:45:14]
output [4:0] io_deq_bits_uops_2_bits_ftq_idx, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_2_bits_edge_inst, // @[fetch-buffer.scala:45:14]
output [5:0] io_deq_bits_uops_2_bits_pc_lob, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_2_bits_taken, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_2_bits_xcpt_pf_if, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_2_bits_xcpt_ae_if, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_2_bits_bp_debug_if, // @[fetch-buffer.scala:45:14]
output io_deq_bits_uops_2_bits_bp_xcpt_if, // @[fetch-buffer.scala:45:14]
output [1:0] io_deq_bits_uops_2_bits_debug_fsrc, // @[fetch-buffer.scala:45:14]
input io_clear // @[fetch-buffer.scala:45:14]
);
wire io_enq_valid_0 = io_enq_valid; // @[fetch-buffer.scala:40:7]
wire [39:0] io_enq_bits_pc_0 = io_enq_bits_pc; // @[fetch-buffer.scala:40:7]
wire [39:0] io_enq_bits_next_pc_0 = io_enq_bits_next_pc; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_edge_inst_0_0 = io_enq_bits_edge_inst_0; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_edge_inst_1_0 = io_enq_bits_edge_inst_1; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_insts_0_0 = io_enq_bits_insts_0; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_insts_1_0 = io_enq_bits_insts_1; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_insts_2_0 = io_enq_bits_insts_2; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_insts_3_0 = io_enq_bits_insts_3; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_insts_4_0 = io_enq_bits_insts_4; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_insts_5_0 = io_enq_bits_insts_5; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_insts_6_0 = io_enq_bits_insts_6; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_insts_7_0 = io_enq_bits_insts_7; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_exp_insts_0_0 = io_enq_bits_exp_insts_0; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_exp_insts_1_0 = io_enq_bits_exp_insts_1; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_exp_insts_2_0 = io_enq_bits_exp_insts_2; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_exp_insts_3_0 = io_enq_bits_exp_insts_3; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_exp_insts_4_0 = io_enq_bits_exp_insts_4; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_exp_insts_5_0 = io_enq_bits_exp_insts_5; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_exp_insts_6_0 = io_enq_bits_exp_insts_6; // @[fetch-buffer.scala:40:7]
wire [31:0] io_enq_bits_exp_insts_7_0 = io_enq_bits_exp_insts_7; // @[fetch-buffer.scala:40:7]
wire [15:0] io_enq_bits_sfb_masks_0_0 = io_enq_bits_sfb_masks_0; // @[fetch-buffer.scala:40:7]
wire [15:0] io_enq_bits_sfb_masks_1_0 = io_enq_bits_sfb_masks_1; // @[fetch-buffer.scala:40:7]
wire [15:0] io_enq_bits_sfb_masks_2_0 = io_enq_bits_sfb_masks_2; // @[fetch-buffer.scala:40:7]
wire [15:0] io_enq_bits_sfb_masks_3_0 = io_enq_bits_sfb_masks_3; // @[fetch-buffer.scala:40:7]
wire [15:0] io_enq_bits_sfb_masks_4_0 = io_enq_bits_sfb_masks_4; // @[fetch-buffer.scala:40:7]
wire [15:0] io_enq_bits_sfb_masks_5_0 = io_enq_bits_sfb_masks_5; // @[fetch-buffer.scala:40:7]
wire [15:0] io_enq_bits_sfb_masks_6_0 = io_enq_bits_sfb_masks_6; // @[fetch-buffer.scala:40:7]
wire [15:0] io_enq_bits_sfb_masks_7_0 = io_enq_bits_sfb_masks_7; // @[fetch-buffer.scala:40:7]
wire [4:0] io_enq_bits_sfb_dests_0_0 = io_enq_bits_sfb_dests_0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_enq_bits_sfb_dests_1_0 = io_enq_bits_sfb_dests_1; // @[fetch-buffer.scala:40:7]
wire [4:0] io_enq_bits_sfb_dests_2_0 = io_enq_bits_sfb_dests_2; // @[fetch-buffer.scala:40:7]
wire [4:0] io_enq_bits_sfb_dests_3_0 = io_enq_bits_sfb_dests_3; // @[fetch-buffer.scala:40:7]
wire [4:0] io_enq_bits_sfb_dests_4_0 = io_enq_bits_sfb_dests_4; // @[fetch-buffer.scala:40:7]
wire [4:0] io_enq_bits_sfb_dests_5_0 = io_enq_bits_sfb_dests_5; // @[fetch-buffer.scala:40:7]
wire [4:0] io_enq_bits_sfb_dests_6_0 = io_enq_bits_sfb_dests_6; // @[fetch-buffer.scala:40:7]
wire [4:0] io_enq_bits_sfb_dests_7_0 = io_enq_bits_sfb_dests_7; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowable_mask_0_0 = io_enq_bits_shadowable_mask_0; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowable_mask_1_0 = io_enq_bits_shadowable_mask_1; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowable_mask_2_0 = io_enq_bits_shadowable_mask_2; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowable_mask_3_0 = io_enq_bits_shadowable_mask_3; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowable_mask_4_0 = io_enq_bits_shadowable_mask_4; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowable_mask_5_0 = io_enq_bits_shadowable_mask_5; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowable_mask_6_0 = io_enq_bits_shadowable_mask_6; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowable_mask_7_0 = io_enq_bits_shadowable_mask_7; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowed_mask_0_0 = io_enq_bits_shadowed_mask_0; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowed_mask_1_0 = io_enq_bits_shadowed_mask_1; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowed_mask_2_0 = io_enq_bits_shadowed_mask_2; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowed_mask_3_0 = io_enq_bits_shadowed_mask_3; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowed_mask_4_0 = io_enq_bits_shadowed_mask_4; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowed_mask_5_0 = io_enq_bits_shadowed_mask_5; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowed_mask_6_0 = io_enq_bits_shadowed_mask_6; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_shadowed_mask_7_0 = io_enq_bits_shadowed_mask_7; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_cfi_idx_valid_0 = io_enq_bits_cfi_idx_valid; // @[fetch-buffer.scala:40:7]
wire [2:0] io_enq_bits_cfi_idx_bits_0 = io_enq_bits_cfi_idx_bits; // @[fetch-buffer.scala:40:7]
wire [2:0] io_enq_bits_cfi_type_0 = io_enq_bits_cfi_type; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_cfi_is_call_0 = io_enq_bits_cfi_is_call; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_cfi_is_ret_0 = io_enq_bits_cfi_is_ret; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_cfi_npc_plus4_0 = io_enq_bits_cfi_npc_plus4; // @[fetch-buffer.scala:40:7]
wire [39:0] io_enq_bits_ras_top_0 = io_enq_bits_ras_top; // @[fetch-buffer.scala:40:7]
wire [4:0] io_enq_bits_ftq_idx_0 = io_enq_bits_ftq_idx; // @[fetch-buffer.scala:40:7]
wire [7:0] io_enq_bits_mask_0 = io_enq_bits_mask; // @[fetch-buffer.scala:40:7]
wire [7:0] io_enq_bits_br_mask_0 = io_enq_bits_br_mask; // @[fetch-buffer.scala:40:7]
wire [63:0] io_enq_bits_ghist_old_history_0 = io_enq_bits_ghist_old_history; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_ghist_current_saw_branch_not_taken_0 = io_enq_bits_ghist_current_saw_branch_not_taken; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_ghist_new_saw_branch_not_taken_0 = io_enq_bits_ghist_new_saw_branch_not_taken; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_ghist_new_saw_branch_taken_0 = io_enq_bits_ghist_new_saw_branch_taken; // @[fetch-buffer.scala:40:7]
wire [4:0] io_enq_bits_ghist_ras_idx_0 = io_enq_bits_ghist_ras_idx; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_lhist_0_0 = io_enq_bits_lhist_0; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_lhist_1_0 = io_enq_bits_lhist_1; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_xcpt_pf_if_0 = io_enq_bits_xcpt_pf_if; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_xcpt_ae_if_0 = io_enq_bits_xcpt_ae_if; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_debug_if_oh_0_0 = io_enq_bits_bp_debug_if_oh_0; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_debug_if_oh_1_0 = io_enq_bits_bp_debug_if_oh_1; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_debug_if_oh_2_0 = io_enq_bits_bp_debug_if_oh_2; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_debug_if_oh_3_0 = io_enq_bits_bp_debug_if_oh_3; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_debug_if_oh_4_0 = io_enq_bits_bp_debug_if_oh_4; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_debug_if_oh_5_0 = io_enq_bits_bp_debug_if_oh_5; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_debug_if_oh_6_0 = io_enq_bits_bp_debug_if_oh_6; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_debug_if_oh_7_0 = io_enq_bits_bp_debug_if_oh_7; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_xcpt_if_oh_0_0 = io_enq_bits_bp_xcpt_if_oh_0; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_xcpt_if_oh_1_0 = io_enq_bits_bp_xcpt_if_oh_1; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_xcpt_if_oh_2_0 = io_enq_bits_bp_xcpt_if_oh_2; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_xcpt_if_oh_3_0 = io_enq_bits_bp_xcpt_if_oh_3; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_xcpt_if_oh_4_0 = io_enq_bits_bp_xcpt_if_oh_4; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_xcpt_if_oh_5_0 = io_enq_bits_bp_xcpt_if_oh_5; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_xcpt_if_oh_6_0 = io_enq_bits_bp_xcpt_if_oh_6; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_bp_xcpt_if_oh_7_0 = io_enq_bits_bp_xcpt_if_oh_7; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_end_half_valid_0 = io_enq_bits_end_half_valid; // @[fetch-buffer.scala:40:7]
wire [15:0] io_enq_bits_end_half_bits_0 = io_enq_bits_end_half_bits; // @[fetch-buffer.scala:40:7]
wire [119:0] io_enq_bits_bpd_meta_0_0 = io_enq_bits_bpd_meta_0; // @[fetch-buffer.scala:40:7]
wire [119:0] io_enq_bits_bpd_meta_1_0 = io_enq_bits_bpd_meta_1; // @[fetch-buffer.scala:40:7]
wire [1:0] io_enq_bits_fsrc_0 = io_enq_bits_fsrc; // @[fetch-buffer.scala:40:7]
wire [1:0] io_enq_bits_tsrc_0 = io_enq_bits_tsrc; // @[fetch-buffer.scala:40:7]
wire io_deq_ready_0 = io_deq_ready; // @[fetch-buffer.scala:40:7]
wire io_clear_0 = io_clear; // @[fetch-buffer.scala:40:7]
wire _tail_collisions_T_6 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_10 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_18 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_22 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_30 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_34 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_42 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_46 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_54 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_58 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_66 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_70 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_78 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_82 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_90 = 1'h1; // @[fetch-buffer.scala:155:61]
wire _tail_collisions_T_94 = 1'h1; // @[fetch-buffer.scala:155:61]
wire [5:0] io_deq_bits_uops_0_bits_ldst = 6'h0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_0_bits_lrs1 = 6'h0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_0_bits_lrs2 = 6'h0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_0_bits_lrs3 = 6'h0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_1_bits_ldst = 6'h0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_1_bits_lrs1 = 6'h0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_1_bits_lrs2 = 6'h0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_1_bits_lrs3 = 6'h0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_2_bits_ldst = 6'h0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_2_bits_lrs1 = 6'h0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_2_bits_lrs2 = 6'h0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_2_bits_lrs3 = 6'h0; // @[fetch-buffer.scala:40:7]
wire [5:0] deq_vec_0_0_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_0_0_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_0_0_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_0_0_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_0_1_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_0_1_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_0_1_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_0_1_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_0_2_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_0_2_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_0_2_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_0_2_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_1_0_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_1_0_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_1_0_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_1_0_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_1_1_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_1_1_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_1_1_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_1_1_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_1_2_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_1_2_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_1_2_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_1_2_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_2_0_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_2_0_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_2_0_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_2_0_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_2_1_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_2_1_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_2_1_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_2_1_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_2_2_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_2_2_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_2_2_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_2_2_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_3_0_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_3_0_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_3_0_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_3_0_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_3_1_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_3_1_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_3_1_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_3_1_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_3_2_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_3_2_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_3_2_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_3_2_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_4_0_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_4_0_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_4_0_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_4_0_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_4_1_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_4_1_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_4_1_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_4_1_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_4_2_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_4_2_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_4_2_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_4_2_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_5_0_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_5_0_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_5_0_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_5_0_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_5_1_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_5_1_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_5_1_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_5_1_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_5_2_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_5_2_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_5_2_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_5_2_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_6_0_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_6_0_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_6_0_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_6_0_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_6_1_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_6_1_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_6_1_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_6_1_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_6_2_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_6_2_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_6_2_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_6_2_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_7_0_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_7_0_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_7_0_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_7_0_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_7_1_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_7_1_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_7_1_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_7_1_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_7_2_ldst = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_7_2_lrs1 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_7_2_lrs2 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] deq_vec_7_2_lrs3 = 6'h0; // @[fetch-buffer.scala:59:21]
wire [5:0] in_uops_0_ldst = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_0_lrs1 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_0_lrs2 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_0_lrs3 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_1_ldst = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_1_lrs1 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_1_lrs2 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_1_lrs3 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_2_ldst = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_2_lrs1 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_2_lrs2 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_2_lrs3 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_3_ldst = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_3_lrs1 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_3_lrs2 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_3_lrs3 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_4_ldst = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_4_lrs1 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_4_lrs2 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_4_lrs3 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_5_ldst = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_5_lrs1 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_5_lrs2 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_5_lrs3 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_6_ldst = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_6_lrs1 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_6_lrs2 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_6_lrs3 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_7_ldst = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_7_lrs1 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_7_lrs2 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_7_lrs3 = 6'h0; // @[fetch-buffer.scala:88:21]
wire [63:0] io_deq_bits_uops_0_bits_exc_cause = 64'h0; // @[fetch-buffer.scala:40:7]
wire [63:0] io_deq_bits_uops_1_bits_exc_cause = 64'h0; // @[fetch-buffer.scala:40:7]
wire [63:0] io_deq_bits_uops_2_bits_exc_cause = 64'h0; // @[fetch-buffer.scala:40:7]
wire [63:0] deq_vec_0_0_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_0_1_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_0_2_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_1_0_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_1_1_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_1_2_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_2_0_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_2_1_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_2_2_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_3_0_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_3_1_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_3_2_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_4_0_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_4_1_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_4_2_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_5_0_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_5_1_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_5_2_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_6_0_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_6_1_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_6_2_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_7_0_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_7_1_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] deq_vec_7_2_exc_cause = 64'h0; // @[fetch-buffer.scala:59:21]
wire [63:0] in_uops_0_exc_cause = 64'h0; // @[fetch-buffer.scala:88:21]
wire [63:0] in_uops_1_exc_cause = 64'h0; // @[fetch-buffer.scala:88:21]
wire [63:0] in_uops_2_exc_cause = 64'h0; // @[fetch-buffer.scala:88:21]
wire [63:0] in_uops_3_exc_cause = 64'h0; // @[fetch-buffer.scala:88:21]
wire [63:0] in_uops_4_exc_cause = 64'h0; // @[fetch-buffer.scala:88:21]
wire [63:0] in_uops_5_exc_cause = 64'h0; // @[fetch-buffer.scala:88:21]
wire [63:0] in_uops_6_exc_cause = 64'h0; // @[fetch-buffer.scala:88:21]
wire [63:0] in_uops_7_exc_cause = 64'h0; // @[fetch-buffer.scala:88:21]
wire [11:0] io_deq_bits_uops_0_bits_csr_addr = 12'h0; // @[fetch-buffer.scala:40:7]
wire [11:0] io_deq_bits_uops_1_bits_csr_addr = 12'h0; // @[fetch-buffer.scala:40:7]
wire [11:0] io_deq_bits_uops_2_bits_csr_addr = 12'h0; // @[fetch-buffer.scala:40:7]
wire [11:0] deq_vec_0_0_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_0_1_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_0_2_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_1_0_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_1_1_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_1_2_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_2_0_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_2_1_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_2_2_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_3_0_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_3_1_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_3_2_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_4_0_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_4_1_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_4_2_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_5_0_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_5_1_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_5_2_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_6_0_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_6_1_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_6_2_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_7_0_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_7_1_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] deq_vec_7_2_csr_addr = 12'h0; // @[fetch-buffer.scala:59:21]
wire [11:0] in_uops_0_csr_addr = 12'h0; // @[fetch-buffer.scala:88:21]
wire [11:0] in_uops_1_csr_addr = 12'h0; // @[fetch-buffer.scala:88:21]
wire [11:0] in_uops_2_csr_addr = 12'h0; // @[fetch-buffer.scala:88:21]
wire [11:0] in_uops_3_csr_addr = 12'h0; // @[fetch-buffer.scala:88:21]
wire [11:0] in_uops_4_csr_addr = 12'h0; // @[fetch-buffer.scala:88:21]
wire [11:0] in_uops_5_csr_addr = 12'h0; // @[fetch-buffer.scala:88:21]
wire [11:0] in_uops_6_csr_addr = 12'h0; // @[fetch-buffer.scala:88:21]
wire [11:0] in_uops_7_csr_addr = 12'h0; // @[fetch-buffer.scala:88:21]
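  // ---- 20-bit imm_packed fields of the dequeue/enqueue uops: all tied to constant 20'h0 ----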
wire [19:0] io_deq_bits_uops_0_bits_imm_packed = 20'h0; // @[fetch-buffer.scala:40:7]
wire [19:0] io_deq_bits_uops_1_bits_imm_packed = 20'h0; // @[fetch-buffer.scala:40:7]
wire [19:0] io_deq_bits_uops_2_bits_imm_packed = 20'h0; // @[fetch-buffer.scala:40:7]
wire [19:0] deq_vec_0_0_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_0_1_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_0_2_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_1_0_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_1_1_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_1_2_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_2_0_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_2_1_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_2_2_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_3_0_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_3_1_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_3_2_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_4_0_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_4_1_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_4_2_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_5_0_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_5_1_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_5_2_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_6_0_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_6_1_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_6_2_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_7_0_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_7_1_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] deq_vec_7_2_imm_packed = 20'h0; // @[fetch-buffer.scala:59:21]
wire [19:0] in_uops_0_imm_packed = 20'h0; // @[fetch-buffer.scala:88:21]
wire [19:0] in_uops_1_imm_packed = 20'h0; // @[fetch-buffer.scala:88:21]
wire [19:0] in_uops_2_imm_packed = 20'h0; // @[fetch-buffer.scala:88:21]
wire [19:0] in_uops_3_imm_packed = 20'h0; // @[fetch-buffer.scala:88:21]
wire [19:0] in_uops_4_imm_packed = 20'h0; // @[fetch-buffer.scala:88:21]
wire [19:0] in_uops_5_imm_packed = 20'h0; // @[fetch-buffer.scala:88:21]
wire [19:0] in_uops_6_imm_packed = 20'h0; // @[fetch-buffer.scala:88:21]
wire [19:0] in_uops_7_imm_packed = 20'h0; // @[fetch-buffer.scala:88:21]
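  // ---- 16-bit br_mask fields of the dequeue/enqueue uops: all tied to constant 16'h0 ----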
wire [15:0] io_deq_bits_uops_0_bits_br_mask = 16'h0; // @[fetch-buffer.scala:40:7]
wire [15:0] io_deq_bits_uops_1_bits_br_mask = 16'h0; // @[fetch-buffer.scala:40:7]
wire [15:0] io_deq_bits_uops_2_bits_br_mask = 16'h0; // @[fetch-buffer.scala:40:7]
wire [15:0] deq_vec_0_0_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_0_1_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_0_2_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_1_0_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_1_1_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_1_2_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_2_0_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_2_1_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_2_2_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_3_0_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_3_1_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_3_2_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_4_0_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_4_1_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_4_2_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_5_0_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_5_1_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_5_2_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_6_0_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_6_1_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_6_2_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_7_0_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_7_1_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] deq_vec_7_2_br_mask = 16'h0; // @[fetch-buffer.scala:59:21]
wire [15:0] in_uops_0_br_mask = 16'h0; // @[fetch-buffer.scala:88:21]
wire [15:0] in_uops_1_br_mask = 16'h0; // @[fetch-buffer.scala:88:21]
wire [15:0] in_uops_2_br_mask = 16'h0; // @[fetch-buffer.scala:88:21]
wire [15:0] in_uops_3_br_mask = 16'h0; // @[fetch-buffer.scala:88:21]
wire [15:0] in_uops_4_br_mask = 16'h0; // @[fetch-buffer.scala:88:21]
wire [15:0] in_uops_5_br_mask = 16'h0; // @[fetch-buffer.scala:88:21]
wire [15:0] in_uops_6_br_mask = 16'h0; // @[fetch-buffer.scala:88:21]
wire [15:0] in_uops_7_br_mask = 16'h0; // @[fetch-buffer.scala:88:21]
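  // ---- 1-bit signals (sfbs and per-uop control/status flags): all tied to constant 1'h0 ----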
wire io_enq_bits_sfbs_0 = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_sfbs_1 = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_sfbs_2 = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_sfbs_3 = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_sfbs_4 = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_sfbs_5 = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_sfbs_6 = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_enq_bits_sfbs_7 = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_is_br = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_is_jalr = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_is_jal = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_prs1_busy = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_prs2_busy = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_prs3_busy = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_ppred_busy = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_exception = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_bypassable = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_mem_signed = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_is_fence = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_is_fencei = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_is_amo = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_uses_ldq = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_uses_stq = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_is_unique = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_flush_on_commit = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_ldst_val = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_frs3_en = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_fp_val = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_fp_single = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_is_br = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_is_jalr = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_is_jal = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_prs1_busy = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_prs2_busy = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_prs3_busy = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_ppred_busy = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_exception = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_bypassable = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_mem_signed = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_is_fence = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_is_fencei = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_is_amo = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_uses_ldq = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_uses_stq = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_is_unique = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_flush_on_commit = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_ldst_val = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_frs3_en = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_fp_val = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_fp_single = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_is_br = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_is_jalr = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_is_jal = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_prs1_busy = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_prs2_busy = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_prs3_busy = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_ppred_busy = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_exception = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_bypassable = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_mem_signed = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_is_fence = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_is_fencei = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_is_amo = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_uses_ldq = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_uses_stq = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_is_unique = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_flush_on_commit = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_ldst_val = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_frs3_en = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_fp_val = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_fp_single = 1'h0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:40:7]
wire deq_vec_0_0_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_0_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_1_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_0_2_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_0_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_1_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_1_2_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_0_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_1_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_2_2_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_0_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_1_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_3_2_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_0_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_1_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_4_2_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_0_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_1_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_5_2_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_0_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_1_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_6_2_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_0_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_1_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_is_br = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_is_jalr = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_is_jal = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_prs1_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_prs2_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_prs3_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_ppred_busy = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_exception = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_bypassable = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_mem_signed = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_is_fence = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_is_fencei = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_is_amo = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_uses_ldq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_uses_stq = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_is_unique = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_flush_on_commit = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_ldst_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_frs3_en = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_fp_val = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_fp_single = 1'h0; // @[fetch-buffer.scala:59:21]
wire deq_vec_7_2_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:59:21]
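  // The in_uops_* flag wires below mirror the single-bit micro-op control fields on the
  // fetch-buffer enqueue side; all are tied to 1'h0, presumably because these decode/rename-stage
  // fields are not yet populated at this point in the front-end pipeline.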
wire in_uops_0_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_is_br = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_is_jalr = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_is_jal = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_prs1_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_prs2_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_prs3_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_ppred_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_exception = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_bypassable = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_mem_signed = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_is_fence = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_is_fencei = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_is_amo = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_uses_ldq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_uses_stq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_is_unique = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_flush_on_commit = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_ldst_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_frs3_en = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_fp_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_fp_single = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_0_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_is_br = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_is_jalr = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_is_jal = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_edge_inst = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_prs1_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_prs2_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_prs3_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_ppred_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_exception = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_bypassable = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_mem_signed = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_is_fence = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_is_fencei = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_is_amo = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_uses_ldq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_uses_stq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_is_unique = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_flush_on_commit = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_ldst_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_frs3_en = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_fp_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_fp_single = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_1_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_is_br = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_is_jalr = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_is_jal = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_edge_inst = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_prs1_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_prs2_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_prs3_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_ppred_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_exception = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_bypassable = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_mem_signed = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_is_fence = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_is_fencei = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_is_amo = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_uses_ldq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_uses_stq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_is_unique = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_flush_on_commit = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_ldst_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_frs3_en = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_fp_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_fp_single = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_2_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_is_br = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_is_jalr = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_is_jal = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_edge_inst = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_prs1_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_prs2_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_prs3_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_ppred_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_exception = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_bypassable = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_mem_signed = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_is_fence = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_is_fencei = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_is_amo = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_uses_ldq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_uses_stq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_is_unique = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_flush_on_commit = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_ldst_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_frs3_en = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_fp_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_fp_single = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_3_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_is_br = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_is_jalr = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_is_jal = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_prs1_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_prs2_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_prs3_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_ppred_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_exception = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_bypassable = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_mem_signed = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_is_fence = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_is_fencei = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_is_amo = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_uses_ldq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_uses_stq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_is_unique = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_flush_on_commit = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_ldst_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_frs3_en = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_fp_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_fp_single = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_4_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_is_br = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_is_jalr = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_is_jal = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_edge_inst = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_prs1_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_prs2_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_prs3_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_ppred_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_exception = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_bypassable = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_mem_signed = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_is_fence = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_is_fencei = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_is_amo = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_uses_ldq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_uses_stq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_is_unique = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_flush_on_commit = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_ldst_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_frs3_en = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_fp_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_fp_single = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_5_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_is_br = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_is_jalr = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_is_jal = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_edge_inst = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_prs1_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_prs2_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_prs3_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_ppred_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_exception = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_bypassable = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_mem_signed = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_is_fence = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_is_fencei = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_is_amo = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_uses_ldq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_uses_stq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_is_unique = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_flush_on_commit = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_ldst_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_frs3_en = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_fp_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_fp_single = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_6_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_ctrl_fcn_dw = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_ctrl_is_load = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_ctrl_is_sta = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_ctrl_is_std = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_iw_p1_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_iw_p2_poisoned = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_is_br = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_is_jalr = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_is_jal = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_edge_inst = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_prs1_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_prs2_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_prs3_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_ppred_busy = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_exception = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_bypassable = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_mem_signed = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_is_fence = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_is_fencei = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_is_amo = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_uses_ldq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_uses_stq = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_is_sys_pc2epc = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_is_unique = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_flush_on_commit = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_ldst_is_rs1 = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_ldst_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_frs3_en = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_fp_val = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_fp_single = 1'h0; // @[fetch-buffer.scala:88:21]
wire in_uops_7_xcpt_ma_if = 1'h0; // @[fetch-buffer.scala:88:21]
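  // 5-bit micro-op fields (ctrl_op_fcn, ldq_idx, stq_idx, ppred, mem_cmd) follow; they are
  // constant 5'h0 on both the dequeue (io_deq_bits_uops_*, deq_vec_*) and enqueue (in_uops_*) sides.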
wire [4:0] io_deq_bits_uops_0_bits_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_0_bits_ldq_idx = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_0_bits_stq_idx = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_0_bits_ppred = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_0_bits_mem_cmd = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_1_bits_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_1_bits_ldq_idx = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_1_bits_stq_idx = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_1_bits_ppred = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_1_bits_mem_cmd = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_2_bits_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_2_bits_ldq_idx = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_2_bits_stq_idx = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_2_bits_ppred = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_2_bits_mem_cmd = 5'h0; // @[fetch-buffer.scala:40:7]
wire [4:0] deq_vec_0_0_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_0_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_0_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_0_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_0_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_1_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_1_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_1_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_1_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_1_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_2_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_2_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_2_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_2_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_0_2_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_0_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_0_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_0_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_0_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_0_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_1_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_1_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_1_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_1_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_1_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_2_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_2_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_2_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_2_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_1_2_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_0_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_0_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_0_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_0_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_0_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_1_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_1_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_1_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_1_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_1_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_2_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_2_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_2_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_2_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_2_2_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_0_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_0_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_0_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_0_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_0_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_1_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_1_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_1_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_1_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_1_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_2_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_2_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_2_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_2_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_3_2_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_0_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_0_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_0_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_0_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_0_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_1_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_1_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_1_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_1_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_1_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_2_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_2_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_2_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_2_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_4_2_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_0_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_0_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_0_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_0_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_0_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_1_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_1_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_1_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_1_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_1_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_2_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_2_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_2_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_2_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_5_2_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_0_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_0_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_0_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_0_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_0_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_1_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_1_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_1_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_1_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_1_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_2_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_2_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_2_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_2_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_6_2_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_0_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_0_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_0_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_0_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_0_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_1_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_1_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_1_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_1_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_1_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_2_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_2_ldq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_2_stq_idx = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_2_ppred = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] deq_vec_7_2_mem_cmd = 5'h0; // @[fetch-buffer.scala:59:21]
wire [4:0] in_uops_0_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_0_ldq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_0_stq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_0_ppred = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_0_mem_cmd = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_1_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_1_ldq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_1_stq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_1_ppred = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_1_mem_cmd = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_2_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_2_ldq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_2_stq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_2_ppred = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_2_mem_cmd = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_3_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_3_ldq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_3_stq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_3_ppred = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_3_mem_cmd = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_4_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_4_ldq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_4_stq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_4_ppred = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_4_mem_cmd = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_5_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_5_ldq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_5_stq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_5_ppred = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_5_mem_cmd = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_6_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_6_ldq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_6_stq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_6_ppred = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_6_mem_cmd = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_7_ctrl_op_fcn = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_7_ldq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_7_stq_idx = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_7_ppred = 5'h0; // @[fetch-buffer.scala:88:21]
wire [4:0] in_uops_7_mem_cmd = 5'h0; // @[fetch-buffer.scala:88:21]
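  // 2-bit micro-op fields (ctrl_op1_sel, iw_state, rxq_idx, mem_size, the register-type selectors,
  // and debug_tsrc) follow, likewise tied to the constant 2'h0.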
wire [1:0] io_deq_bits_uops_0_bits_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_0_bits_iw_state = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_0_bits_rxq_idx = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_0_bits_mem_size = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_0_bits_dst_rtype = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_0_bits_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_0_bits_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_0_bits_debug_tsrc = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_1_bits_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_1_bits_iw_state = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_1_bits_rxq_idx = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_1_bits_mem_size = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_1_bits_dst_rtype = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_1_bits_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_1_bits_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_1_bits_debug_tsrc = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_2_bits_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_2_bits_iw_state = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_2_bits_rxq_idx = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_2_bits_mem_size = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_2_bits_dst_rtype = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_2_bits_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_2_bits_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_2_bits_debug_tsrc = 2'h0; // @[fetch-buffer.scala:40:7]
wire [1:0] deq_vec_0_0_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_0_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_0_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_0_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_0_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_0_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_0_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_0_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_1_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_1_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_1_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_1_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_1_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_1_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_1_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_1_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_2_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_2_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_2_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_2_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_2_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_2_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_2_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_0_2_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_0_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_0_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_0_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_0_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_0_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_0_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_0_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_0_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_1_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_1_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_1_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_1_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_1_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_1_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_1_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_1_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_2_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_2_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_2_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_2_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_2_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_2_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_2_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_1_2_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_0_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_0_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_0_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_0_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_0_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_0_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_0_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_0_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_1_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_1_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_1_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_1_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_1_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_1_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_1_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_1_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_2_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_2_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_2_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_2_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_2_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_2_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_2_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_2_2_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_0_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_0_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_0_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_0_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_0_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_0_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_0_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_0_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_1_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_1_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_1_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_1_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_1_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_1_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_1_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_1_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_2_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_2_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_2_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_2_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_2_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_2_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_2_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_3_2_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_0_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_0_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_0_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_0_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_0_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_0_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_0_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_0_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_1_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_1_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_1_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_1_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_1_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_1_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_1_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_1_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_2_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_2_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_2_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_2_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_2_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_2_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_2_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_4_2_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_0_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_0_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_0_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_0_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_0_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_0_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_0_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_0_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_1_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_1_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_1_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_1_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_1_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_1_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_1_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_1_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_2_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_2_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_2_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_2_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_2_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_2_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_2_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_5_2_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_0_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_0_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_0_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_0_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_0_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_0_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_0_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_0_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_1_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_1_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_1_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_1_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_1_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_1_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_1_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_1_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_2_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_2_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_2_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_2_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_2_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_2_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_2_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_6_2_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_0_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_0_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_0_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_0_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_0_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_0_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_0_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_0_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_1_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_1_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_1_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_1_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_1_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_1_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_1_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_1_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_2_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_2_iw_state = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_2_rxq_idx = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_2_mem_size = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_2_dst_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_2_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_2_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] deq_vec_7_2_debug_tsrc = 2'h0; // @[fetch-buffer.scala:59:21]
wire [1:0] in_uops_0_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_0_iw_state = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_0_rxq_idx = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_0_mem_size = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_0_dst_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_0_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_0_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_0_debug_tsrc = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_1_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_1_iw_state = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_1_rxq_idx = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_1_mem_size = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_1_dst_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_1_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_1_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_1_debug_tsrc = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_2_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_2_iw_state = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_2_rxq_idx = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_2_mem_size = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_2_dst_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_2_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_2_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_2_debug_tsrc = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_3_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_3_iw_state = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_3_rxq_idx = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_3_mem_size = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_3_dst_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_3_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_3_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_3_debug_tsrc = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_4_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_4_iw_state = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_4_rxq_idx = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_4_mem_size = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_4_dst_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_4_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_4_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_4_debug_tsrc = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_5_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_5_iw_state = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_5_rxq_idx = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_5_mem_size = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_5_dst_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_5_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_5_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_5_debug_tsrc = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_6_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_6_iw_state = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_6_rxq_idx = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_6_mem_size = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_6_dst_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_6_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_6_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_6_debug_tsrc = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_7_ctrl_op1_sel = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_7_iw_state = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_7_rxq_idx = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_7_mem_size = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_7_dst_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_7_lrs1_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_7_lrs2_rtype = 2'h0; // @[fetch-buffer.scala:88:21]
wire [1:0] in_uops_7_debug_tsrc = 2'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] io_deq_bits_uops_0_bits_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:40:7]
wire [3:0] io_deq_bits_uops_0_bits_br_tag = 4'h0; // @[fetch-buffer.scala:40:7]
wire [3:0] io_deq_bits_uops_1_bits_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:40:7]
wire [3:0] io_deq_bits_uops_1_bits_br_tag = 4'h0; // @[fetch-buffer.scala:40:7]
wire [3:0] io_deq_bits_uops_2_bits_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:40:7]
wire [3:0] io_deq_bits_uops_2_bits_br_tag = 4'h0; // @[fetch-buffer.scala:40:7]
wire [3:0] deq_vec_0_0_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_0_0_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_0_1_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_0_1_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_0_2_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_0_2_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_1_0_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_1_0_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_1_1_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_1_1_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_1_2_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_1_2_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_2_0_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_2_0_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_2_1_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_2_1_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_2_2_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_2_2_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_3_0_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_3_0_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_3_1_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_3_1_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_3_2_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_3_2_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_4_0_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_4_0_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_4_1_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_4_1_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_4_2_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_4_2_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_5_0_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_5_0_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_5_1_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_5_1_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_5_2_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_5_2_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_6_0_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_6_0_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_6_1_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_6_1_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_6_2_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_6_2_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_7_0_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_7_0_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_7_1_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_7_1_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_7_2_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] deq_vec_7_2_br_tag = 4'h0; // @[fetch-buffer.scala:59:21]
wire [3:0] in_uops_0_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_0_br_tag = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_1_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_1_br_tag = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_2_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_2_br_tag = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_3_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_3_br_tag = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_4_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_4_br_tag = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_5_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_5_br_tag = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_6_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_6_br_tag = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_7_ctrl_br_type = 4'h0; // @[fetch-buffer.scala:88:21]
wire [3:0] in_uops_7_br_tag = 4'h0; // @[fetch-buffer.scala:88:21]
wire [9:0] io_deq_bits_uops_0_bits_fu_code = 10'h0; // @[fetch-buffer.scala:40:7]
wire [9:0] io_deq_bits_uops_1_bits_fu_code = 10'h0; // @[fetch-buffer.scala:40:7]
wire [9:0] io_deq_bits_uops_2_bits_fu_code = 10'h0; // @[fetch-buffer.scala:40:7]
wire [9:0] deq_vec_0_0_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_0_1_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_0_2_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_1_0_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_1_1_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_1_2_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_2_0_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_2_1_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_2_2_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_3_0_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_3_1_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_3_2_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_4_0_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_4_1_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_4_2_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_5_0_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_5_1_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_5_2_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_6_0_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_6_1_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_6_2_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_7_0_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_7_1_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] deq_vec_7_2_fu_code = 10'h0; // @[fetch-buffer.scala:59:21]
wire [9:0] in_uops_0_fu_code = 10'h0; // @[fetch-buffer.scala:88:21]
wire [9:0] in_uops_1_fu_code = 10'h0; // @[fetch-buffer.scala:88:21]
wire [9:0] in_uops_2_fu_code = 10'h0; // @[fetch-buffer.scala:88:21]
wire [9:0] in_uops_3_fu_code = 10'h0; // @[fetch-buffer.scala:88:21]
wire [9:0] in_uops_4_fu_code = 10'h0; // @[fetch-buffer.scala:88:21]
wire [9:0] in_uops_5_fu_code = 10'h0; // @[fetch-buffer.scala:88:21]
wire [9:0] in_uops_6_fu_code = 10'h0; // @[fetch-buffer.scala:88:21]
wire [9:0] in_uops_7_fu_code = 10'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] io_deq_bits_uops_0_bits_iq_type = 3'h0; // @[fetch-buffer.scala:40:7]
wire [2:0] io_deq_bits_uops_0_bits_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:40:7]
wire [2:0] io_deq_bits_uops_0_bits_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:40:7]
wire [2:0] io_deq_bits_uops_0_bits_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:40:7]
wire [2:0] io_deq_bits_uops_1_bits_iq_type = 3'h0; // @[fetch-buffer.scala:40:7]
wire [2:0] io_deq_bits_uops_1_bits_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:40:7]
wire [2:0] io_deq_bits_uops_1_bits_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:40:7]
wire [2:0] io_deq_bits_uops_1_bits_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:40:7]
wire [2:0] io_deq_bits_uops_2_bits_iq_type = 3'h0; // @[fetch-buffer.scala:40:7]
wire [2:0] io_deq_bits_uops_2_bits_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:40:7]
wire [2:0] io_deq_bits_uops_2_bits_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:40:7]
wire [2:0] io_deq_bits_uops_2_bits_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:40:7]
wire [2:0] deq_vec_0_0_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_0_0_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_0_0_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_0_0_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_0_1_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_0_1_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_0_1_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_0_1_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_0_2_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_0_2_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_0_2_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_0_2_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_1_0_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_1_0_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_1_0_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_1_0_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_1_1_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_1_1_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_1_1_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_1_1_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_1_2_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_1_2_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_1_2_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_1_2_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_2_0_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_2_0_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_2_0_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_2_0_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_2_1_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_2_1_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_2_1_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_2_1_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_2_2_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_2_2_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_2_2_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_2_2_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_3_0_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_3_0_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_3_0_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_3_0_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_3_1_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_3_1_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_3_1_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_3_1_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_3_2_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_3_2_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_3_2_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_3_2_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_4_0_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_4_0_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_4_0_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_4_0_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_4_1_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_4_1_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_4_1_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_4_1_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_4_2_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_4_2_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_4_2_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_4_2_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_5_0_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_5_0_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_5_0_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_5_0_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_5_1_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_5_1_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_5_1_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_5_1_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_5_2_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_5_2_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_5_2_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_5_2_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_6_0_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_6_0_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_6_0_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_6_0_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_6_1_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_6_1_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_6_1_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_6_1_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_6_2_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_6_2_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_6_2_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_6_2_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_7_0_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_7_0_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_7_0_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_7_0_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_7_1_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_7_1_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_7_1_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_7_1_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_7_2_iq_type = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_7_2_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_7_2_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] deq_vec_7_2_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:59:21]
wire [2:0] in_uops_0_iq_type = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_0_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_0_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_0_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_1_iq_type = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_1_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_1_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_1_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_2_iq_type = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_2_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_2_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_2_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_3_iq_type = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_3_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_3_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_3_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_4_iq_type = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_4_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_4_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_4_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_5_iq_type = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_5_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_5_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_5_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_6_iq_type = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_6_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_6_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_6_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_7_iq_type = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_7_ctrl_op2_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_7_ctrl_imm_sel = 3'h0; // @[fetch-buffer.scala:88:21]
wire [2:0] in_uops_7_ctrl_csr_cmd = 3'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] io_deq_bits_uops_0_bits_uopc = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_0_bits_rob_idx = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_0_bits_pdst = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_0_bits_prs1 = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_0_bits_prs2 = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_0_bits_prs3 = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_0_bits_stale_pdst = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_1_bits_uopc = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_1_bits_rob_idx = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_1_bits_pdst = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_1_bits_prs1 = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_1_bits_prs2 = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_1_bits_prs3 = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_1_bits_stale_pdst = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_2_bits_uopc = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_2_bits_rob_idx = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_2_bits_pdst = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_2_bits_prs1 = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_2_bits_prs2 = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_2_bits_prs3 = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] io_deq_bits_uops_2_bits_stale_pdst = 7'h0; // @[fetch-buffer.scala:40:7]
wire [6:0] deq_vec_0_0_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_0_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_0_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_0_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_0_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_0_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_0_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_1_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_1_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_1_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_1_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_1_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_1_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_1_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_2_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_2_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_2_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_2_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_2_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_2_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_0_2_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_0_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_0_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_0_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_0_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_0_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_0_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_0_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_1_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_1_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_1_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_1_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_1_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_1_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_1_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_2_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_2_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_2_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_2_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_2_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_2_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_1_2_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_0_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_0_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_0_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_0_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_0_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_0_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_0_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_1_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_1_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_1_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_1_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_1_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_1_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_1_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_2_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_2_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_2_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_2_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_2_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_2_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_2_2_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_0_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_0_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_0_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_0_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_0_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_0_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_0_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_1_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_1_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_1_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_1_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_1_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_1_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_1_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_2_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_2_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_2_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_2_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_2_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_2_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_3_2_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_0_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_0_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_0_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_0_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_0_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_0_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_0_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_1_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_1_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_1_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_1_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_1_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_1_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_1_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_2_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_2_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_2_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_2_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_2_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_2_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_4_2_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_0_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_0_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_0_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_0_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_0_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_0_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_0_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_1_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_1_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_1_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_1_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_1_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_1_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_1_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_2_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_2_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_2_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_2_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_2_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_2_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_5_2_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_0_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_0_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_0_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_0_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_0_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_0_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_0_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_1_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_1_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_1_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_1_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_1_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_1_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_1_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_2_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_2_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_2_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_2_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_2_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_2_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_6_2_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_0_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_0_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_0_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_0_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_0_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_0_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_0_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_1_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_1_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_1_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_1_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_1_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_1_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_1_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_2_uopc = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_2_rob_idx = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_2_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_2_prs1 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_2_prs2 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_2_prs3 = 7'h0; // @[fetch-buffer.scala:59:21]
wire [6:0] deq_vec_7_2_stale_pdst = 7'h0; // @[fetch-buffer.scala:59:21]
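  // Editorial note: the deq_vec_* and io_deq_bits_uops_* fields above are constant tie-offs
  // emitted by lowering; do_enq below is only declared here and is presumably driven by the
  // enqueue handshake logic further down in this module.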
wire do_enq; // @[fetch-buffer.scala:82:16]
wire [6:0] in_uops_0_uopc = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_0_rob_idx = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_0_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_0_prs1 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_0_prs2 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_0_prs3 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_0_stale_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_1_uopc = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_1_rob_idx = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_1_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_1_prs1 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_1_prs2 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_1_prs3 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_1_stale_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_2_uopc = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_2_rob_idx = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_2_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_2_prs1 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_2_prs2 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_2_prs3 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_2_stale_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_3_uopc = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_3_rob_idx = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_3_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_3_prs1 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_3_prs2 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_3_prs3 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_3_stale_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_4_uopc = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_4_rob_idx = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_4_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_4_prs1 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_4_prs2 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_4_prs3 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_4_stale_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_5_uopc = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_5_rob_idx = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_5_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_5_prs1 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_5_prs2 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_5_prs3 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_5_stale_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_6_uopc = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_6_rob_idx = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_6_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_6_prs1 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_6_prs2 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_6_prs3 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_6_stale_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_7_uopc = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_7_rob_idx = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_7_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_7_prs1 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_7_prs2 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_7_prs3 = 7'h0; // @[fetch-buffer.scala:88:21]
wire [6:0] in_uops_7_stale_pdst = 7'h0; // @[fetch-buffer.scala:88:21]
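  // Editorial note: the in_uops_* fields below are wired straight from the enqueue-side fetch
  // bundle (io_enq_bits_*); fields not listed here keep the constant tie-offs declared above.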
wire in_uops_0_edge_inst = io_enq_bits_edge_inst_0_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_4_edge_inst = io_enq_bits_edge_inst_1_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_0_debug_inst = io_enq_bits_insts_0_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_1_debug_inst = io_enq_bits_insts_1_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_2_debug_inst = io_enq_bits_insts_2_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_3_debug_inst = io_enq_bits_insts_3_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_4_debug_inst = io_enq_bits_insts_4_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_5_debug_inst = io_enq_bits_insts_5_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_6_debug_inst = io_enq_bits_insts_6_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_7_debug_inst = io_enq_bits_insts_7_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_0_inst = io_enq_bits_exp_insts_0_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_1_inst = io_enq_bits_exp_insts_1_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_2_inst = io_enq_bits_exp_insts_2_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_3_inst = io_enq_bits_exp_insts_3_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_4_inst = io_enq_bits_exp_insts_4_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_5_inst = io_enq_bits_exp_insts_5_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_6_inst = io_enq_bits_exp_insts_6_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [31:0] in_uops_7_inst = io_enq_bits_exp_insts_7_0; // @[fetch-buffer.scala:40:7, :88:21]
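  // Editorial note: the _in_uops_N_is_sfb_T wires tap the per-instruction shadowed-mask bits;
  // judging by the naming, they presumably feed the corresponding in_uops_*_is_sfb flags.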
wire _in_uops_0_is_sfb_T = io_enq_bits_shadowed_mask_0_0; // @[fetch-buffer.scala:40:7, :103:56]
wire _in_uops_1_is_sfb_T = io_enq_bits_shadowed_mask_1_0; // @[fetch-buffer.scala:40:7, :103:56]
wire _in_uops_2_is_sfb_T = io_enq_bits_shadowed_mask_2_0; // @[fetch-buffer.scala:40:7, :103:56]
wire _in_uops_3_is_sfb_T = io_enq_bits_shadowed_mask_3_0; // @[fetch-buffer.scala:40:7, :103:56]
wire _in_uops_4_is_sfb_T = io_enq_bits_shadowed_mask_4_0; // @[fetch-buffer.scala:40:7, :103:56]
wire _in_uops_5_is_sfb_T = io_enq_bits_shadowed_mask_5_0; // @[fetch-buffer.scala:40:7, :103:56]
wire _in_uops_6_is_sfb_T = io_enq_bits_shadowed_mask_6_0; // @[fetch-buffer.scala:40:7, :103:56]
wire _in_uops_7_is_sfb_T = io_enq_bits_shadowed_mask_7_0; // @[fetch-buffer.scala:40:7, :103:56]
wire [4:0] in_uops_0_ftq_idx = io_enq_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [4:0] in_uops_1_ftq_idx = io_enq_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [4:0] in_uops_2_ftq_idx = io_enq_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [4:0] in_uops_3_ftq_idx = io_enq_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [4:0] in_uops_4_ftq_idx = io_enq_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [4:0] in_uops_5_ftq_idx = io_enq_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [4:0] in_uops_6_ftq_idx = io_enq_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [4:0] in_uops_7_ftq_idx = io_enq_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_0_xcpt_pf_if = io_enq_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_1_xcpt_pf_if = io_enq_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_2_xcpt_pf_if = io_enq_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_3_xcpt_pf_if = io_enq_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_4_xcpt_pf_if = io_enq_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_5_xcpt_pf_if = io_enq_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_6_xcpt_pf_if = io_enq_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_7_xcpt_pf_if = io_enq_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_0_xcpt_ae_if = io_enq_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_1_xcpt_ae_if = io_enq_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_2_xcpt_ae_if = io_enq_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_3_xcpt_ae_if = io_enq_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_4_xcpt_ae_if = io_enq_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_5_xcpt_ae_if = io_enq_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_6_xcpt_ae_if = io_enq_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_7_xcpt_ae_if = io_enq_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_0_bp_debug_if = io_enq_bits_bp_debug_if_oh_0_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_1_bp_debug_if = io_enq_bits_bp_debug_if_oh_1_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_2_bp_debug_if = io_enq_bits_bp_debug_if_oh_2_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_3_bp_debug_if = io_enq_bits_bp_debug_if_oh_3_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_4_bp_debug_if = io_enq_bits_bp_debug_if_oh_4_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_5_bp_debug_if = io_enq_bits_bp_debug_if_oh_5_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_6_bp_debug_if = io_enq_bits_bp_debug_if_oh_6_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_7_bp_debug_if = io_enq_bits_bp_debug_if_oh_7_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_0_bp_xcpt_if = io_enq_bits_bp_xcpt_if_oh_0_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_1_bp_xcpt_if = io_enq_bits_bp_xcpt_if_oh_1_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_2_bp_xcpt_if = io_enq_bits_bp_xcpt_if_oh_2_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_3_bp_xcpt_if = io_enq_bits_bp_xcpt_if_oh_3_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_4_bp_xcpt_if = io_enq_bits_bp_xcpt_if_oh_4_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_5_bp_xcpt_if = io_enq_bits_bp_xcpt_if_oh_5_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_6_bp_xcpt_if = io_enq_bits_bp_xcpt_if_oh_6_0; // @[fetch-buffer.scala:40:7, :88:21]
wire in_uops_7_bp_xcpt_if = io_enq_bits_bp_xcpt_if_oh_7_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [1:0] in_uops_0_debug_fsrc = io_enq_bits_fsrc_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [1:0] in_uops_1_debug_fsrc = io_enq_bits_fsrc_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [1:0] in_uops_2_debug_fsrc = io_enq_bits_fsrc_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [1:0] in_uops_3_debug_fsrc = io_enq_bits_fsrc_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [1:0] in_uops_4_debug_fsrc = io_enq_bits_fsrc_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [1:0] in_uops_5_debug_fsrc = io_enq_bits_fsrc_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [1:0] in_uops_6_debug_fsrc = io_enq_bits_fsrc_0; // @[fetch-buffer.scala:40:7, :88:21]
wire [1:0] in_uops_7_debug_fsrc = io_enq_bits_fsrc_0; // @[fetch-buffer.scala:40:7, :88:21]
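  // Editorial note: below are the intermediate term behind io_deq_valid and the output-side
  // shadow wires (suffix _0) for the module ports io_enq_ready and io_deq_bits_uops_*.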
wire _io_deq_valid_T_1; // @[fetch-buffer.scala:170:38]
wire io_enq_ready_0; // @[fetch-buffer.scala:40:7]
wire [31:0] io_deq_bits_uops_0_bits_inst_0; // @[fetch-buffer.scala:40:7]
wire [31:0] io_deq_bits_uops_0_bits_debug_inst_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_is_rvc_0; // @[fetch-buffer.scala:40:7]
wire [39:0] io_deq_bits_uops_0_bits_debug_pc_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_is_sfb_0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_0_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_edge_inst_0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_0_bits_pc_lob_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_taken_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_bp_debug_if_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_bits_bp_xcpt_if_0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_0_bits_debug_fsrc_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_0_valid_0; // @[fetch-buffer.scala:40:7]
wire [31:0] io_deq_bits_uops_1_bits_inst_0; // @[fetch-buffer.scala:40:7]
wire [31:0] io_deq_bits_uops_1_bits_debug_inst_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_is_rvc_0; // @[fetch-buffer.scala:40:7]
wire [39:0] io_deq_bits_uops_1_bits_debug_pc_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_is_sfb_0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_1_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_edge_inst_0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_1_bits_pc_lob_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_taken_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_bp_debug_if_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_bits_bp_xcpt_if_0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_1_bits_debug_fsrc_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_1_valid_0; // @[fetch-buffer.scala:40:7]
wire [31:0] io_deq_bits_uops_2_bits_inst_0; // @[fetch-buffer.scala:40:7]
wire [31:0] io_deq_bits_uops_2_bits_debug_inst_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_is_rvc_0; // @[fetch-buffer.scala:40:7]
wire [39:0] io_deq_bits_uops_2_bits_debug_pc_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_is_sfb_0; // @[fetch-buffer.scala:40:7]
wire [4:0] io_deq_bits_uops_2_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_edge_inst_0; // @[fetch-buffer.scala:40:7]
wire [5:0] io_deq_bits_uops_2_bits_pc_lob_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_taken_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_bp_debug_if_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_bits_bp_xcpt_if_0; // @[fetch-buffer.scala:40:7]
wire [1:0] io_deq_bits_uops_2_bits_debug_fsrc_0; // @[fetch-buffer.scala:40:7]
wire io_deq_bits_uops_2_valid_0; // @[fetch-buffer.scala:40:7]
wire io_deq_valid_0; // @[fetch-buffer.scala:40:7]
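  // Editorial note: the fb_uop_ram_* registers (fetch-buffer.scala:57) hold the buffered
  // micro-ops; each entry is aliased into a deq_vec_<group>_<slot> wire (fetch-buffer.scala:59)
  // that groups the entries for the dequeue side.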
reg [31:0] fb_uop_ram_0_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_0_0_inst = fb_uop_ram_0_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_0_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_0_0_debug_inst = fb_uop_ram_0_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_0_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_0_is_rvc = fb_uop_ram_0_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_0_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_0_0_debug_pc = fb_uop_ram_0_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_0_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_0_is_sfb = fb_uop_ram_0_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_0_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_0_0_ftq_idx = fb_uop_ram_0_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_0_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_0_edge_inst = fb_uop_ram_0_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_0_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_0_0_pc_lob = fb_uop_ram_0_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_0_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_0_taken = fb_uop_ram_0_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_0_xcpt_pf_if = fb_uop_ram_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_0_xcpt_ae_if = fb_uop_ram_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_0_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_0_bp_debug_if = fb_uop_ram_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_0_bp_xcpt_if = fb_uop_ram_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_0_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_0_0_debug_fsrc = fb_uop_ram_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_1_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_0_1_inst = fb_uop_ram_1_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_1_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_0_1_debug_inst = fb_uop_ram_1_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_1_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_1_is_rvc = fb_uop_ram_1_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_1_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_0_1_debug_pc = fb_uop_ram_1_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_1_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_1_is_sfb = fb_uop_ram_1_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_1_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_0_1_ftq_idx = fb_uop_ram_1_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_1_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_1_edge_inst = fb_uop_ram_1_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_1_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_0_1_pc_lob = fb_uop_ram_1_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_1_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_1_taken = fb_uop_ram_1_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_1_xcpt_pf_if = fb_uop_ram_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_1_xcpt_ae_if = fb_uop_ram_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_1_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_1_bp_debug_if = fb_uop_ram_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_1_bp_xcpt_if = fb_uop_ram_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_1_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_0_1_debug_fsrc = fb_uop_ram_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_2_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_0_2_inst = fb_uop_ram_2_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_2_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_0_2_debug_inst = fb_uop_ram_2_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_2_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_2_is_rvc = fb_uop_ram_2_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_2_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_0_2_debug_pc = fb_uop_ram_2_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_2_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_2_is_sfb = fb_uop_ram_2_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_2_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_0_2_ftq_idx = fb_uop_ram_2_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_2_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_2_edge_inst = fb_uop_ram_2_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_2_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_0_2_pc_lob = fb_uop_ram_2_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_2_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_2_taken = fb_uop_ram_2_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_2_xcpt_pf_if = fb_uop_ram_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_2_xcpt_ae_if = fb_uop_ram_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_2_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_2_bp_debug_if = fb_uop_ram_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_0_2_bp_xcpt_if = fb_uop_ram_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_2_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_0_2_debug_fsrc = fb_uop_ram_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_3_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_1_0_inst = fb_uop_ram_3_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_3_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_1_0_debug_inst = fb_uop_ram_3_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_3_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_0_is_rvc = fb_uop_ram_3_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_3_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_1_0_debug_pc = fb_uop_ram_3_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_3_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_0_is_sfb = fb_uop_ram_3_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_3_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_1_0_ftq_idx = fb_uop_ram_3_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_3_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_0_edge_inst = fb_uop_ram_3_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_3_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_1_0_pc_lob = fb_uop_ram_3_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_3_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_0_taken = fb_uop_ram_3_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_0_xcpt_pf_if = fb_uop_ram_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_0_xcpt_ae_if = fb_uop_ram_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_3_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_0_bp_debug_if = fb_uop_ram_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_0_bp_xcpt_if = fb_uop_ram_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_3_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_1_0_debug_fsrc = fb_uop_ram_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_4_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_1_1_inst = fb_uop_ram_4_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_4_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_1_1_debug_inst = fb_uop_ram_4_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_4_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_1_is_rvc = fb_uop_ram_4_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_4_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_1_1_debug_pc = fb_uop_ram_4_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_4_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_1_is_sfb = fb_uop_ram_4_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_4_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_1_1_ftq_idx = fb_uop_ram_4_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_4_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_1_edge_inst = fb_uop_ram_4_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_4_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_1_1_pc_lob = fb_uop_ram_4_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_4_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_1_taken = fb_uop_ram_4_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_1_xcpt_pf_if = fb_uop_ram_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_1_xcpt_ae_if = fb_uop_ram_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_4_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_1_bp_debug_if = fb_uop_ram_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_1_bp_xcpt_if = fb_uop_ram_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_4_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_1_1_debug_fsrc = fb_uop_ram_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_5_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_1_2_inst = fb_uop_ram_5_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_5_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_1_2_debug_inst = fb_uop_ram_5_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_5_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_2_is_rvc = fb_uop_ram_5_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_5_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_1_2_debug_pc = fb_uop_ram_5_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_5_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_2_is_sfb = fb_uop_ram_5_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_5_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_1_2_ftq_idx = fb_uop_ram_5_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_5_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_2_edge_inst = fb_uop_ram_5_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_5_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_1_2_pc_lob = fb_uop_ram_5_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_5_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_2_taken = fb_uop_ram_5_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_2_xcpt_pf_if = fb_uop_ram_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_2_xcpt_ae_if = fb_uop_ram_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_5_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_2_bp_debug_if = fb_uop_ram_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_1_2_bp_xcpt_if = fb_uop_ram_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_5_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_1_2_debug_fsrc = fb_uop_ram_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_6_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_2_0_inst = fb_uop_ram_6_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_6_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_2_0_debug_inst = fb_uop_ram_6_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_6_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_0_is_rvc = fb_uop_ram_6_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_6_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_2_0_debug_pc = fb_uop_ram_6_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_6_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_0_is_sfb = fb_uop_ram_6_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_6_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_2_0_ftq_idx = fb_uop_ram_6_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_6_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_0_edge_inst = fb_uop_ram_6_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_6_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_2_0_pc_lob = fb_uop_ram_6_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_6_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_0_taken = fb_uop_ram_6_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_0_xcpt_pf_if = fb_uop_ram_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_0_xcpt_ae_if = fb_uop_ram_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_6_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_0_bp_debug_if = fb_uop_ram_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_0_bp_xcpt_if = fb_uop_ram_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_6_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_2_0_debug_fsrc = fb_uop_ram_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_7_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_2_1_inst = fb_uop_ram_7_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_7_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_2_1_debug_inst = fb_uop_ram_7_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_7_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_1_is_rvc = fb_uop_ram_7_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_7_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_2_1_debug_pc = fb_uop_ram_7_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_7_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_1_is_sfb = fb_uop_ram_7_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_7_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_2_1_ftq_idx = fb_uop_ram_7_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_7_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_1_edge_inst = fb_uop_ram_7_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_7_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_2_1_pc_lob = fb_uop_ram_7_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_7_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_1_taken = fb_uop_ram_7_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_1_xcpt_pf_if = fb_uop_ram_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_1_xcpt_ae_if = fb_uop_ram_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_7_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_1_bp_debug_if = fb_uop_ram_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_1_bp_xcpt_if = fb_uop_ram_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_7_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_2_1_debug_fsrc = fb_uop_ram_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_8_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_2_2_inst = fb_uop_ram_8_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_8_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_2_2_debug_inst = fb_uop_ram_8_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_8_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_2_is_rvc = fb_uop_ram_8_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_8_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_2_2_debug_pc = fb_uop_ram_8_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_8_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_2_is_sfb = fb_uop_ram_8_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_8_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_2_2_ftq_idx = fb_uop_ram_8_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_8_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_2_edge_inst = fb_uop_ram_8_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_8_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_2_2_pc_lob = fb_uop_ram_8_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_8_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_2_taken = fb_uop_ram_8_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_8_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_2_xcpt_pf_if = fb_uop_ram_8_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_8_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_2_xcpt_ae_if = fb_uop_ram_8_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_8_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_2_bp_debug_if = fb_uop_ram_8_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_8_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_2_2_bp_xcpt_if = fb_uop_ram_8_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_8_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_2_2_debug_fsrc = fb_uop_ram_8_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_9_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_3_0_inst = fb_uop_ram_9_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_9_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_3_0_debug_inst = fb_uop_ram_9_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_9_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_0_is_rvc = fb_uop_ram_9_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_9_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_3_0_debug_pc = fb_uop_ram_9_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_9_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_0_is_sfb = fb_uop_ram_9_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_9_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_3_0_ftq_idx = fb_uop_ram_9_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_9_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_0_edge_inst = fb_uop_ram_9_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_9_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_3_0_pc_lob = fb_uop_ram_9_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_9_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_0_taken = fb_uop_ram_9_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_9_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_0_xcpt_pf_if = fb_uop_ram_9_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_9_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_0_xcpt_ae_if = fb_uop_ram_9_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_9_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_0_bp_debug_if = fb_uop_ram_9_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_9_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_0_bp_xcpt_if = fb_uop_ram_9_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_9_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_3_0_debug_fsrc = fb_uop_ram_9_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_10_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_3_1_inst = fb_uop_ram_10_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_10_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_3_1_debug_inst = fb_uop_ram_10_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_10_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_1_is_rvc = fb_uop_ram_10_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_10_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_3_1_debug_pc = fb_uop_ram_10_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_10_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_1_is_sfb = fb_uop_ram_10_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_10_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_3_1_ftq_idx = fb_uop_ram_10_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_10_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_1_edge_inst = fb_uop_ram_10_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_10_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_3_1_pc_lob = fb_uop_ram_10_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_10_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_1_taken = fb_uop_ram_10_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_10_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_1_xcpt_pf_if = fb_uop_ram_10_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_10_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_1_xcpt_ae_if = fb_uop_ram_10_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_10_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_1_bp_debug_if = fb_uop_ram_10_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_10_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_1_bp_xcpt_if = fb_uop_ram_10_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_10_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_3_1_debug_fsrc = fb_uop_ram_10_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_11_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_3_2_inst = fb_uop_ram_11_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_11_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_3_2_debug_inst = fb_uop_ram_11_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_11_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_2_is_rvc = fb_uop_ram_11_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_11_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_3_2_debug_pc = fb_uop_ram_11_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_11_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_2_is_sfb = fb_uop_ram_11_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_11_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_3_2_ftq_idx = fb_uop_ram_11_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_11_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_2_edge_inst = fb_uop_ram_11_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_11_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_3_2_pc_lob = fb_uop_ram_11_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_11_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_2_taken = fb_uop_ram_11_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_11_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_2_xcpt_pf_if = fb_uop_ram_11_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_11_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_2_xcpt_ae_if = fb_uop_ram_11_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_11_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_2_bp_debug_if = fb_uop_ram_11_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_11_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_3_2_bp_xcpt_if = fb_uop_ram_11_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_11_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_3_2_debug_fsrc = fb_uop_ram_11_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_12_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_4_0_inst = fb_uop_ram_12_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_12_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_4_0_debug_inst = fb_uop_ram_12_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_12_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_0_is_rvc = fb_uop_ram_12_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_12_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_4_0_debug_pc = fb_uop_ram_12_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_12_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_0_is_sfb = fb_uop_ram_12_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_12_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_4_0_ftq_idx = fb_uop_ram_12_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_12_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_0_edge_inst = fb_uop_ram_12_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_12_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_4_0_pc_lob = fb_uop_ram_12_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_12_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_0_taken = fb_uop_ram_12_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_12_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_0_xcpt_pf_if = fb_uop_ram_12_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_12_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_0_xcpt_ae_if = fb_uop_ram_12_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_12_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_0_bp_debug_if = fb_uop_ram_12_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_12_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_0_bp_xcpt_if = fb_uop_ram_12_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_12_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_4_0_debug_fsrc = fb_uop_ram_12_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_13_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_4_1_inst = fb_uop_ram_13_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_13_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_4_1_debug_inst = fb_uop_ram_13_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_13_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_1_is_rvc = fb_uop_ram_13_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_13_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_4_1_debug_pc = fb_uop_ram_13_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_13_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_1_is_sfb = fb_uop_ram_13_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_13_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_4_1_ftq_idx = fb_uop_ram_13_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_13_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_1_edge_inst = fb_uop_ram_13_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_13_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_4_1_pc_lob = fb_uop_ram_13_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_13_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_1_taken = fb_uop_ram_13_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_13_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_1_xcpt_pf_if = fb_uop_ram_13_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_13_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_1_xcpt_ae_if = fb_uop_ram_13_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_13_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_1_bp_debug_if = fb_uop_ram_13_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_13_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_1_bp_xcpt_if = fb_uop_ram_13_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_13_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_4_1_debug_fsrc = fb_uop_ram_13_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_14_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_4_2_inst = fb_uop_ram_14_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_14_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_4_2_debug_inst = fb_uop_ram_14_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_14_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_2_is_rvc = fb_uop_ram_14_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_14_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_4_2_debug_pc = fb_uop_ram_14_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_14_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_2_is_sfb = fb_uop_ram_14_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_14_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_4_2_ftq_idx = fb_uop_ram_14_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_14_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_2_edge_inst = fb_uop_ram_14_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_14_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_4_2_pc_lob = fb_uop_ram_14_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_14_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_2_taken = fb_uop_ram_14_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_14_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_2_xcpt_pf_if = fb_uop_ram_14_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_14_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_2_xcpt_ae_if = fb_uop_ram_14_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_14_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_2_bp_debug_if = fb_uop_ram_14_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_14_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_4_2_bp_xcpt_if = fb_uop_ram_14_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_14_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_4_2_debug_fsrc = fb_uop_ram_14_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_15_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_5_0_inst = fb_uop_ram_15_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_15_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_5_0_debug_inst = fb_uop_ram_15_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_15_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_0_is_rvc = fb_uop_ram_15_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_15_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_5_0_debug_pc = fb_uop_ram_15_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_15_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_0_is_sfb = fb_uop_ram_15_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_15_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_5_0_ftq_idx = fb_uop_ram_15_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_15_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_0_edge_inst = fb_uop_ram_15_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_15_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_5_0_pc_lob = fb_uop_ram_15_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_15_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_0_taken = fb_uop_ram_15_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_15_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_0_xcpt_pf_if = fb_uop_ram_15_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_15_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_0_xcpt_ae_if = fb_uop_ram_15_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_15_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_0_bp_debug_if = fb_uop_ram_15_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_15_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_0_bp_xcpt_if = fb_uop_ram_15_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_15_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_5_0_debug_fsrc = fb_uop_ram_15_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_16_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_5_1_inst = fb_uop_ram_16_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_16_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_5_1_debug_inst = fb_uop_ram_16_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_16_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_1_is_rvc = fb_uop_ram_16_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_16_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_5_1_debug_pc = fb_uop_ram_16_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_16_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_1_is_sfb = fb_uop_ram_16_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_16_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_5_1_ftq_idx = fb_uop_ram_16_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_16_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_1_edge_inst = fb_uop_ram_16_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_16_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_5_1_pc_lob = fb_uop_ram_16_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_16_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_1_taken = fb_uop_ram_16_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_16_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_1_xcpt_pf_if = fb_uop_ram_16_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_16_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_1_xcpt_ae_if = fb_uop_ram_16_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_16_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_1_bp_debug_if = fb_uop_ram_16_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_16_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_1_bp_xcpt_if = fb_uop_ram_16_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_16_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_5_1_debug_fsrc = fb_uop_ram_16_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_17_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_5_2_inst = fb_uop_ram_17_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_17_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_5_2_debug_inst = fb_uop_ram_17_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_17_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_2_is_rvc = fb_uop_ram_17_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_17_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_5_2_debug_pc = fb_uop_ram_17_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_17_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_2_is_sfb = fb_uop_ram_17_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_17_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_5_2_ftq_idx = fb_uop_ram_17_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_17_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_2_edge_inst = fb_uop_ram_17_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_17_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_5_2_pc_lob = fb_uop_ram_17_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_17_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_2_taken = fb_uop_ram_17_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_17_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_2_xcpt_pf_if = fb_uop_ram_17_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_17_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_2_xcpt_ae_if = fb_uop_ram_17_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_17_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_2_bp_debug_if = fb_uop_ram_17_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_17_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_5_2_bp_xcpt_if = fb_uop_ram_17_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_17_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_5_2_debug_fsrc = fb_uop_ram_17_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_18_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_6_0_inst = fb_uop_ram_18_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_18_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_6_0_debug_inst = fb_uop_ram_18_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_18_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_0_is_rvc = fb_uop_ram_18_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_18_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_6_0_debug_pc = fb_uop_ram_18_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_18_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_0_is_sfb = fb_uop_ram_18_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_18_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_6_0_ftq_idx = fb_uop_ram_18_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_18_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_0_edge_inst = fb_uop_ram_18_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_18_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_6_0_pc_lob = fb_uop_ram_18_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_18_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_0_taken = fb_uop_ram_18_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_18_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_0_xcpt_pf_if = fb_uop_ram_18_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_18_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_0_xcpt_ae_if = fb_uop_ram_18_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_18_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_0_bp_debug_if = fb_uop_ram_18_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_18_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_0_bp_xcpt_if = fb_uop_ram_18_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_18_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_6_0_debug_fsrc = fb_uop_ram_18_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_19_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_6_1_inst = fb_uop_ram_19_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_19_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_6_1_debug_inst = fb_uop_ram_19_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_19_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_1_is_rvc = fb_uop_ram_19_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_19_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_6_1_debug_pc = fb_uop_ram_19_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_19_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_1_is_sfb = fb_uop_ram_19_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_19_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_6_1_ftq_idx = fb_uop_ram_19_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_19_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_1_edge_inst = fb_uop_ram_19_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_19_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_6_1_pc_lob = fb_uop_ram_19_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_19_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_1_taken = fb_uop_ram_19_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_19_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_1_xcpt_pf_if = fb_uop_ram_19_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_19_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_1_xcpt_ae_if = fb_uop_ram_19_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_19_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_1_bp_debug_if = fb_uop_ram_19_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_19_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_1_bp_xcpt_if = fb_uop_ram_19_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_19_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_6_1_debug_fsrc = fb_uop_ram_19_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_20_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_6_2_inst = fb_uop_ram_20_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_20_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_6_2_debug_inst = fb_uop_ram_20_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_20_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_2_is_rvc = fb_uop_ram_20_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_20_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_6_2_debug_pc = fb_uop_ram_20_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_20_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_2_is_sfb = fb_uop_ram_20_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_20_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_6_2_ftq_idx = fb_uop_ram_20_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_20_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_2_edge_inst = fb_uop_ram_20_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_20_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_6_2_pc_lob = fb_uop_ram_20_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_20_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_2_taken = fb_uop_ram_20_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_20_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_2_xcpt_pf_if = fb_uop_ram_20_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_20_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_2_xcpt_ae_if = fb_uop_ram_20_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_20_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_2_bp_debug_if = fb_uop_ram_20_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_20_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_6_2_bp_xcpt_if = fb_uop_ram_20_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_20_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_6_2_debug_fsrc = fb_uop_ram_20_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_21_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_7_0_inst = fb_uop_ram_21_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_21_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_7_0_debug_inst = fb_uop_ram_21_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_21_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_0_is_rvc = fb_uop_ram_21_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_21_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_7_0_debug_pc = fb_uop_ram_21_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_21_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_0_is_sfb = fb_uop_ram_21_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_21_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_7_0_ftq_idx = fb_uop_ram_21_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_21_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_0_edge_inst = fb_uop_ram_21_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_21_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_7_0_pc_lob = fb_uop_ram_21_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_21_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_0_taken = fb_uop_ram_21_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_21_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_0_xcpt_pf_if = fb_uop_ram_21_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_21_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_0_xcpt_ae_if = fb_uop_ram_21_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_21_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_0_bp_debug_if = fb_uop_ram_21_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_21_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_0_bp_xcpt_if = fb_uop_ram_21_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_21_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_7_0_debug_fsrc = fb_uop_ram_21_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_22_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_7_1_inst = fb_uop_ram_22_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_22_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_7_1_debug_inst = fb_uop_ram_22_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_22_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_1_is_rvc = fb_uop_ram_22_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_22_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_7_1_debug_pc = fb_uop_ram_22_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_22_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_1_is_sfb = fb_uop_ram_22_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_22_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_7_1_ftq_idx = fb_uop_ram_22_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_22_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_1_edge_inst = fb_uop_ram_22_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_22_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_7_1_pc_lob = fb_uop_ram_22_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_22_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_1_taken = fb_uop_ram_22_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_22_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_1_xcpt_pf_if = fb_uop_ram_22_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_22_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_1_xcpt_ae_if = fb_uop_ram_22_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_22_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_1_bp_debug_if = fb_uop_ram_22_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_22_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_1_bp_xcpt_if = fb_uop_ram_22_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_22_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_7_1_debug_fsrc = fb_uop_ram_22_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_23_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_7_2_inst = fb_uop_ram_23_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [31:0] fb_uop_ram_23_debug_inst; // @[fetch-buffer.scala:57:16]
wire [31:0] deq_vec_7_2_debug_inst = fb_uop_ram_23_debug_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_23_is_rvc; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_2_is_rvc = fb_uop_ram_23_is_rvc; // @[fetch-buffer.scala:57:16, :59:21]
reg [39:0] fb_uop_ram_23_debug_pc; // @[fetch-buffer.scala:57:16]
wire [39:0] deq_vec_7_2_debug_pc = fb_uop_ram_23_debug_pc; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_23_is_sfb; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_2_is_sfb = fb_uop_ram_23_is_sfb; // @[fetch-buffer.scala:57:16, :59:21]
reg [4:0] fb_uop_ram_23_ftq_idx; // @[fetch-buffer.scala:57:16]
wire [4:0] deq_vec_7_2_ftq_idx = fb_uop_ram_23_ftq_idx; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_23_edge_inst; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_2_edge_inst = fb_uop_ram_23_edge_inst; // @[fetch-buffer.scala:57:16, :59:21]
reg [5:0] fb_uop_ram_23_pc_lob; // @[fetch-buffer.scala:57:16]
wire [5:0] deq_vec_7_2_pc_lob = fb_uop_ram_23_pc_lob; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_23_taken; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_2_taken = fb_uop_ram_23_taken; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_23_xcpt_pf_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_2_xcpt_pf_if = fb_uop_ram_23_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_23_xcpt_ae_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_2_xcpt_ae_if = fb_uop_ram_23_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_23_bp_debug_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_2_bp_debug_if = fb_uop_ram_23_bp_debug_if; // @[fetch-buffer.scala:57:16, :59:21]
reg fb_uop_ram_23_bp_xcpt_if; // @[fetch-buffer.scala:57:16]
wire deq_vec_7_2_bp_xcpt_if = fb_uop_ram_23_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :59:21]
reg [1:0] fb_uop_ram_23_debug_fsrc; // @[fetch-buffer.scala:57:16]
wire [1:0] deq_vec_7_2_debug_fsrc = fb_uop_ram_23_debug_fsrc; // @[fetch-buffer.scala:57:16, :59:21]
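  // head and tail below appear to be one-hot pointers into the fetch buffer
  // (fetch-buffer.scala:61-64): head selects one of the 8 three-uop dequeue
  // rows, tail selects one of the 24 uop entries, and maybe_full
  // disambiguates full vs. empty when the two pointers coincide.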
reg [7:0] head; // @[fetch-buffer.scala:61:21]
reg [23:0] tail; // @[fetch-buffer.scala:62:21]
wire [23:0] enq_idxs_0 = tail; // @[fetch-buffer.scala:62:21, :128:22]
reg maybe_full; // @[fetch-buffer.scala:64:27]
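  // The _might_hit_head_* wires below appear to implement the might_hit_head
  // check of fetch-buffer.scala:75-79: for each shift amount k, tail is
  // rotated left by k entries and every third bit of the rotated vector (one
  // bit per dequeue row) is gathered into an 8-bit row mask; the masks are
  // combined with head later in the module (outside this excerpt). The wires
  // immediately below handle k = 1, i.e. {tail[22:0], tail[23]}.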
wire [22:0] _might_hit_head_T = tail[22:0]; // @[fetch-buffer.scala:62:21, :75:11]
wire _might_hit_head_T_1 = tail[23]; // @[fetch-buffer.scala:62:21, :75:24]
wire _at_head_T_23 = tail[23]; // @[fetch-buffer.scala:62:21, :75:24, :80:31]
wire [23:0] _might_hit_head_T_2 = {_might_hit_head_T, _might_hit_head_T_1}; // @[fetch-buffer.scala:75:{8,11,24}]
wire _might_hit_head_T_3 = _might_hit_head_T_2[0]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_0 = _might_hit_head_T_3; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_4 = _might_hit_head_T_2[1]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_5 = _might_hit_head_T_2[2]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_6 = _might_hit_head_T_2[3]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_1 = _might_hit_head_T_6; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_7 = _might_hit_head_T_2[4]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_8 = _might_hit_head_T_2[5]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_9 = _might_hit_head_T_2[6]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_2 = _might_hit_head_T_9; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_10 = _might_hit_head_T_2[7]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_11 = _might_hit_head_T_2[8]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_12 = _might_hit_head_T_2[9]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_3 = _might_hit_head_T_12; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_13 = _might_hit_head_T_2[10]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_14 = _might_hit_head_T_2[11]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_15 = _might_hit_head_T_2[12]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_4 = _might_hit_head_T_15; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_16 = _might_hit_head_T_2[13]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_17 = _might_hit_head_T_2[14]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_18 = _might_hit_head_T_2[15]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_5 = _might_hit_head_T_18; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_19 = _might_hit_head_T_2[16]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_20 = _might_hit_head_T_2[17]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_21 = _might_hit_head_T_2[18]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_6 = _might_hit_head_T_21; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_22 = _might_hit_head_T_2[19]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_23 = _might_hit_head_T_2[20]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_24 = _might_hit_head_T_2[21]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_7 = _might_hit_head_T_24; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_25 = _might_hit_head_T_2[22]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_26 = _might_hit_head_T_2[23]; // @[fetch-buffer.scala:75:8, :78:82]
wire [1:0] might_hit_head_lo_lo = {_might_hit_head_WIRE_1, _might_hit_head_WIRE_0}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_lo_hi = {_might_hit_head_WIRE_3, _might_hit_head_WIRE_2}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_lo = {might_hit_head_lo_hi, might_hit_head_lo_lo}; // @[fetch-buffer.scala:79:63]
wire [1:0] might_hit_head_hi_lo = {_might_hit_head_WIRE_5, _might_hit_head_WIRE_4}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_hi_hi = {_might_hit_head_WIRE_7, _might_hit_head_WIRE_6}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_hi = {might_hit_head_hi_hi, might_hit_head_hi_lo}; // @[fetch-buffer.scala:79:63]
wire [7:0] _might_hit_head_T_27 = {might_hit_head_hi, might_hit_head_lo}; // @[fetch-buffer.scala:79:63]
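  // Rotation k = 2: _might_hit_head_T_30 = {tail[21:0], tail[23:22]}, tail rotated left by two entries.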
wire [21:0] _might_hit_head_T_28 = tail[21:0]; // @[fetch-buffer.scala:62:21, :75:11]
wire [1:0] _might_hit_head_T_29 = tail[23:22]; // @[fetch-buffer.scala:62:21, :75:24]
wire [23:0] _might_hit_head_T_30 = {_might_hit_head_T_28, _might_hit_head_T_29}; // @[fetch-buffer.scala:75:{8,11,24}]
wire _might_hit_head_T_31 = _might_hit_head_T_30[0]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_1_0 = _might_hit_head_T_31; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_32 = _might_hit_head_T_30[1]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_33 = _might_hit_head_T_30[2]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_34 = _might_hit_head_T_30[3]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_1_1 = _might_hit_head_T_34; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_35 = _might_hit_head_T_30[4]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_36 = _might_hit_head_T_30[5]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_37 = _might_hit_head_T_30[6]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_1_2 = _might_hit_head_T_37; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_38 = _might_hit_head_T_30[7]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_39 = _might_hit_head_T_30[8]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_40 = _might_hit_head_T_30[9]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_1_3 = _might_hit_head_T_40; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_41 = _might_hit_head_T_30[10]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_42 = _might_hit_head_T_30[11]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_43 = _might_hit_head_T_30[12]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_1_4 = _might_hit_head_T_43; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_44 = _might_hit_head_T_30[13]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_45 = _might_hit_head_T_30[14]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_46 = _might_hit_head_T_30[15]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_1_5 = _might_hit_head_T_46; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_47 = _might_hit_head_T_30[16]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_48 = _might_hit_head_T_30[17]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_49 = _might_hit_head_T_30[18]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_1_6 = _might_hit_head_T_49; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_50 = _might_hit_head_T_30[19]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_51 = _might_hit_head_T_30[20]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_52 = _might_hit_head_T_30[21]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_1_7 = _might_hit_head_T_52; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_53 = _might_hit_head_T_30[22]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_54 = _might_hit_head_T_30[23]; // @[fetch-buffer.scala:75:8, :78:82]
wire [1:0] might_hit_head_lo_lo_1 = {_might_hit_head_WIRE_1_1, _might_hit_head_WIRE_1_0}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_lo_hi_1 = {_might_hit_head_WIRE_1_3, _might_hit_head_WIRE_1_2}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_lo_1 = {might_hit_head_lo_hi_1, might_hit_head_lo_lo_1}; // @[fetch-buffer.scala:79:63]
wire [1:0] might_hit_head_hi_lo_1 = {_might_hit_head_WIRE_1_5, _might_hit_head_WIRE_1_4}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_hi_hi_1 = {_might_hit_head_WIRE_1_7, _might_hit_head_WIRE_1_6}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_hi_1 = {might_hit_head_hi_hi_1, might_hit_head_hi_lo_1}; // @[fetch-buffer.scala:79:63]
wire [7:0] _might_hit_head_T_55 = {might_hit_head_hi_1, might_hit_head_lo_1}; // @[fetch-buffer.scala:79:63]
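  // Rotation k = 3: _might_hit_head_T_58 = {tail[20:0], tail[23:21]}, tail rotated left by three entries.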
wire [20:0] _might_hit_head_T_56 = tail[20:0]; // @[fetch-buffer.scala:62:21, :75:11]
wire [2:0] _might_hit_head_T_57 = tail[23:21]; // @[fetch-buffer.scala:62:21, :75:24]
wire [23:0] _might_hit_head_T_58 = {_might_hit_head_T_56, _might_hit_head_T_57}; // @[fetch-buffer.scala:75:{8,11,24}]
wire _might_hit_head_T_59 = _might_hit_head_T_58[0]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_2_0 = _might_hit_head_T_59; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_60 = _might_hit_head_T_58[1]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_61 = _might_hit_head_T_58[2]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_62 = _might_hit_head_T_58[3]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_2_1 = _might_hit_head_T_62; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_63 = _might_hit_head_T_58[4]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_64 = _might_hit_head_T_58[5]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_65 = _might_hit_head_T_58[6]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_2_2 = _might_hit_head_T_65; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_66 = _might_hit_head_T_58[7]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_67 = _might_hit_head_T_58[8]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_68 = _might_hit_head_T_58[9]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_2_3 = _might_hit_head_T_68; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_69 = _might_hit_head_T_58[10]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_70 = _might_hit_head_T_58[11]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_71 = _might_hit_head_T_58[12]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_2_4 = _might_hit_head_T_71; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_72 = _might_hit_head_T_58[13]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_73 = _might_hit_head_T_58[14]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_74 = _might_hit_head_T_58[15]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_2_5 = _might_hit_head_T_74; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_75 = _might_hit_head_T_58[16]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_76 = _might_hit_head_T_58[17]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_77 = _might_hit_head_T_58[18]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_2_6 = _might_hit_head_T_77; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_78 = _might_hit_head_T_58[19]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_79 = _might_hit_head_T_58[20]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_80 = _might_hit_head_T_58[21]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_2_7 = _might_hit_head_T_80; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_81 = _might_hit_head_T_58[22]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_82 = _might_hit_head_T_58[23]; // @[fetch-buffer.scala:75:8, :78:82]
wire [1:0] might_hit_head_lo_lo_2 = {_might_hit_head_WIRE_2_1, _might_hit_head_WIRE_2_0}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_lo_hi_2 = {_might_hit_head_WIRE_2_3, _might_hit_head_WIRE_2_2}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_lo_2 = {might_hit_head_lo_hi_2, might_hit_head_lo_lo_2}; // @[fetch-buffer.scala:79:63]
wire [1:0] might_hit_head_hi_lo_2 = {_might_hit_head_WIRE_2_5, _might_hit_head_WIRE_2_4}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_hi_hi_2 = {_might_hit_head_WIRE_2_7, _might_hit_head_WIRE_2_6}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_hi_2 = {might_hit_head_hi_hi_2, might_hit_head_hi_lo_2}; // @[fetch-buffer.scala:79:63]
wire [7:0] _might_hit_head_T_83 = {might_hit_head_hi_2, might_hit_head_lo_2}; // @[fetch-buffer.scala:79:63]
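  // Rotation k = 4: _might_hit_head_T_86 = {tail[19:0], tail[23:20]}, tail rotated left by four entries.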
wire [19:0] _might_hit_head_T_84 = tail[19:0]; // @[fetch-buffer.scala:62:21, :75:11]
wire [3:0] _might_hit_head_T_85 = tail[23:20]; // @[fetch-buffer.scala:62:21, :75:24]
wire [23:0] _might_hit_head_T_86 = {_might_hit_head_T_84, _might_hit_head_T_85}; // @[fetch-buffer.scala:75:{8,11,24}]
wire _might_hit_head_T_87 = _might_hit_head_T_86[0]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_3_0 = _might_hit_head_T_87; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_88 = _might_hit_head_T_86[1]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_89 = _might_hit_head_T_86[2]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_90 = _might_hit_head_T_86[3]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_3_1 = _might_hit_head_T_90; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_91 = _might_hit_head_T_86[4]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_92 = _might_hit_head_T_86[5]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_93 = _might_hit_head_T_86[6]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_3_2 = _might_hit_head_T_93; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_94 = _might_hit_head_T_86[7]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_95 = _might_hit_head_T_86[8]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_96 = _might_hit_head_T_86[9]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_3_3 = _might_hit_head_T_96; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_97 = _might_hit_head_T_86[10]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_98 = _might_hit_head_T_86[11]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_99 = _might_hit_head_T_86[12]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_3_4 = _might_hit_head_T_99; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_100 = _might_hit_head_T_86[13]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_101 = _might_hit_head_T_86[14]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_102 = _might_hit_head_T_86[15]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_3_5 = _might_hit_head_T_102; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_103 = _might_hit_head_T_86[16]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_104 = _might_hit_head_T_86[17]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_105 = _might_hit_head_T_86[18]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_3_6 = _might_hit_head_T_105; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_106 = _might_hit_head_T_86[19]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_107 = _might_hit_head_T_86[20]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_108 = _might_hit_head_T_86[21]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_3_7 = _might_hit_head_T_108; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_109 = _might_hit_head_T_86[22]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_110 = _might_hit_head_T_86[23]; // @[fetch-buffer.scala:75:8, :78:82]
wire [1:0] might_hit_head_lo_lo_3 = {_might_hit_head_WIRE_3_1, _might_hit_head_WIRE_3_0}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_lo_hi_3 = {_might_hit_head_WIRE_3_3, _might_hit_head_WIRE_3_2}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_lo_3 = {might_hit_head_lo_hi_3, might_hit_head_lo_lo_3}; // @[fetch-buffer.scala:79:63]
wire [1:0] might_hit_head_hi_lo_3 = {_might_hit_head_WIRE_3_5, _might_hit_head_WIRE_3_4}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_hi_hi_3 = {_might_hit_head_WIRE_3_7, _might_hit_head_WIRE_3_6}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_hi_3 = {might_hit_head_hi_hi_3, might_hit_head_hi_lo_3}; // @[fetch-buffer.scala:79:63]
wire [7:0] _might_hit_head_T_111 = {might_hit_head_hi_3, might_hit_head_lo_3}; // @[fetch-buffer.scala:79:63]
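  // Rotation k = 5: _might_hit_head_T_114 = {tail[18:0], tail[23:19]}, tail rotated left by five entries.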
wire [18:0] _might_hit_head_T_112 = tail[18:0]; // @[fetch-buffer.scala:62:21, :75:11]
wire [4:0] _might_hit_head_T_113 = tail[23:19]; // @[fetch-buffer.scala:62:21, :75:24]
wire [23:0] _might_hit_head_T_114 = {_might_hit_head_T_112, _might_hit_head_T_113}; // @[fetch-buffer.scala:75:{8,11,24}]
wire _might_hit_head_T_115 = _might_hit_head_T_114[0]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_4_0 = _might_hit_head_T_115; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_116 = _might_hit_head_T_114[1]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_117 = _might_hit_head_T_114[2]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_118 = _might_hit_head_T_114[3]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_4_1 = _might_hit_head_T_118; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_119 = _might_hit_head_T_114[4]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_120 = _might_hit_head_T_114[5]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_121 = _might_hit_head_T_114[6]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_4_2 = _might_hit_head_T_121; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_122 = _might_hit_head_T_114[7]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_123 = _might_hit_head_T_114[8]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_124 = _might_hit_head_T_114[9]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_4_3 = _might_hit_head_T_124; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_125 = _might_hit_head_T_114[10]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_126 = _might_hit_head_T_114[11]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_127 = _might_hit_head_T_114[12]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_4_4 = _might_hit_head_T_127; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_128 = _might_hit_head_T_114[13]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_129 = _might_hit_head_T_114[14]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_130 = _might_hit_head_T_114[15]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_4_5 = _might_hit_head_T_130; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_131 = _might_hit_head_T_114[16]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_132 = _might_hit_head_T_114[17]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_133 = _might_hit_head_T_114[18]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_4_6 = _might_hit_head_T_133; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_134 = _might_hit_head_T_114[19]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_135 = _might_hit_head_T_114[20]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_136 = _might_hit_head_T_114[21]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_4_7 = _might_hit_head_T_136; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_137 = _might_hit_head_T_114[22]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_138 = _might_hit_head_T_114[23]; // @[fetch-buffer.scala:75:8, :78:82]
wire [1:0] might_hit_head_lo_lo_4 = {_might_hit_head_WIRE_4_1, _might_hit_head_WIRE_4_0}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_lo_hi_4 = {_might_hit_head_WIRE_4_3, _might_hit_head_WIRE_4_2}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_lo_4 = {might_hit_head_lo_hi_4, might_hit_head_lo_lo_4}; // @[fetch-buffer.scala:79:63]
wire [1:0] might_hit_head_hi_lo_4 = {_might_hit_head_WIRE_4_5, _might_hit_head_WIRE_4_4}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_hi_hi_4 = {_might_hit_head_WIRE_4_7, _might_hit_head_WIRE_4_6}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_hi_4 = {might_hit_head_hi_hi_4, might_hit_head_hi_lo_4}; // @[fetch-buffer.scala:79:63]
wire [7:0] _might_hit_head_T_139 = {might_hit_head_hi_4, might_hit_head_lo_4}; // @[fetch-buffer.scala:79:63]
wire [17:0] _might_hit_head_T_140 = tail[17:0]; // @[fetch-buffer.scala:62:21, :75:11]
wire [5:0] _might_hit_head_T_141 = tail[23:18]; // @[fetch-buffer.scala:62:21, :75:24]
wire [23:0] _might_hit_head_T_142 = {_might_hit_head_T_140, _might_hit_head_T_141}; // @[fetch-buffer.scala:75:{8,11,24}]
wire _might_hit_head_T_143 = _might_hit_head_T_142[0]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_5_0 = _might_hit_head_T_143; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_144 = _might_hit_head_T_142[1]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_145 = _might_hit_head_T_142[2]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_146 = _might_hit_head_T_142[3]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_5_1 = _might_hit_head_T_146; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_147 = _might_hit_head_T_142[4]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_148 = _might_hit_head_T_142[5]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_149 = _might_hit_head_T_142[6]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_5_2 = _might_hit_head_T_149; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_150 = _might_hit_head_T_142[7]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_151 = _might_hit_head_T_142[8]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_152 = _might_hit_head_T_142[9]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_5_3 = _might_hit_head_T_152; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_153 = _might_hit_head_T_142[10]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_154 = _might_hit_head_T_142[11]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_155 = _might_hit_head_T_142[12]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_5_4 = _might_hit_head_T_155; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_156 = _might_hit_head_T_142[13]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_157 = _might_hit_head_T_142[14]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_158 = _might_hit_head_T_142[15]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_5_5 = _might_hit_head_T_158; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_159 = _might_hit_head_T_142[16]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_160 = _might_hit_head_T_142[17]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_161 = _might_hit_head_T_142[18]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_5_6 = _might_hit_head_T_161; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_162 = _might_hit_head_T_142[19]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_163 = _might_hit_head_T_142[20]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_164 = _might_hit_head_T_142[21]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_5_7 = _might_hit_head_T_164; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_165 = _might_hit_head_T_142[22]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_166 = _might_hit_head_T_142[23]; // @[fetch-buffer.scala:75:8, :78:82]
wire [1:0] might_hit_head_lo_lo_5 = {_might_hit_head_WIRE_5_1, _might_hit_head_WIRE_5_0}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_lo_hi_5 = {_might_hit_head_WIRE_5_3, _might_hit_head_WIRE_5_2}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_lo_5 = {might_hit_head_lo_hi_5, might_hit_head_lo_lo_5}; // @[fetch-buffer.scala:79:63]
wire [1:0] might_hit_head_hi_lo_5 = {_might_hit_head_WIRE_5_5, _might_hit_head_WIRE_5_4}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_hi_hi_5 = {_might_hit_head_WIRE_5_7, _might_hit_head_WIRE_5_6}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_hi_5 = {might_hit_head_hi_hi_5, might_hit_head_hi_lo_5}; // @[fetch-buffer.scala:79:63]
wire [7:0] _might_hit_head_T_167 = {might_hit_head_hi_5, might_hit_head_lo_5}; // @[fetch-buffer.scala:79:63]
wire [16:0] _might_hit_head_T_168 = tail[16:0]; // @[fetch-buffer.scala:62:21, :75:11]
wire [6:0] _might_hit_head_T_169 = tail[23:17]; // @[fetch-buffer.scala:62:21, :75:24]
wire [23:0] _might_hit_head_T_170 = {_might_hit_head_T_168, _might_hit_head_T_169}; // @[fetch-buffer.scala:75:{8,11,24}]
wire _might_hit_head_T_171 = _might_hit_head_T_170[0]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_6_0 = _might_hit_head_T_171; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_172 = _might_hit_head_T_170[1]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_173 = _might_hit_head_T_170[2]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_174 = _might_hit_head_T_170[3]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_6_1 = _might_hit_head_T_174; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_175 = _might_hit_head_T_170[4]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_176 = _might_hit_head_T_170[5]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_177 = _might_hit_head_T_170[6]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_6_2 = _might_hit_head_T_177; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_178 = _might_hit_head_T_170[7]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_179 = _might_hit_head_T_170[8]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_180 = _might_hit_head_T_170[9]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_6_3 = _might_hit_head_T_180; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_181 = _might_hit_head_T_170[10]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_182 = _might_hit_head_T_170[11]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_183 = _might_hit_head_T_170[12]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_6_4 = _might_hit_head_T_183; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_184 = _might_hit_head_T_170[13]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_185 = _might_hit_head_T_170[14]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_186 = _might_hit_head_T_170[15]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_6_5 = _might_hit_head_T_186; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_187 = _might_hit_head_T_170[16]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_188 = _might_hit_head_T_170[17]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_189 = _might_hit_head_T_170[18]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_6_6 = _might_hit_head_T_189; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_190 = _might_hit_head_T_170[19]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_191 = _might_hit_head_T_170[20]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_192 = _might_hit_head_T_170[21]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_WIRE_6_7 = _might_hit_head_T_192; // @[fetch-buffer.scala:78:{61,82}]
wire _might_hit_head_T_193 = _might_hit_head_T_170[22]; // @[fetch-buffer.scala:75:8, :78:82]
wire _might_hit_head_T_194 = _might_hit_head_T_170[23]; // @[fetch-buffer.scala:75:8, :78:82]
wire [1:0] might_hit_head_lo_lo_6 = {_might_hit_head_WIRE_6_1, _might_hit_head_WIRE_6_0}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_lo_hi_6 = {_might_hit_head_WIRE_6_3, _might_hit_head_WIRE_6_2}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_lo_6 = {might_hit_head_lo_hi_6, might_hit_head_lo_lo_6}; // @[fetch-buffer.scala:79:63]
wire [1:0] might_hit_head_hi_lo_6 = {_might_hit_head_WIRE_6_5, _might_hit_head_WIRE_6_4}; // @[fetch-buffer.scala:78:61, :79:63]
wire [1:0] might_hit_head_hi_hi_6 = {_might_hit_head_WIRE_6_7, _might_hit_head_WIRE_6_6}; // @[fetch-buffer.scala:78:61, :79:63]
wire [3:0] might_hit_head_hi_6 = {might_hit_head_hi_hi_6, might_hit_head_hi_lo_6}; // @[fetch-buffer.scala:79:63]
wire [7:0] _might_hit_head_T_195 = {might_hit_head_hi_6, might_hit_head_lo_6}; // @[fetch-buffer.scala:79:63]
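  // Reduce each rotated copy of the tail to row granularity, mask it with the head
  // pointer, and OR the results: a nonzero value means some rotation of the tail
  // would land in the head row.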
wire [7:0] _might_hit_head_T_196 = head & _might_hit_head_T_27; // @[fetch-buffer.scala:61:21, :79:{63,88}]
wire [7:0] _might_hit_head_T_197 = head & _might_hit_head_T_55; // @[fetch-buffer.scala:61:21, :79:{63,88}]
wire [7:0] _might_hit_head_T_198 = head & _might_hit_head_T_83; // @[fetch-buffer.scala:61:21, :79:{63,88}]
wire [7:0] _might_hit_head_T_199 = head & _might_hit_head_T_111; // @[fetch-buffer.scala:61:21, :79:{63,88}]
wire [7:0] _might_hit_head_T_200 = head & _might_hit_head_T_139; // @[fetch-buffer.scala:61:21, :79:{63,88}]
wire [7:0] _might_hit_head_T_201 = head & _might_hit_head_T_167; // @[fetch-buffer.scala:61:21, :79:{63,88}]
wire [7:0] _might_hit_head_T_202 = head & _might_hit_head_T_195; // @[fetch-buffer.scala:61:21, :79:{63,88}]
wire [7:0] _might_hit_head_T_203 = _might_hit_head_T_196 | _might_hit_head_T_197; // @[fetch-buffer.scala:79:{88,104}]
wire [7:0] _might_hit_head_T_204 = _might_hit_head_T_203 | _might_hit_head_T_198; // @[fetch-buffer.scala:79:{88,104}]
wire [7:0] _might_hit_head_T_205 = _might_hit_head_T_204 | _might_hit_head_T_199; // @[fetch-buffer.scala:79:{88,104}]
wire [7:0] _might_hit_head_T_206 = _might_hit_head_T_205 | _might_hit_head_T_200; // @[fetch-buffer.scala:79:{88,104}]
wire [7:0] _might_hit_head_T_207 = _might_hit_head_T_206 | _might_hit_head_T_201; // @[fetch-buffer.scala:79:{88,104}]
wire [7:0] _might_hit_head_T_208 = _might_hit_head_T_207 | _might_hit_head_T_202; // @[fetch-buffer.scala:79:{88,104}]
wire might_hit_head = |_might_hit_head_T_208; // @[fetch-buffer.scala:79:{104,108}]
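  // at_head: take every coreWidth-th bit of the one-hot tail (one bit per row) and
  // test for overlap with the one-hot head, i.e. whether the tail currently sits in
  // the head row.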
wire _at_head_T = tail[0]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_WIRE_0 = _at_head_T; // @[fetch-buffer.scala:80:{25,31}]
wire _at_head_T_1 = tail[1]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_2 = tail[2]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_3 = tail[3]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_WIRE_1 = _at_head_T_3; // @[fetch-buffer.scala:80:{25,31}]
wire _at_head_T_4 = tail[4]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_5 = tail[5]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_6 = tail[6]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_WIRE_2 = _at_head_T_6; // @[fetch-buffer.scala:80:{25,31}]
wire _at_head_T_7 = tail[7]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_8 = tail[8]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_9 = tail[9]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_WIRE_3 = _at_head_T_9; // @[fetch-buffer.scala:80:{25,31}]
wire _at_head_T_10 = tail[10]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_11 = tail[11]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_12 = tail[12]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_WIRE_4 = _at_head_T_12; // @[fetch-buffer.scala:80:{25,31}]
wire _at_head_T_13 = tail[13]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_14 = tail[14]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_15 = tail[15]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_WIRE_5 = _at_head_T_15; // @[fetch-buffer.scala:80:{25,31}]
wire _at_head_T_16 = tail[16]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_17 = tail[17]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_18 = tail[18]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_WIRE_6 = _at_head_T_18; // @[fetch-buffer.scala:80:{25,31}]
wire _at_head_T_19 = tail[19]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_20 = tail[20]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_T_21 = tail[21]; // @[fetch-buffer.scala:62:21, :80:31]
wire _at_head_WIRE_7 = _at_head_T_21; // @[fetch-buffer.scala:80:{25,31}]
wire _at_head_T_22 = tail[22]; // @[fetch-buffer.scala:62:21, :80:31]
wire [1:0] at_head_lo_lo = {_at_head_WIRE_1, _at_head_WIRE_0}; // @[fetch-buffer.scala:80:25, :81:29]
wire [1:0] at_head_lo_hi = {_at_head_WIRE_3, _at_head_WIRE_2}; // @[fetch-buffer.scala:80:25, :81:29]
wire [3:0] at_head_lo = {at_head_lo_hi, at_head_lo_lo}; // @[fetch-buffer.scala:81:29]
wire [1:0] at_head_hi_lo = {_at_head_WIRE_5, _at_head_WIRE_4}; // @[fetch-buffer.scala:80:25, :81:29]
wire [1:0] at_head_hi_hi = {_at_head_WIRE_7, _at_head_WIRE_6}; // @[fetch-buffer.scala:80:25, :81:29]
wire [3:0] at_head_hi = {at_head_hi_hi, at_head_hi_lo}; // @[fetch-buffer.scala:81:29]
wire [7:0] _at_head_T_24 = {at_head_hi, at_head_lo}; // @[fetch-buffer.scala:81:29]
wire [7:0] _at_head_T_25 = _at_head_T_24 & head; // @[fetch-buffer.scala:61:21, :81:{29,36}]
wire at_head = |_at_head_T_25; // @[fetch-buffer.scala:81:{36,44}]
wire _do_enq_T = at_head & maybe_full; // @[fetch-buffer.scala:64:27, :81:44, :82:26]
wire _do_enq_T_1 = _do_enq_T | might_hit_head; // @[fetch-buffer.scala:79:108, :82:{26,40}]
assign do_enq = ~_do_enq_T_1; // @[fetch-buffer.scala:82:{16,40}]
assign io_enq_ready_0 = do_enq; // @[fetch-buffer.scala:40:7, :82:16]
wire _in_mask_0_T_1; // @[fetch-buffer.scala:98:49]
wire _in_mask_1_T_1; // @[fetch-buffer.scala:98:49]
wire _in_mask_2_T_1; // @[fetch-buffer.scala:98:49]
wire _in_mask_3_T_1; // @[fetch-buffer.scala:98:49]
wire _in_mask_4_T_1; // @[fetch-buffer.scala:98:49]
wire _in_mask_5_T_1; // @[fetch-buffer.scala:98:49]
wire _in_mask_6_T_1; // @[fetch-buffer.scala:98:49]
wire _in_mask_7_T_1; // @[fetch-buffer.scala:98:49]
wire in_mask_0; // @[fetch-buffer.scala:87:21]
wire in_mask_1; // @[fetch-buffer.scala:87:21]
wire in_mask_2; // @[fetch-buffer.scala:87:21]
wire in_mask_3; // @[fetch-buffer.scala:87:21]
wire in_mask_4; // @[fetch-buffer.scala:87:21]
wire in_mask_5; // @[fetch-buffer.scala:87:21]
wire in_mask_6; // @[fetch-buffer.scala:87:21]
wire in_mask_7; // @[fetch-buffer.scala:87:21]
wire _in_uops_0_is_rvc_T_1; // @[fetch-buffer.scala:115:62]
wire _in_uops_0_taken_T_1; // @[fetch-buffer.scala:116:69]
wire _in_uops_1_is_rvc_T_1; // @[fetch-buffer.scala:115:62]
wire [39:0] pc_1; // @[fetch-buffer.scala:95:43]
wire _in_uops_1_taken_T_1; // @[fetch-buffer.scala:116:69]
wire _in_uops_2_is_rvc_T_1; // @[fetch-buffer.scala:115:62]
wire [39:0] pc_2; // @[fetch-buffer.scala:95:43]
wire _in_uops_2_taken_T_1; // @[fetch-buffer.scala:116:69]
wire _in_uops_3_is_rvc_T_1; // @[fetch-buffer.scala:115:62]
wire [39:0] pc_3; // @[fetch-buffer.scala:95:43]
wire _in_uops_3_taken_T_1; // @[fetch-buffer.scala:116:69]
wire _in_uops_4_is_rvc_T_1; // @[fetch-buffer.scala:115:62]
wire _in_uops_4_taken_T_1; // @[fetch-buffer.scala:116:69]
wire _in_uops_5_is_rvc_T_1; // @[fetch-buffer.scala:115:62]
wire [39:0] pc_5; // @[fetch-buffer.scala:95:43]
wire _in_uops_5_taken_T_1; // @[fetch-buffer.scala:116:69]
wire _in_uops_6_is_rvc_T_1; // @[fetch-buffer.scala:115:62]
wire [39:0] pc_6; // @[fetch-buffer.scala:95:43]
wire _in_uops_6_taken_T_1; // @[fetch-buffer.scala:116:69]
wire _in_uops_7_is_rvc_T_1; // @[fetch-buffer.scala:115:62]
wire [39:0] pc_7; // @[fetch-buffer.scala:95:43]
wire _in_uops_7_taken_T_1; // @[fetch-buffer.scala:116:69]
wire in_uops_0_is_rvc; // @[fetch-buffer.scala:88:21]
wire [39:0] in_uops_0_debug_pc; // @[fetch-buffer.scala:88:21]
wire in_uops_0_is_sfb; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_0_pc_lob; // @[fetch-buffer.scala:88:21]
wire in_uops_0_taken; // @[fetch-buffer.scala:88:21]
wire in_uops_1_is_rvc; // @[fetch-buffer.scala:88:21]
wire [39:0] in_uops_1_debug_pc; // @[fetch-buffer.scala:88:21]
wire in_uops_1_is_sfb; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_1_pc_lob; // @[fetch-buffer.scala:88:21]
wire in_uops_1_taken; // @[fetch-buffer.scala:88:21]
wire in_uops_2_is_rvc; // @[fetch-buffer.scala:88:21]
wire [39:0] in_uops_2_debug_pc; // @[fetch-buffer.scala:88:21]
wire in_uops_2_is_sfb; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_2_pc_lob; // @[fetch-buffer.scala:88:21]
wire in_uops_2_taken; // @[fetch-buffer.scala:88:21]
wire in_uops_3_is_rvc; // @[fetch-buffer.scala:88:21]
wire [39:0] in_uops_3_debug_pc; // @[fetch-buffer.scala:88:21]
wire in_uops_3_is_sfb; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_3_pc_lob; // @[fetch-buffer.scala:88:21]
wire in_uops_3_taken; // @[fetch-buffer.scala:88:21]
wire in_uops_4_is_rvc; // @[fetch-buffer.scala:88:21]
wire [39:0] in_uops_4_debug_pc; // @[fetch-buffer.scala:88:21]
wire in_uops_4_is_sfb; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_4_pc_lob; // @[fetch-buffer.scala:88:21]
wire in_uops_4_taken; // @[fetch-buffer.scala:88:21]
wire in_uops_5_is_rvc; // @[fetch-buffer.scala:88:21]
wire [39:0] in_uops_5_debug_pc; // @[fetch-buffer.scala:88:21]
wire in_uops_5_is_sfb; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_5_pc_lob; // @[fetch-buffer.scala:88:21]
wire in_uops_5_taken; // @[fetch-buffer.scala:88:21]
wire in_uops_6_is_rvc; // @[fetch-buffer.scala:88:21]
wire [39:0] in_uops_6_debug_pc; // @[fetch-buffer.scala:88:21]
wire in_uops_6_is_sfb; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_6_pc_lob; // @[fetch-buffer.scala:88:21]
wire in_uops_6_taken; // @[fetch-buffer.scala:88:21]
wire in_uops_7_is_rvc; // @[fetch-buffer.scala:88:21]
wire [39:0] in_uops_7_debug_pc; // @[fetch-buffer.scala:88:21]
wire in_uops_7_is_sfb; // @[fetch-buffer.scala:88:21]
wire [5:0] in_uops_7_pc_lob; // @[fetch-buffer.scala:88:21]
wire in_uops_7_taken; // @[fetch-buffer.scala:88:21]
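  // Per-slot PC generation: each slot's PC is the fetch address with its low bits
  // cleared, plus 2 bytes per slot index. Slots 0 and 4 use the corresponding
  // edge_inst flag, backing debug_pc/pc_lob up by two bytes for an instruction that
  // straddles the fetch boundary.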
wire [39:0] _pc_T = ~io_enq_bits_pc_0; // @[frontend.scala:160:33]
wire [39:0] _pc_T_1 = {_pc_T[39:3], 3'h7}; // @[frontend.scala:160:{33,39}]
wire [39:0] _pc_T_2 = ~_pc_T_1; // @[frontend.scala:160:{31,39}]
wire [40:0] _pc_T_3 = {1'h0, _pc_T_2}; // @[frontend.scala:160:31]
wire [39:0] pc = _pc_T_3[39:0]; // @[fetch-buffer.scala:95:43]
wire _in_mask_0_T = io_enq_bits_mask_0[0]; // @[fetch-buffer.scala:40:7, :98:68]
assign _in_mask_0_T_1 = io_enq_valid_0 & _in_mask_0_T; // @[fetch-buffer.scala:40:7, :98:{49,68}]
assign in_mask_0 = _in_mask_0_T_1; // @[fetch-buffer.scala:87:21, :98:49]
assign in_uops_0_is_sfb = _in_uops_0_is_sfb_T; // @[fetch-buffer.scala:88:21, :103:56]
wire [39:0] _in_uops_0_debug_pc_T = ~io_enq_bits_pc_0; // @[frontend.scala:160:33]
wire [39:0] _in_uops_0_debug_pc_T_1 = {_in_uops_0_debug_pc_T[39:3], 3'h7}; // @[frontend.scala:160:{33,39}]
wire [39:0] _in_uops_0_debug_pc_T_2 = ~_in_uops_0_debug_pc_T_1; // @[frontend.scala:160:{31,39}]
wire [40:0] _in_uops_0_debug_pc_T_3 = {1'h0, _in_uops_0_debug_pc_T_2}; // @[frontend.scala:160:31]
wire [39:0] _in_uops_0_debug_pc_T_4 = _in_uops_0_debug_pc_T_3[39:0]; // @[fetch-buffer.scala:107:61]
wire [40:0] _in_uops_0_debug_pc_T_5 = {1'h0, _in_uops_0_debug_pc_T_4} - 41'h2; // @[fetch-buffer.scala:107:{61,81}]
wire [39:0] _in_uops_0_debug_pc_T_6 = _in_uops_0_debug_pc_T_5[39:0]; // @[fetch-buffer.scala:107:81]
assign in_uops_0_debug_pc = io_enq_bits_edge_inst_0_0 ? _in_uops_0_debug_pc_T_6 : pc; // @[fetch-buffer.scala:40:7, :88:21, :95:43, :100:33, :106:41, :107:{32,81}]
wire [39:0] _in_uops_0_pc_lob_T = ~io_enq_bits_pc_0; // @[frontend.scala:160:33]
wire [39:0] _in_uops_0_pc_lob_T_1 = {_in_uops_0_pc_lob_T[39:3], 3'h7}; // @[frontend.scala:160:{33,39}]
wire [39:0] _in_uops_0_pc_lob_T_2 = ~_in_uops_0_pc_lob_T_1; // @[frontend.scala:160:{31,39}]
wire [40:0] _in_uops_0_pc_lob_T_3 = {1'h0, _in_uops_0_pc_lob_T_2}; // @[frontend.scala:160:31]
wire [39:0] _in_uops_0_pc_lob_T_4 = _in_uops_0_pc_lob_T_3[39:0]; // @[fetch-buffer.scala:108:61]
assign in_uops_0_pc_lob = io_enq_bits_edge_inst_0_0 ? _in_uops_0_pc_lob_T_4[5:0] : pc[5:0]; // @[fetch-buffer.scala:40:7, :88:21, :95:43, :101:33, :106:41, :108:{32,61}]
wire [1:0] _in_uops_0_is_rvc_T = io_enq_bits_insts_0_0[1:0]; // @[fetch-buffer.scala:40:7, :115:56]
assign _in_uops_0_is_rvc_T_1 = _in_uops_0_is_rvc_T != 2'h3; // @[fetch-buffer.scala:115:{56,62}]
assign in_uops_0_is_rvc = _in_uops_0_is_rvc_T_1; // @[fetch-buffer.scala:88:21, :115:62]
wire _in_uops_0_taken_T = io_enq_bits_cfi_idx_bits_0 == 3'h0; // @[fetch-buffer.scala:40:7, :116:61]
assign _in_uops_0_taken_T_1 = _in_uops_0_taken_T & io_enq_bits_cfi_idx_valid_0; // @[fetch-buffer.scala:40:7, :116:{61,69}]
assign in_uops_0_taken = _in_uops_0_taken_T_1; // @[fetch-buffer.scala:88:21, :116:69]
wire [39:0] _pc_T_4 = ~io_enq_bits_pc_0; // @[frontend.scala:160:33]
wire [39:0] _pc_T_5 = {_pc_T_4[39:3], 3'h7}; // @[frontend.scala:160:{33,39}]
wire [39:0] _pc_T_6 = ~_pc_T_5; // @[frontend.scala:160:{31,39}]
wire [40:0] _pc_T_7 = {1'h0, _pc_T_6} + 41'h2; // @[frontend.scala:160:31]
assign pc_1 = _pc_T_7[39:0]; // @[fetch-buffer.scala:95:43]
assign in_uops_1_debug_pc = pc_1; // @[fetch-buffer.scala:88:21, :95:43]
wire _in_mask_1_T = io_enq_bits_mask_0[1]; // @[fetch-buffer.scala:40:7, :98:68]
assign _in_mask_1_T_1 = io_enq_valid_0 & _in_mask_1_T; // @[fetch-buffer.scala:40:7, :98:{49,68}]
assign in_mask_1 = _in_mask_1_T_1; // @[fetch-buffer.scala:87:21, :98:49]
assign in_uops_1_pc_lob = pc_1[5:0]; // @[fetch-buffer.scala:88:21, :95:43, :101:33]
assign in_uops_1_is_sfb = _in_uops_1_is_sfb_T; // @[fetch-buffer.scala:88:21, :103:56]
wire [1:0] _in_uops_1_is_rvc_T = io_enq_bits_insts_1_0[1:0]; // @[fetch-buffer.scala:40:7, :115:56]
assign _in_uops_1_is_rvc_T_1 = _in_uops_1_is_rvc_T != 2'h3; // @[fetch-buffer.scala:115:{56,62}]
assign in_uops_1_is_rvc = _in_uops_1_is_rvc_T_1; // @[fetch-buffer.scala:88:21, :115:62]
wire _in_uops_1_taken_T = io_enq_bits_cfi_idx_bits_0 == 3'h1; // @[fetch-buffer.scala:40:7, :116:61]
assign _in_uops_1_taken_T_1 = _in_uops_1_taken_T & io_enq_bits_cfi_idx_valid_0; // @[fetch-buffer.scala:40:7, :116:{61,69}]
assign in_uops_1_taken = _in_uops_1_taken_T_1; // @[fetch-buffer.scala:88:21, :116:69]
wire [39:0] _pc_T_8 = ~io_enq_bits_pc_0; // @[frontend.scala:160:33]
wire [39:0] _pc_T_9 = {_pc_T_8[39:3], 3'h7}; // @[frontend.scala:160:{33,39}]
wire [39:0] _pc_T_10 = ~_pc_T_9; // @[frontend.scala:160:{31,39}]
wire [40:0] _pc_T_11 = {1'h0, _pc_T_10} + 41'h4; // @[frontend.scala:160:31]
assign pc_2 = _pc_T_11[39:0]; // @[fetch-buffer.scala:95:43]
assign in_uops_2_debug_pc = pc_2; // @[fetch-buffer.scala:88:21, :95:43]
wire _in_mask_2_T = io_enq_bits_mask_0[2]; // @[fetch-buffer.scala:40:7, :98:68]
assign _in_mask_2_T_1 = io_enq_valid_0 & _in_mask_2_T; // @[fetch-buffer.scala:40:7, :98:{49,68}]
assign in_mask_2 = _in_mask_2_T_1; // @[fetch-buffer.scala:87:21, :98:49]
assign in_uops_2_pc_lob = pc_2[5:0]; // @[fetch-buffer.scala:88:21, :95:43, :101:33]
assign in_uops_2_is_sfb = _in_uops_2_is_sfb_T; // @[fetch-buffer.scala:88:21, :103:56]
wire [1:0] _in_uops_2_is_rvc_T = io_enq_bits_insts_2_0[1:0]; // @[fetch-buffer.scala:40:7, :115:56]
assign _in_uops_2_is_rvc_T_1 = _in_uops_2_is_rvc_T != 2'h3; // @[fetch-buffer.scala:115:{56,62}]
assign in_uops_2_is_rvc = _in_uops_2_is_rvc_T_1; // @[fetch-buffer.scala:88:21, :115:62]
wire _in_uops_2_taken_T = io_enq_bits_cfi_idx_bits_0 == 3'h2; // @[fetch-buffer.scala:40:7, :116:61]
assign _in_uops_2_taken_T_1 = _in_uops_2_taken_T & io_enq_bits_cfi_idx_valid_0; // @[fetch-buffer.scala:40:7, :116:{61,69}]
assign in_uops_2_taken = _in_uops_2_taken_T_1; // @[fetch-buffer.scala:88:21, :116:69]
wire [39:0] _pc_T_12 = ~io_enq_bits_pc_0; // @[frontend.scala:160:33]
wire [39:0] _pc_T_13 = {_pc_T_12[39:3], 3'h7}; // @[frontend.scala:160:{33,39}]
wire [39:0] _pc_T_14 = ~_pc_T_13; // @[frontend.scala:160:{31,39}]
wire [40:0] _pc_T_15 = {1'h0, _pc_T_14} + 41'h6; // @[frontend.scala:160:31]
assign pc_3 = _pc_T_15[39:0]; // @[fetch-buffer.scala:95:43]
assign in_uops_3_debug_pc = pc_3; // @[fetch-buffer.scala:88:21, :95:43]
wire _in_mask_3_T = io_enq_bits_mask_0[3]; // @[fetch-buffer.scala:40:7, :98:68]
assign _in_mask_3_T_1 = io_enq_valid_0 & _in_mask_3_T; // @[fetch-buffer.scala:40:7, :98:{49,68}]
assign in_mask_3 = _in_mask_3_T_1; // @[fetch-buffer.scala:87:21, :98:49]
assign in_uops_3_pc_lob = pc_3[5:0]; // @[fetch-buffer.scala:88:21, :95:43, :101:33]
assign in_uops_3_is_sfb = _in_uops_3_is_sfb_T; // @[fetch-buffer.scala:88:21, :103:56]
wire [1:0] _in_uops_3_is_rvc_T = io_enq_bits_insts_3_0[1:0]; // @[fetch-buffer.scala:40:7, :115:56]
assign _in_uops_3_is_rvc_T_1 = _in_uops_3_is_rvc_T != 2'h3; // @[fetch-buffer.scala:115:{56,62}]
assign in_uops_3_is_rvc = _in_uops_3_is_rvc_T_1; // @[fetch-buffer.scala:88:21, :115:62]
wire _in_uops_3_taken_T = io_enq_bits_cfi_idx_bits_0 == 3'h3; // @[fetch-buffer.scala:40:7, :116:61]
assign _in_uops_3_taken_T_1 = _in_uops_3_taken_T & io_enq_bits_cfi_idx_valid_0; // @[fetch-buffer.scala:40:7, :116:{61,69}]
assign in_uops_3_taken = _in_uops_3_taken_T_1; // @[fetch-buffer.scala:88:21, :116:69]
wire [39:0] _pc_T_16 = ~io_enq_bits_pc_0; // @[frontend.scala:160:33]
wire [39:0] _pc_T_17 = {_pc_T_16[39:3], 3'h7}; // @[frontend.scala:160:{33,39}]
wire [39:0] _pc_T_18 = ~_pc_T_17; // @[frontend.scala:160:{31,39}]
wire [40:0] _pc_T_19 = {1'h0, _pc_T_18} + 41'h8; // @[frontend.scala:160:31]
wire [39:0] pc_4 = _pc_T_19[39:0]; // @[fetch-buffer.scala:95:43]
wire _in_mask_4_T = io_enq_bits_mask_0[4]; // @[fetch-buffer.scala:40:7, :98:68]
assign _in_mask_4_T_1 = io_enq_valid_0 & _in_mask_4_T; // @[fetch-buffer.scala:40:7, :98:{49,68}]
assign in_mask_4 = _in_mask_4_T_1; // @[fetch-buffer.scala:87:21, :98:49]
assign in_uops_4_is_sfb = _in_uops_4_is_sfb_T; // @[fetch-buffer.scala:88:21, :103:56]
wire [39:0] _in_uops_4_debug_pc_T = ~io_enq_bits_pc_0; // @[frontend.scala:160:33]
wire [39:0] _in_uops_4_debug_pc_T_1 = {_in_uops_4_debug_pc_T[39:3], 3'h7}; // @[frontend.scala:160:{33,39}]
wire [39:0] _in_uops_4_debug_pc_T_2 = ~_in_uops_4_debug_pc_T_1; // @[frontend.scala:160:{31,39}]
wire [40:0] _in_uops_4_debug_pc_T_3 = {1'h0, _in_uops_4_debug_pc_T_2} + 41'h8; // @[frontend.scala:160:31]
wire [39:0] _in_uops_4_debug_pc_T_4 = _in_uops_4_debug_pc_T_3[39:0]; // @[fetch-buffer.scala:107:61]
wire [40:0] _in_uops_4_debug_pc_T_5 = {1'h0, _in_uops_4_debug_pc_T_4} - 41'h2; // @[fetch-buffer.scala:107:{61,81}]
wire [39:0] _in_uops_4_debug_pc_T_6 = _in_uops_4_debug_pc_T_5[39:0]; // @[fetch-buffer.scala:107:81]
assign in_uops_4_debug_pc = io_enq_bits_edge_inst_1_0 ? _in_uops_4_debug_pc_T_6 : pc_4; // @[fetch-buffer.scala:40:7, :88:21, :95:43, :100:33, :106:41, :107:{32,81}]
wire [39:0] _in_uops_4_pc_lob_T = ~io_enq_bits_pc_0; // @[frontend.scala:160:33]
wire [39:0] _in_uops_4_pc_lob_T_1 = {_in_uops_4_pc_lob_T[39:3], 3'h7}; // @[frontend.scala:160:{33,39}]
wire [39:0] _in_uops_4_pc_lob_T_2 = ~_in_uops_4_pc_lob_T_1; // @[frontend.scala:160:{31,39}]
wire [40:0] _in_uops_4_pc_lob_T_3 = {1'h0, _in_uops_4_pc_lob_T_2} + 41'h8; // @[frontend.scala:160:31]
wire [39:0] _in_uops_4_pc_lob_T_4 = _in_uops_4_pc_lob_T_3[39:0]; // @[fetch-buffer.scala:108:61]
assign in_uops_4_pc_lob = io_enq_bits_edge_inst_1_0 ? _in_uops_4_pc_lob_T_4[5:0] : pc_4[5:0]; // @[fetch-buffer.scala:40:7, :88:21, :95:43, :101:33, :106:41, :108:{32,61}]
wire [1:0] _in_uops_4_is_rvc_T = io_enq_bits_insts_4_0[1:0]; // @[fetch-buffer.scala:40:7, :115:56]
assign _in_uops_4_is_rvc_T_1 = _in_uops_4_is_rvc_T != 2'h3; // @[fetch-buffer.scala:115:{56,62}]
assign in_uops_4_is_rvc = _in_uops_4_is_rvc_T_1; // @[fetch-buffer.scala:88:21, :115:62]
wire _in_uops_4_taken_T = io_enq_bits_cfi_idx_bits_0 == 3'h4; // @[fetch-buffer.scala:40:7, :116:61]
assign _in_uops_4_taken_T_1 = _in_uops_4_taken_T & io_enq_bits_cfi_idx_valid_0; // @[fetch-buffer.scala:40:7, :116:{61,69}]
assign in_uops_4_taken = _in_uops_4_taken_T_1; // @[fetch-buffer.scala:88:21, :116:69]
wire [39:0] _pc_T_20 = ~io_enq_bits_pc_0; // @[frontend.scala:160:33]
wire [39:0] _pc_T_21 = {_pc_T_20[39:3], 3'h7}; // @[frontend.scala:160:{33,39}]
wire [39:0] _pc_T_22 = ~_pc_T_21; // @[frontend.scala:160:{31,39}]
wire [40:0] _pc_T_23 = {1'h0, _pc_T_22} + 41'hA; // @[frontend.scala:160:31]
assign pc_5 = _pc_T_23[39:0]; // @[fetch-buffer.scala:95:43]
assign in_uops_5_debug_pc = pc_5; // @[fetch-buffer.scala:88:21, :95:43]
wire _in_mask_5_T = io_enq_bits_mask_0[5]; // @[fetch-buffer.scala:40:7, :98:68]
assign _in_mask_5_T_1 = io_enq_valid_0 & _in_mask_5_T; // @[fetch-buffer.scala:40:7, :98:{49,68}]
assign in_mask_5 = _in_mask_5_T_1; // @[fetch-buffer.scala:87:21, :98:49]
assign in_uops_5_pc_lob = pc_5[5:0]; // @[fetch-buffer.scala:88:21, :95:43, :101:33]
assign in_uops_5_is_sfb = _in_uops_5_is_sfb_T; // @[fetch-buffer.scala:88:21, :103:56]
wire [1:0] _in_uops_5_is_rvc_T = io_enq_bits_insts_5_0[1:0]; // @[fetch-buffer.scala:40:7, :115:56]
assign _in_uops_5_is_rvc_T_1 = _in_uops_5_is_rvc_T != 2'h3; // @[fetch-buffer.scala:115:{56,62}]
assign in_uops_5_is_rvc = _in_uops_5_is_rvc_T_1; // @[fetch-buffer.scala:88:21, :115:62]
wire _in_uops_5_taken_T = io_enq_bits_cfi_idx_bits_0 == 3'h5; // @[fetch-buffer.scala:40:7, :116:61]
assign _in_uops_5_taken_T_1 = _in_uops_5_taken_T & io_enq_bits_cfi_idx_valid_0; // @[fetch-buffer.scala:40:7, :116:{61,69}]
assign in_uops_5_taken = _in_uops_5_taken_T_1; // @[fetch-buffer.scala:88:21, :116:69]
wire [39:0] _pc_T_24 = ~io_enq_bits_pc_0; // @[frontend.scala:160:33]
wire [39:0] _pc_T_25 = {_pc_T_24[39:3], 3'h7}; // @[frontend.scala:160:{33,39}]
wire [39:0] _pc_T_26 = ~_pc_T_25; // @[frontend.scala:160:{31,39}]
wire [40:0] _pc_T_27 = {1'h0, _pc_T_26} + 41'hC; // @[frontend.scala:160:31]
assign pc_6 = _pc_T_27[39:0]; // @[fetch-buffer.scala:95:43]
assign in_uops_6_debug_pc = pc_6; // @[fetch-buffer.scala:88:21, :95:43]
wire _in_mask_6_T = io_enq_bits_mask_0[6]; // @[fetch-buffer.scala:40:7, :98:68]
assign _in_mask_6_T_1 = io_enq_valid_0 & _in_mask_6_T; // @[fetch-buffer.scala:40:7, :98:{49,68}]
assign in_mask_6 = _in_mask_6_T_1; // @[fetch-buffer.scala:87:21, :98:49]
assign in_uops_6_pc_lob = pc_6[5:0]; // @[fetch-buffer.scala:88:21, :95:43, :101:33]
assign in_uops_6_is_sfb = _in_uops_6_is_sfb_T; // @[fetch-buffer.scala:88:21, :103:56]
wire [1:0] _in_uops_6_is_rvc_T = io_enq_bits_insts_6_0[1:0]; // @[fetch-buffer.scala:40:7, :115:56]
assign _in_uops_6_is_rvc_T_1 = _in_uops_6_is_rvc_T != 2'h3; // @[fetch-buffer.scala:115:{56,62}]
assign in_uops_6_is_rvc = _in_uops_6_is_rvc_T_1; // @[fetch-buffer.scala:88:21, :115:62]
wire _in_uops_6_taken_T = io_enq_bits_cfi_idx_bits_0 == 3'h6; // @[fetch-buffer.scala:40:7, :116:61]
assign _in_uops_6_taken_T_1 = _in_uops_6_taken_T & io_enq_bits_cfi_idx_valid_0; // @[fetch-buffer.scala:40:7, :116:{61,69}]
assign in_uops_6_taken = _in_uops_6_taken_T_1; // @[fetch-buffer.scala:88:21, :116:69]
wire [39:0] _pc_T_28 = ~io_enq_bits_pc_0; // @[frontend.scala:160:33]
wire [39:0] _pc_T_29 = {_pc_T_28[39:3], 3'h7}; // @[frontend.scala:160:{33,39}]
wire [39:0] _pc_T_30 = ~_pc_T_29; // @[frontend.scala:160:{31,39}]
wire [40:0] _pc_T_31 = {1'h0, _pc_T_30} + 41'hE; // @[frontend.scala:160:31]
assign pc_7 = _pc_T_31[39:0]; // @[fetch-buffer.scala:95:43]
assign in_uops_7_debug_pc = pc_7; // @[fetch-buffer.scala:88:21, :95:43]
wire _in_mask_7_T = io_enq_bits_mask_0[7]; // @[fetch-buffer.scala:40:7, :98:68]
assign _in_mask_7_T_1 = io_enq_valid_0 & _in_mask_7_T; // @[fetch-buffer.scala:40:7, :98:{49,68}]
assign in_mask_7 = _in_mask_7_T_1; // @[fetch-buffer.scala:87:21, :98:49]
assign in_uops_7_pc_lob = pc_7[5:0]; // @[fetch-buffer.scala:88:21, :95:43, :101:33]
assign in_uops_7_is_sfb = _in_uops_7_is_sfb_T; // @[fetch-buffer.scala:88:21, :103:56]
wire [1:0] _in_uops_7_is_rvc_T = io_enq_bits_insts_7_0[1:0]; // @[fetch-buffer.scala:40:7, :115:56]
assign _in_uops_7_is_rvc_T_1 = _in_uops_7_is_rvc_T != 2'h3; // @[fetch-buffer.scala:115:{56,62}]
assign in_uops_7_is_rvc = _in_uops_7_is_rvc_T_1; // @[fetch-buffer.scala:88:21, :115:62]
wire _in_uops_7_taken_T = &io_enq_bits_cfi_idx_bits_0; // @[fetch-buffer.scala:40:7, :116:61]
assign _in_uops_7_taken_T_1 = _in_uops_7_taken_T & io_enq_bits_cfi_idx_valid_0; // @[fetch-buffer.scala:40:7, :116:{61,69}]
assign in_uops_7_taken = _in_uops_7_taken_T_1; // @[fetch-buffer.scala:88:21, :116:69]
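  // Compute a one-hot write index for every slot: start from the current tail and
  // rotate it left by one entry for each earlier slot that actually enqueues
  // (in_mask set).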
wire [23:0] enq_idxs_1; // @[fetch-buffer.scala:128:22]
wire [23:0] enq_idxs_2; // @[fetch-buffer.scala:128:22]
wire [23:0] enq_idxs_3; // @[fetch-buffer.scala:128:22]
wire [23:0] enq_idxs_4; // @[fetch-buffer.scala:128:22]
wire [23:0] enq_idxs_5; // @[fetch-buffer.scala:128:22]
wire [23:0] enq_idxs_6; // @[fetch-buffer.scala:128:22]
wire [23:0] enq_idxs_7; // @[fetch-buffer.scala:128:22]
wire [23:0] _T_2 = {_might_hit_head_T, tail[23]}; // @[fetch-buffer.scala:62:21, :75:{11,24}, :132:8]
assign enq_idxs_1 = in_mask_0 ? _T_2 : tail; // @[fetch-buffer.scala:62:21, :87:21, :128:22, :132:8, :138:18]
wire [23:0] _T_6 = {enq_idxs_1[22:0], enq_idxs_1[23]}; // @[fetch-buffer.scala:128:22, :132:{8,12,24}]
assign enq_idxs_2 = in_mask_1 ? _T_6 : enq_idxs_1; // @[fetch-buffer.scala:87:21, :128:22, :132:8, :138:18]
wire [23:0] _T_10 = {enq_idxs_2[22:0], enq_idxs_2[23]}; // @[fetch-buffer.scala:128:22, :132:{8,12,24}]
assign enq_idxs_3 = in_mask_2 ? _T_10 : enq_idxs_2; // @[fetch-buffer.scala:87:21, :128:22, :132:8, :138:18]
wire [23:0] _T_14 = {enq_idxs_3[22:0], enq_idxs_3[23]}; // @[fetch-buffer.scala:128:22, :132:{8,12,24}]
assign enq_idxs_4 = in_mask_3 ? _T_14 : enq_idxs_3; // @[fetch-buffer.scala:87:21, :128:22, :132:8, :138:18]
wire [23:0] _T_18 = {enq_idxs_4[22:0], enq_idxs_4[23]}; // @[fetch-buffer.scala:128:22, :132:{8,12,24}]
assign enq_idxs_5 = in_mask_4 ? _T_18 : enq_idxs_4; // @[fetch-buffer.scala:87:21, :128:22, :132:8, :138:18]
wire [23:0] _T_22 = {enq_idxs_5[22:0], enq_idxs_5[23]}; // @[fetch-buffer.scala:128:22, :132:{8,12,24}]
assign enq_idxs_6 = in_mask_5 ? _T_22 : enq_idxs_5; // @[fetch-buffer.scala:87:21, :128:22, :132:8, :138:18]
wire [23:0] _T_26 = {enq_idxs_6[22:0], enq_idxs_6[23]}; // @[fetch-buffer.scala:128:22, :132:{8,12,24}]
assign enq_idxs_7 = in_mask_6 ? _T_26 : enq_idxs_6; // @[fetch-buffer.scala:87:21, :128:22, :132:8, :138:18]
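  // tail_collisions: entries of the head row that the tail currently occupies; the
  // first entry of each row only counts when the buffer is not possibly full (a
  // full buffer means that row is already completely written).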
wire _tail_collisions_T = head[0]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_4 = head[0]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_8 = head[0]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_1 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_T_2 = _tail_collisions_T_1; // @[fetch-buffer.scala:155:{49,61}]
wire _tail_collisions_T_3 = _tail_collisions_T & _tail_collisions_T_2; // @[fetch-buffer.scala:155:{31,45,61}]
wire _tail_collisions_WIRE_0 = _tail_collisions_T_3; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_7 = _tail_collisions_T_4; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_5 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_1 = _tail_collisions_T_7; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_11 = _tail_collisions_T_8; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_9 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_2 = _tail_collisions_T_11; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_12 = head[1]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_16 = head[1]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_20 = head[1]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_13 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_T_14 = _tail_collisions_T_13; // @[fetch-buffer.scala:155:{49,61}]
wire _tail_collisions_T_15 = _tail_collisions_T_12 & _tail_collisions_T_14; // @[fetch-buffer.scala:155:{31,45,61}]
wire _tail_collisions_WIRE_3 = _tail_collisions_T_15; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_19 = _tail_collisions_T_16; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_17 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_4 = _tail_collisions_T_19; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_23 = _tail_collisions_T_20; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_21 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_5 = _tail_collisions_T_23; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_24 = head[2]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_28 = head[2]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_32 = head[2]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_25 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_T_26 = _tail_collisions_T_25; // @[fetch-buffer.scala:155:{49,61}]
wire _tail_collisions_T_27 = _tail_collisions_T_24 & _tail_collisions_T_26; // @[fetch-buffer.scala:155:{31,45,61}]
wire _tail_collisions_WIRE_6 = _tail_collisions_T_27; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_31 = _tail_collisions_T_28; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_29 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_7 = _tail_collisions_T_31; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_35 = _tail_collisions_T_32; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_33 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_8 = _tail_collisions_T_35; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_36 = head[3]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_40 = head[3]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_44 = head[3]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_37 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_T_38 = _tail_collisions_T_37; // @[fetch-buffer.scala:155:{49,61}]
wire _tail_collisions_T_39 = _tail_collisions_T_36 & _tail_collisions_T_38; // @[fetch-buffer.scala:155:{31,45,61}]
wire _tail_collisions_WIRE_9 = _tail_collisions_T_39; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_43 = _tail_collisions_T_40; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_41 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_10 = _tail_collisions_T_43; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_47 = _tail_collisions_T_44; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_45 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_11 = _tail_collisions_T_47; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_48 = head[4]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_52 = head[4]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_56 = head[4]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_49 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_T_50 = _tail_collisions_T_49; // @[fetch-buffer.scala:155:{49,61}]
wire _tail_collisions_T_51 = _tail_collisions_T_48 & _tail_collisions_T_50; // @[fetch-buffer.scala:155:{31,45,61}]
wire _tail_collisions_WIRE_12 = _tail_collisions_T_51; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_55 = _tail_collisions_T_52; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_53 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_13 = _tail_collisions_T_55; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_59 = _tail_collisions_T_56; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_57 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_14 = _tail_collisions_T_59; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_60 = head[5]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_64 = head[5]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_68 = head[5]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_61 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_T_62 = _tail_collisions_T_61; // @[fetch-buffer.scala:155:{49,61}]
wire _tail_collisions_T_63 = _tail_collisions_T_60 & _tail_collisions_T_62; // @[fetch-buffer.scala:155:{31,45,61}]
wire _tail_collisions_WIRE_15 = _tail_collisions_T_63; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_67 = _tail_collisions_T_64; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_65 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_16 = _tail_collisions_T_67; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_71 = _tail_collisions_T_68; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_69 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_17 = _tail_collisions_T_71; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_72 = head[6]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_76 = head[6]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_80 = head[6]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_73 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_T_74 = _tail_collisions_T_73; // @[fetch-buffer.scala:155:{49,61}]
wire _tail_collisions_T_75 = _tail_collisions_T_72 & _tail_collisions_T_74; // @[fetch-buffer.scala:155:{31,45,61}]
wire _tail_collisions_WIRE_18 = _tail_collisions_T_75; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_79 = _tail_collisions_T_76; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_77 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_19 = _tail_collisions_T_79; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_83 = _tail_collisions_T_80; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_81 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_20 = _tail_collisions_T_83; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_84 = head[7]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_88 = head[7]; // @[fetch-buffer.scala:61:21, :155:31]
wire _tail_collisions_T_92 = head[7]; // @[fetch-buffer.scala:61:21, :155:31]
wire _head_T_1 = head[7]; // @[fetch-buffer.scala:61:21, :132:24, :155:31]
wire _tail_collisions_T_85 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_T_86 = _tail_collisions_T_85; // @[fetch-buffer.scala:155:{49,61}]
wire _tail_collisions_T_87 = _tail_collisions_T_84 & _tail_collisions_T_86; // @[fetch-buffer.scala:155:{31,45,61}]
wire _tail_collisions_WIRE_21 = _tail_collisions_T_87; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_91 = _tail_collisions_T_88; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_89 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_22 = _tail_collisions_T_91; // @[fetch-buffer.scala:154:32, :155:45]
wire _tail_collisions_T_95 = _tail_collisions_T_92; // @[fetch-buffer.scala:155:{31,45}]
wire _tail_collisions_T_93 = ~maybe_full; // @[fetch-buffer.scala:64:27, :155:49]
wire _tail_collisions_WIRE_23 = _tail_collisions_T_95; // @[fetch-buffer.scala:154:32, :155:45]
wire [1:0] tail_collisions_lo_lo_lo_hi = {_tail_collisions_WIRE_2, _tail_collisions_WIRE_1}; // @[fetch-buffer.scala:154:32, :155:90]
wire [2:0] tail_collisions_lo_lo_lo = {tail_collisions_lo_lo_lo_hi, _tail_collisions_WIRE_0}; // @[fetch-buffer.scala:154:32, :155:90]
wire [1:0] tail_collisions_lo_lo_hi_hi = {_tail_collisions_WIRE_5, _tail_collisions_WIRE_4}; // @[fetch-buffer.scala:154:32, :155:90]
wire [2:0] tail_collisions_lo_lo_hi = {tail_collisions_lo_lo_hi_hi, _tail_collisions_WIRE_3}; // @[fetch-buffer.scala:154:32, :155:90]
wire [5:0] tail_collisions_lo_lo = {tail_collisions_lo_lo_hi, tail_collisions_lo_lo_lo}; // @[fetch-buffer.scala:155:90]
wire [1:0] tail_collisions_lo_hi_lo_hi = {_tail_collisions_WIRE_8, _tail_collisions_WIRE_7}; // @[fetch-buffer.scala:154:32, :155:90]
wire [2:0] tail_collisions_lo_hi_lo = {tail_collisions_lo_hi_lo_hi, _tail_collisions_WIRE_6}; // @[fetch-buffer.scala:154:32, :155:90]
wire [1:0] tail_collisions_lo_hi_hi_hi = {_tail_collisions_WIRE_11, _tail_collisions_WIRE_10}; // @[fetch-buffer.scala:154:32, :155:90]
wire [2:0] tail_collisions_lo_hi_hi = {tail_collisions_lo_hi_hi_hi, _tail_collisions_WIRE_9}; // @[fetch-buffer.scala:154:32, :155:90]
wire [5:0] tail_collisions_lo_hi = {tail_collisions_lo_hi_hi, tail_collisions_lo_hi_lo}; // @[fetch-buffer.scala:155:90]
wire [11:0] tail_collisions_lo = {tail_collisions_lo_hi, tail_collisions_lo_lo}; // @[fetch-buffer.scala:155:90]
wire [1:0] tail_collisions_hi_lo_lo_hi = {_tail_collisions_WIRE_14, _tail_collisions_WIRE_13}; // @[fetch-buffer.scala:154:32, :155:90]
wire [2:0] tail_collisions_hi_lo_lo = {tail_collisions_hi_lo_lo_hi, _tail_collisions_WIRE_12}; // @[fetch-buffer.scala:154:32, :155:90]
wire [1:0] tail_collisions_hi_lo_hi_hi = {_tail_collisions_WIRE_17, _tail_collisions_WIRE_16}; // @[fetch-buffer.scala:154:32, :155:90]
wire [2:0] tail_collisions_hi_lo_hi = {tail_collisions_hi_lo_hi_hi, _tail_collisions_WIRE_15}; // @[fetch-buffer.scala:154:32, :155:90]
wire [5:0] tail_collisions_hi_lo = {tail_collisions_hi_lo_hi, tail_collisions_hi_lo_lo}; // @[fetch-buffer.scala:155:90]
wire [1:0] tail_collisions_hi_hi_lo_hi = {_tail_collisions_WIRE_20, _tail_collisions_WIRE_19}; // @[fetch-buffer.scala:154:32, :155:90]
wire [2:0] tail_collisions_hi_hi_lo = {tail_collisions_hi_hi_lo_hi, _tail_collisions_WIRE_18}; // @[fetch-buffer.scala:154:32, :155:90]
wire [1:0] tail_collisions_hi_hi_hi_hi = {_tail_collisions_WIRE_23, _tail_collisions_WIRE_22}; // @[fetch-buffer.scala:154:32, :155:90]
wire [2:0] tail_collisions_hi_hi_hi = {tail_collisions_hi_hi_hi_hi, _tail_collisions_WIRE_21}; // @[fetch-buffer.scala:154:32, :155:90]
wire [5:0] tail_collisions_hi_hi = {tail_collisions_hi_hi_hi, tail_collisions_hi_hi_lo}; // @[fetch-buffer.scala:155:90]
wire [11:0] tail_collisions_hi = {tail_collisions_hi_hi, tail_collisions_hi_lo}; // @[fetch-buffer.scala:155:90]
wire [23:0] _tail_collisions_T_96 = {tail_collisions_hi, tail_collisions_lo}; // @[fetch-buffer.scala:155:90]
wire [23:0] tail_collisions = _tail_collisions_T_96 & tail; // @[fetch-buffer.scala:62:21, :155:{90,97}]
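  // slot_will_hit_tail: fold the collision bits down to one bit per slot of the
  // head row; will_hit_tail means that row is only partially written, so the full
  // row cannot be popped this cycle (do_deq).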
wire [2:0] _slot_will_hit_tail_T = tail_collisions[2:0]; // @[fetch-buffer.scala:155:97, :156:70]
wire [2:0] _slot_will_hit_tail_T_1 = tail_collisions[5:3]; // @[fetch-buffer.scala:155:97, :156:70]
wire [2:0] _slot_will_hit_tail_T_2 = tail_collisions[8:6]; // @[fetch-buffer.scala:155:97, :156:70]
wire [2:0] _slot_will_hit_tail_T_3 = tail_collisions[11:9]; // @[fetch-buffer.scala:155:97, :156:70]
wire [2:0] _slot_will_hit_tail_T_4 = tail_collisions[14:12]; // @[fetch-buffer.scala:155:97, :156:70]
wire [2:0] _slot_will_hit_tail_T_5 = tail_collisions[17:15]; // @[fetch-buffer.scala:155:97, :156:70]
wire [2:0] _slot_will_hit_tail_T_6 = tail_collisions[20:18]; // @[fetch-buffer.scala:155:97, :156:70]
wire [2:0] _slot_will_hit_tail_T_7 = tail_collisions[23:21]; // @[fetch-buffer.scala:155:97, :156:70]
wire [2:0] _slot_will_hit_tail_T_8 = _slot_will_hit_tail_T | _slot_will_hit_tail_T_1; // @[fetch-buffer.scala:156:{70,112}]
wire [2:0] _slot_will_hit_tail_T_9 = _slot_will_hit_tail_T_8 | _slot_will_hit_tail_T_2; // @[fetch-buffer.scala:156:{70,112}]
wire [2:0] _slot_will_hit_tail_T_10 = _slot_will_hit_tail_T_9 | _slot_will_hit_tail_T_3; // @[fetch-buffer.scala:156:{70,112}]
wire [2:0] _slot_will_hit_tail_T_11 = _slot_will_hit_tail_T_10 | _slot_will_hit_tail_T_4; // @[fetch-buffer.scala:156:{70,112}]
wire [2:0] _slot_will_hit_tail_T_12 = _slot_will_hit_tail_T_11 | _slot_will_hit_tail_T_5; // @[fetch-buffer.scala:156:{70,112}]
wire [2:0] _slot_will_hit_tail_T_13 = _slot_will_hit_tail_T_12 | _slot_will_hit_tail_T_6; // @[fetch-buffer.scala:156:{70,112}]
wire [2:0] slot_will_hit_tail = _slot_will_hit_tail_T_13 | _slot_will_hit_tail_T_7; // @[fetch-buffer.scala:156:{70,112}]
wire will_hit_tail = |slot_will_hit_tail; // @[fetch-buffer.scala:156:112, :157:42]
wire _do_deq_T = ~will_hit_tail; // @[fetch-buffer.scala:157:42, :159:32]
wire do_deq = io_deq_ready_0 & _do_deq_T; // @[fetch-buffer.scala:40:7, :159:{29,32}]
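  // deq_valids: via an upper mask of slot_will_hit_tail, mark as valid only the
  // head-row slots strictly below the slot the tail occupies (all slots when there
  // is no collision).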
wire [3:0] _deq_valids_T = {1'h0, slot_will_hit_tail}; // @[util.scala:384:30]
wire [2:0] _deq_valids_T_1 = _deq_valids_T[2:0]; // @[util.scala:384:{30,37}]
wire [3:0] _deq_valids_T_2 = {slot_will_hit_tail, 1'h0}; // @[util.scala:384:30]
wire [2:0] _deq_valids_T_3 = _deq_valids_T_2[2:0]; // @[util.scala:384:{30,37}]
wire [5:0] _deq_valids_T_4 = {1'h0, slot_will_hit_tail, 2'h0}; // @[util.scala:384:30]
wire [2:0] _deq_valids_T_5 = _deq_valids_T_4[2:0]; // @[util.scala:384:{30,37}]
wire [2:0] _deq_valids_T_6 = _deq_valids_T_1 | _deq_valids_T_3; // @[util.scala:384:{37,54}]
wire [2:0] _deq_valids_T_7 = _deq_valids_T_6 | _deq_valids_T_5; // @[util.scala:384:{37,54}]
wire [2:0] _deq_valids_T_8 = ~_deq_valids_T_7; // @[util.scala:384:54]
wire deq_valids_0 = _deq_valids_T_8[0]; // @[fetch-buffer.scala:161:{21,53}]
wire deq_valids_1 = _deq_valids_T_8[1]; // @[fetch-buffer.scala:161:{21,53}]
wire deq_valids_2 = _deq_valids_T_8[2]; // @[fetch-buffer.scala:161:{21,53}]
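  // Dequeue data path: one-hot mux on the head row pointer, selecting each field of
  // the micro-ops in the row presented to io.deq.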
assign io_deq_bits_uops_0_bits_debug_fsrc_0 = (head[0] ? deq_vec_0_0_debug_fsrc : 2'h0) | (head[1] ? deq_vec_1_0_debug_fsrc : 2'h0) | (head[2] ? deq_vec_2_0_debug_fsrc : 2'h0) | (head[3] ? deq_vec_3_0_debug_fsrc : 2'h0) | (head[4] ? deq_vec_4_0_debug_fsrc : 2'h0) | (head[5] ? deq_vec_5_0_debug_fsrc : 2'h0) | (head[6] ? deq_vec_6_0_debug_fsrc : 2'h0) | (head[7] ? deq_vec_7_0_debug_fsrc : 2'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_0_bits_bp_xcpt_if_0 = head[0] & deq_vec_0_0_bp_xcpt_if | head[1] & deq_vec_1_0_bp_xcpt_if | head[2] & deq_vec_2_0_bp_xcpt_if | head[3] & deq_vec_3_0_bp_xcpt_if | head[4] & deq_vec_4_0_bp_xcpt_if | head[5] & deq_vec_5_0_bp_xcpt_if | head[6] & deq_vec_6_0_bp_xcpt_if | head[7] & deq_vec_7_0_bp_xcpt_if; // @[Mux.scala:30:73]
assign io_deq_bits_uops_0_bits_bp_debug_if_0 = head[0] & deq_vec_0_0_bp_debug_if | head[1] & deq_vec_1_0_bp_debug_if | head[2] & deq_vec_2_0_bp_debug_if | head[3] & deq_vec_3_0_bp_debug_if | head[4] & deq_vec_4_0_bp_debug_if | head[5] & deq_vec_5_0_bp_debug_if | head[6] & deq_vec_6_0_bp_debug_if | head[7] & deq_vec_7_0_bp_debug_if; // @[Mux.scala:30:73]
assign io_deq_bits_uops_0_bits_xcpt_ae_if_0 = head[0] & deq_vec_0_0_xcpt_ae_if | head[1] & deq_vec_1_0_xcpt_ae_if | head[2] & deq_vec_2_0_xcpt_ae_if | head[3] & deq_vec_3_0_xcpt_ae_if | head[4] & deq_vec_4_0_xcpt_ae_if | head[5] & deq_vec_5_0_xcpt_ae_if | head[6] & deq_vec_6_0_xcpt_ae_if | head[7] & deq_vec_7_0_xcpt_ae_if; // @[Mux.scala:30:73]
assign io_deq_bits_uops_0_bits_xcpt_pf_if_0 = head[0] & deq_vec_0_0_xcpt_pf_if | head[1] & deq_vec_1_0_xcpt_pf_if | head[2] & deq_vec_2_0_xcpt_pf_if | head[3] & deq_vec_3_0_xcpt_pf_if | head[4] & deq_vec_4_0_xcpt_pf_if | head[5] & deq_vec_5_0_xcpt_pf_if | head[6] & deq_vec_6_0_xcpt_pf_if | head[7] & deq_vec_7_0_xcpt_pf_if; // @[Mux.scala:30:73]
assign io_deq_bits_uops_0_bits_taken_0 = head[0] & deq_vec_0_0_taken | head[1] & deq_vec_1_0_taken | head[2] & deq_vec_2_0_taken | head[3] & deq_vec_3_0_taken | head[4] & deq_vec_4_0_taken | head[5] & deq_vec_5_0_taken | head[6] & deq_vec_6_0_taken | head[7] & deq_vec_7_0_taken; // @[Mux.scala:30:73]
assign io_deq_bits_uops_0_bits_pc_lob_0 = (head[0] ? deq_vec_0_0_pc_lob : 6'h0) | (head[1] ? deq_vec_1_0_pc_lob : 6'h0) | (head[2] ? deq_vec_2_0_pc_lob : 6'h0) | (head[3] ? deq_vec_3_0_pc_lob : 6'h0) | (head[4] ? deq_vec_4_0_pc_lob : 6'h0) | (head[5] ? deq_vec_5_0_pc_lob : 6'h0) | (head[6] ? deq_vec_6_0_pc_lob : 6'h0) | (head[7] ? deq_vec_7_0_pc_lob : 6'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_0_bits_edge_inst_0 = head[0] & deq_vec_0_0_edge_inst | head[1] & deq_vec_1_0_edge_inst | head[2] & deq_vec_2_0_edge_inst | head[3] & deq_vec_3_0_edge_inst | head[4] & deq_vec_4_0_edge_inst | head[5] & deq_vec_5_0_edge_inst | head[6] & deq_vec_6_0_edge_inst | head[7] & deq_vec_7_0_edge_inst; // @[Mux.scala:30:73]
assign io_deq_bits_uops_0_bits_ftq_idx_0 = (head[0] ? deq_vec_0_0_ftq_idx : 5'h0) | (head[1] ? deq_vec_1_0_ftq_idx : 5'h0) | (head[2] ? deq_vec_2_0_ftq_idx : 5'h0) | (head[3] ? deq_vec_3_0_ftq_idx : 5'h0) | (head[4] ? deq_vec_4_0_ftq_idx : 5'h0) | (head[5] ? deq_vec_5_0_ftq_idx : 5'h0) | (head[6] ? deq_vec_6_0_ftq_idx : 5'h0) | (head[7] ? deq_vec_7_0_ftq_idx : 5'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_0_bits_is_sfb_0 = head[0] & deq_vec_0_0_is_sfb | head[1] & deq_vec_1_0_is_sfb | head[2] & deq_vec_2_0_is_sfb | head[3] & deq_vec_3_0_is_sfb | head[4] & deq_vec_4_0_is_sfb | head[5] & deq_vec_5_0_is_sfb | head[6] & deq_vec_6_0_is_sfb | head[7] & deq_vec_7_0_is_sfb; // @[Mux.scala:30:73]
assign io_deq_bits_uops_0_bits_debug_pc_0 = (head[0] ? deq_vec_0_0_debug_pc : 40'h0) | (head[1] ? deq_vec_1_0_debug_pc : 40'h0) | (head[2] ? deq_vec_2_0_debug_pc : 40'h0) | (head[3] ? deq_vec_3_0_debug_pc : 40'h0) | (head[4] ? deq_vec_4_0_debug_pc : 40'h0) | (head[5] ? deq_vec_5_0_debug_pc : 40'h0) | (head[6] ? deq_vec_6_0_debug_pc : 40'h0) | (head[7] ? deq_vec_7_0_debug_pc : 40'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_0_bits_is_rvc_0 = head[0] & deq_vec_0_0_is_rvc | head[1] & deq_vec_1_0_is_rvc | head[2] & deq_vec_2_0_is_rvc | head[3] & deq_vec_3_0_is_rvc | head[4] & deq_vec_4_0_is_rvc | head[5] & deq_vec_5_0_is_rvc | head[6] & deq_vec_6_0_is_rvc | head[7] & deq_vec_7_0_is_rvc; // @[Mux.scala:30:73]
assign io_deq_bits_uops_0_bits_debug_inst_0 = (head[0] ? deq_vec_0_0_debug_inst : 32'h0) | (head[1] ? deq_vec_1_0_debug_inst : 32'h0) | (head[2] ? deq_vec_2_0_debug_inst : 32'h0) | (head[3] ? deq_vec_3_0_debug_inst : 32'h0) | (head[4] ? deq_vec_4_0_debug_inst : 32'h0) | (head[5] ? deq_vec_5_0_debug_inst : 32'h0) | (head[6] ? deq_vec_6_0_debug_inst : 32'h0) | (head[7] ? deq_vec_7_0_debug_inst : 32'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_0_bits_inst_0 = (head[0] ? deq_vec_0_0_inst : 32'h0) | (head[1] ? deq_vec_1_0_inst : 32'h0) | (head[2] ? deq_vec_2_0_inst : 32'h0) | (head[3] ? deq_vec_3_0_inst : 32'h0) | (head[4] ? deq_vec_4_0_inst : 32'h0) | (head[5] ? deq_vec_5_0_inst : 32'h0) | (head[6] ? deq_vec_6_0_inst : 32'h0) | (head[7] ? deq_vec_7_0_inst : 32'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_debug_fsrc_0 = (head[0] ? deq_vec_0_1_debug_fsrc : 2'h0) | (head[1] ? deq_vec_1_1_debug_fsrc : 2'h0) | (head[2] ? deq_vec_2_1_debug_fsrc : 2'h0) | (head[3] ? deq_vec_3_1_debug_fsrc : 2'h0) | (head[4] ? deq_vec_4_1_debug_fsrc : 2'h0) | (head[5] ? deq_vec_5_1_debug_fsrc : 2'h0) | (head[6] ? deq_vec_6_1_debug_fsrc : 2'h0) | (head[7] ? deq_vec_7_1_debug_fsrc : 2'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_bp_xcpt_if_0 = head[0] & deq_vec_0_1_bp_xcpt_if | head[1] & deq_vec_1_1_bp_xcpt_if | head[2] & deq_vec_2_1_bp_xcpt_if | head[3] & deq_vec_3_1_bp_xcpt_if | head[4] & deq_vec_4_1_bp_xcpt_if | head[5] & deq_vec_5_1_bp_xcpt_if | head[6] & deq_vec_6_1_bp_xcpt_if | head[7] & deq_vec_7_1_bp_xcpt_if; // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_bp_debug_if_0 = head[0] & deq_vec_0_1_bp_debug_if | head[1] & deq_vec_1_1_bp_debug_if | head[2] & deq_vec_2_1_bp_debug_if | head[3] & deq_vec_3_1_bp_debug_if | head[4] & deq_vec_4_1_bp_debug_if | head[5] & deq_vec_5_1_bp_debug_if | head[6] & deq_vec_6_1_bp_debug_if | head[7] & deq_vec_7_1_bp_debug_if; // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_xcpt_ae_if_0 = head[0] & deq_vec_0_1_xcpt_ae_if | head[1] & deq_vec_1_1_xcpt_ae_if | head[2] & deq_vec_2_1_xcpt_ae_if | head[3] & deq_vec_3_1_xcpt_ae_if | head[4] & deq_vec_4_1_xcpt_ae_if | head[5] & deq_vec_5_1_xcpt_ae_if | head[6] & deq_vec_6_1_xcpt_ae_if | head[7] & deq_vec_7_1_xcpt_ae_if; // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_xcpt_pf_if_0 = head[0] & deq_vec_0_1_xcpt_pf_if | head[1] & deq_vec_1_1_xcpt_pf_if | head[2] & deq_vec_2_1_xcpt_pf_if | head[3] & deq_vec_3_1_xcpt_pf_if | head[4] & deq_vec_4_1_xcpt_pf_if | head[5] & deq_vec_5_1_xcpt_pf_if | head[6] & deq_vec_6_1_xcpt_pf_if | head[7] & deq_vec_7_1_xcpt_pf_if; // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_taken_0 = head[0] & deq_vec_0_1_taken | head[1] & deq_vec_1_1_taken | head[2] & deq_vec_2_1_taken | head[3] & deq_vec_3_1_taken | head[4] & deq_vec_4_1_taken | head[5] & deq_vec_5_1_taken | head[6] & deq_vec_6_1_taken | head[7] & deq_vec_7_1_taken; // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_pc_lob_0 = (head[0] ? deq_vec_0_1_pc_lob : 6'h0) | (head[1] ? deq_vec_1_1_pc_lob : 6'h0) | (head[2] ? deq_vec_2_1_pc_lob : 6'h0) | (head[3] ? deq_vec_3_1_pc_lob : 6'h0) | (head[4] ? deq_vec_4_1_pc_lob : 6'h0) | (head[5] ? deq_vec_5_1_pc_lob : 6'h0) | (head[6] ? deq_vec_6_1_pc_lob : 6'h0) | (head[7] ? deq_vec_7_1_pc_lob : 6'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_edge_inst_0 = head[0] & deq_vec_0_1_edge_inst | head[1] & deq_vec_1_1_edge_inst | head[2] & deq_vec_2_1_edge_inst | head[3] & deq_vec_3_1_edge_inst | head[4] & deq_vec_4_1_edge_inst | head[5] & deq_vec_5_1_edge_inst | head[6] & deq_vec_6_1_edge_inst | head[7] & deq_vec_7_1_edge_inst; // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_ftq_idx_0 = (head[0] ? deq_vec_0_1_ftq_idx : 5'h0) | (head[1] ? deq_vec_1_1_ftq_idx : 5'h0) | (head[2] ? deq_vec_2_1_ftq_idx : 5'h0) | (head[3] ? deq_vec_3_1_ftq_idx : 5'h0) | (head[4] ? deq_vec_4_1_ftq_idx : 5'h0) | (head[5] ? deq_vec_5_1_ftq_idx : 5'h0) | (head[6] ? deq_vec_6_1_ftq_idx : 5'h0) | (head[7] ? deq_vec_7_1_ftq_idx : 5'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_is_sfb_0 = head[0] & deq_vec_0_1_is_sfb | head[1] & deq_vec_1_1_is_sfb | head[2] & deq_vec_2_1_is_sfb | head[3] & deq_vec_3_1_is_sfb | head[4] & deq_vec_4_1_is_sfb | head[5] & deq_vec_5_1_is_sfb | head[6] & deq_vec_6_1_is_sfb | head[7] & deq_vec_7_1_is_sfb; // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_debug_pc_0 = (head[0] ? deq_vec_0_1_debug_pc : 40'h0) | (head[1] ? deq_vec_1_1_debug_pc : 40'h0) | (head[2] ? deq_vec_2_1_debug_pc : 40'h0) | (head[3] ? deq_vec_3_1_debug_pc : 40'h0) | (head[4] ? deq_vec_4_1_debug_pc : 40'h0) | (head[5] ? deq_vec_5_1_debug_pc : 40'h0) | (head[6] ? deq_vec_6_1_debug_pc : 40'h0) | (head[7] ? deq_vec_7_1_debug_pc : 40'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_is_rvc_0 = head[0] & deq_vec_0_1_is_rvc | head[1] & deq_vec_1_1_is_rvc | head[2] & deq_vec_2_1_is_rvc | head[3] & deq_vec_3_1_is_rvc | head[4] & deq_vec_4_1_is_rvc | head[5] & deq_vec_5_1_is_rvc | head[6] & deq_vec_6_1_is_rvc | head[7] & deq_vec_7_1_is_rvc; // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_debug_inst_0 = (head[0] ? deq_vec_0_1_debug_inst : 32'h0) | (head[1] ? deq_vec_1_1_debug_inst : 32'h0) | (head[2] ? deq_vec_2_1_debug_inst : 32'h0) | (head[3] ? deq_vec_3_1_debug_inst : 32'h0) | (head[4] ? deq_vec_4_1_debug_inst : 32'h0) | (head[5] ? deq_vec_5_1_debug_inst : 32'h0) | (head[6] ? deq_vec_6_1_debug_inst : 32'h0) | (head[7] ? deq_vec_7_1_debug_inst : 32'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_1_bits_inst_0 = (head[0] ? deq_vec_0_1_inst : 32'h0) | (head[1] ? deq_vec_1_1_inst : 32'h0) | (head[2] ? deq_vec_2_1_inst : 32'h0) | (head[3] ? deq_vec_3_1_inst : 32'h0) | (head[4] ? deq_vec_4_1_inst : 32'h0) | (head[5] ? deq_vec_5_1_inst : 32'h0) | (head[6] ? deq_vec_6_1_inst : 32'h0) | (head[7] ? deq_vec_7_1_inst : 32'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_debug_fsrc_0 = (head[0] ? deq_vec_0_2_debug_fsrc : 2'h0) | (head[1] ? deq_vec_1_2_debug_fsrc : 2'h0) | (head[2] ? deq_vec_2_2_debug_fsrc : 2'h0) | (head[3] ? deq_vec_3_2_debug_fsrc : 2'h0) | (head[4] ? deq_vec_4_2_debug_fsrc : 2'h0) | (head[5] ? deq_vec_5_2_debug_fsrc : 2'h0) | (head[6] ? deq_vec_6_2_debug_fsrc : 2'h0) | (head[7] ? deq_vec_7_2_debug_fsrc : 2'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_bp_xcpt_if_0 = head[0] & deq_vec_0_2_bp_xcpt_if | head[1] & deq_vec_1_2_bp_xcpt_if | head[2] & deq_vec_2_2_bp_xcpt_if | head[3] & deq_vec_3_2_bp_xcpt_if | head[4] & deq_vec_4_2_bp_xcpt_if | head[5] & deq_vec_5_2_bp_xcpt_if | head[6] & deq_vec_6_2_bp_xcpt_if | head[7] & deq_vec_7_2_bp_xcpt_if; // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_bp_debug_if_0 = head[0] & deq_vec_0_2_bp_debug_if | head[1] & deq_vec_1_2_bp_debug_if | head[2] & deq_vec_2_2_bp_debug_if | head[3] & deq_vec_3_2_bp_debug_if | head[4] & deq_vec_4_2_bp_debug_if | head[5] & deq_vec_5_2_bp_debug_if | head[6] & deq_vec_6_2_bp_debug_if | head[7] & deq_vec_7_2_bp_debug_if; // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_xcpt_ae_if_0 = head[0] & deq_vec_0_2_xcpt_ae_if | head[1] & deq_vec_1_2_xcpt_ae_if | head[2] & deq_vec_2_2_xcpt_ae_if | head[3] & deq_vec_3_2_xcpt_ae_if | head[4] & deq_vec_4_2_xcpt_ae_if | head[5] & deq_vec_5_2_xcpt_ae_if | head[6] & deq_vec_6_2_xcpt_ae_if | head[7] & deq_vec_7_2_xcpt_ae_if; // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_xcpt_pf_if_0 = head[0] & deq_vec_0_2_xcpt_pf_if | head[1] & deq_vec_1_2_xcpt_pf_if | head[2] & deq_vec_2_2_xcpt_pf_if | head[3] & deq_vec_3_2_xcpt_pf_if | head[4] & deq_vec_4_2_xcpt_pf_if | head[5] & deq_vec_5_2_xcpt_pf_if | head[6] & deq_vec_6_2_xcpt_pf_if | head[7] & deq_vec_7_2_xcpt_pf_if; // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_taken_0 = head[0] & deq_vec_0_2_taken | head[1] & deq_vec_1_2_taken | head[2] & deq_vec_2_2_taken | head[3] & deq_vec_3_2_taken | head[4] & deq_vec_4_2_taken | head[5] & deq_vec_5_2_taken | head[6] & deq_vec_6_2_taken | head[7] & deq_vec_7_2_taken; // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_pc_lob_0 = (head[0] ? deq_vec_0_2_pc_lob : 6'h0) | (head[1] ? deq_vec_1_2_pc_lob : 6'h0) | (head[2] ? deq_vec_2_2_pc_lob : 6'h0) | (head[3] ? deq_vec_3_2_pc_lob : 6'h0) | (head[4] ? deq_vec_4_2_pc_lob : 6'h0) | (head[5] ? deq_vec_5_2_pc_lob : 6'h0) | (head[6] ? deq_vec_6_2_pc_lob : 6'h0) | (head[7] ? deq_vec_7_2_pc_lob : 6'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_edge_inst_0 = head[0] & deq_vec_0_2_edge_inst | head[1] & deq_vec_1_2_edge_inst | head[2] & deq_vec_2_2_edge_inst | head[3] & deq_vec_3_2_edge_inst | head[4] & deq_vec_4_2_edge_inst | head[5] & deq_vec_5_2_edge_inst | head[6] & deq_vec_6_2_edge_inst | head[7] & deq_vec_7_2_edge_inst; // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_ftq_idx_0 = (head[0] ? deq_vec_0_2_ftq_idx : 5'h0) | (head[1] ? deq_vec_1_2_ftq_idx : 5'h0) | (head[2] ? deq_vec_2_2_ftq_idx : 5'h0) | (head[3] ? deq_vec_3_2_ftq_idx : 5'h0) | (head[4] ? deq_vec_4_2_ftq_idx : 5'h0) | (head[5] ? deq_vec_5_2_ftq_idx : 5'h0) | (head[6] ? deq_vec_6_2_ftq_idx : 5'h0) | (head[7] ? deq_vec_7_2_ftq_idx : 5'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_is_sfb_0 = head[0] & deq_vec_0_2_is_sfb | head[1] & deq_vec_1_2_is_sfb | head[2] & deq_vec_2_2_is_sfb | head[3] & deq_vec_3_2_is_sfb | head[4] & deq_vec_4_2_is_sfb | head[5] & deq_vec_5_2_is_sfb | head[6] & deq_vec_6_2_is_sfb | head[7] & deq_vec_7_2_is_sfb; // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_debug_pc_0 = (head[0] ? deq_vec_0_2_debug_pc : 40'h0) | (head[1] ? deq_vec_1_2_debug_pc : 40'h0) | (head[2] ? deq_vec_2_2_debug_pc : 40'h0) | (head[3] ? deq_vec_3_2_debug_pc : 40'h0) | (head[4] ? deq_vec_4_2_debug_pc : 40'h0) | (head[5] ? deq_vec_5_2_debug_pc : 40'h0) | (head[6] ? deq_vec_6_2_debug_pc : 40'h0) | (head[7] ? deq_vec_7_2_debug_pc : 40'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_is_rvc_0 = head[0] & deq_vec_0_2_is_rvc | head[1] & deq_vec_1_2_is_rvc | head[2] & deq_vec_2_2_is_rvc | head[3] & deq_vec_3_2_is_rvc | head[4] & deq_vec_4_2_is_rvc | head[5] & deq_vec_5_2_is_rvc | head[6] & deq_vec_6_2_is_rvc | head[7] & deq_vec_7_2_is_rvc; // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_debug_inst_0 = (head[0] ? deq_vec_0_2_debug_inst : 32'h0) | (head[1] ? deq_vec_1_2_debug_inst : 32'h0) | (head[2] ? deq_vec_2_2_debug_inst : 32'h0) | (head[3] ? deq_vec_3_2_debug_inst : 32'h0) | (head[4] ? deq_vec_4_2_debug_inst : 32'h0) | (head[5] ? deq_vec_5_2_debug_inst : 32'h0) | (head[6] ? deq_vec_6_2_debug_inst : 32'h0) | (head[7] ? deq_vec_7_2_debug_inst : 32'h0); // @[Mux.scala:30:73]
assign io_deq_bits_uops_2_bits_inst_0 = (head[0] ? deq_vec_0_2_inst : 32'h0) | (head[1] ? deq_vec_1_2_inst : 32'h0) | (head[2] ? deq_vec_2_2_inst : 32'h0) | (head[3] ? deq_vec_3_2_inst : 32'h0) | (head[4] ? deq_vec_4_2_inst : 32'h0) | (head[5] ? deq_vec_5_2_inst : 32'h0) | (head[6] ? deq_vec_6_2_inst : 32'h0) | (head[7] ? deq_vec_7_2_inst : 32'h0); // @[Mux.scala:30:73]
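  // The Mux1H-style assigns above read out the dequeue row selected by the
  // one-hot `head` pointer; the OR-reduction below marks the deq bundle valid
  // whenever any slot of that selected row is valid.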
wire _io_deq_valid_T = deq_valids_0 | deq_valids_1; // @[fetch-buffer.scala:161:53, :170:38]
assign _io_deq_valid_T_1 = _io_deq_valid_T | deq_valids_2; // @[fetch-buffer.scala:161:53, :170:38]
assign io_deq_valid_0 = _io_deq_valid_T_1; // @[fetch-buffer.scala:40:7, :170:38]
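  // Candidate next value of the one-hot head pointer: head[6:0] shifted up by
  // one position, with the wrapped-around low bit supplied by _head_T_1
  // (computed earlier in the module).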
wire [6:0] _head_T = head[6:0]; // @[fetch-buffer.scala:61:21, :132:12]
wire [7:0] _head_T_2 = {_head_T, _head_T_1}; // @[fetch-buffer.scala:132:{8,12,24}]
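  // The per-uop valid outputs are forced low while the module is in reset.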
assign io_deq_bits_uops_0_valid_0 = ~reset & deq_valids_0; // @[fetch-buffer.scala:40:7, :161:53, :168:72, :195:23, :196:41]
assign io_deq_bits_uops_1_valid_0 = ~reset & deq_valids_1; // @[fetch-buffer.scala:40:7, :161:53, :168:72, :195:23, :196:41]
assign io_deq_bits_uops_2_valid_0 = ~reset & deq_valids_2; // @[fetch-buffer.scala:40:7, :161:53, :168:72, :195:23, :196:41]
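  // Per-slot write enables for the uop RAM: slot j is written from input uop i
  // when an enqueue fires (do_enq), input i is present in the incoming fetch
  // packet (in_mask_i), and bit j of input i's one-hot enqueue index is set
  // (enq_idxs_i[j]).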
wire _T_101 = do_enq & in_mask_0; // @[fetch-buffer.scala:82:16, :87:21, :144:20]
wire _T_34 = _T_101 & enq_idxs_0[0]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_37 = _T_101 & enq_idxs_0[1]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_40 = _T_101 & enq_idxs_0[2]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_43 = _T_101 & enq_idxs_0[3]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_46 = _T_101 & enq_idxs_0[4]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_49 = _T_101 & enq_idxs_0[5]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_52 = _T_101 & enq_idxs_0[6]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_55 = _T_101 & enq_idxs_0[7]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_58 = _T_101 & enq_idxs_0[8]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_61 = _T_101 & enq_idxs_0[9]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_64 = _T_101 & enq_idxs_0[10]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_67 = _T_101 & enq_idxs_0[11]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_70 = _T_101 & enq_idxs_0[12]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_73 = _T_101 & enq_idxs_0[13]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_76 = _T_101 & enq_idxs_0[14]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_79 = _T_101 & enq_idxs_0[15]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_82 = _T_101 & enq_idxs_0[16]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_85 = _T_101 & enq_idxs_0[17]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_88 = _T_101 & enq_idxs_0[18]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_91 = _T_101 & enq_idxs_0[19]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_94 = _T_101 & enq_idxs_0[20]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_97 = _T_101 & enq_idxs_0[21]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_100 = _T_101 & enq_idxs_0[22]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_103 = _T_101 & enq_idxs_0[23]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_173 = do_enq & in_mask_1; // @[fetch-buffer.scala:82:16, :87:21, :144:20]
wire _T_106 = _T_173 & enq_idxs_1[0]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_109 = _T_173 & enq_idxs_1[1]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_112 = _T_173 & enq_idxs_1[2]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_115 = _T_173 & enq_idxs_1[3]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_118 = _T_173 & enq_idxs_1[4]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_121 = _T_173 & enq_idxs_1[5]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_124 = _T_173 & enq_idxs_1[6]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_127 = _T_173 & enq_idxs_1[7]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_130 = _T_173 & enq_idxs_1[8]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_133 = _T_173 & enq_idxs_1[9]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_136 = _T_173 & enq_idxs_1[10]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_139 = _T_173 & enq_idxs_1[11]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_142 = _T_173 & enq_idxs_1[12]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_145 = _T_173 & enq_idxs_1[13]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_148 = _T_173 & enq_idxs_1[14]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_151 = _T_173 & enq_idxs_1[15]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_154 = _T_173 & enq_idxs_1[16]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_157 = _T_173 & enq_idxs_1[17]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_160 = _T_173 & enq_idxs_1[18]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_163 = _T_173 & enq_idxs_1[19]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_166 = _T_173 & enq_idxs_1[20]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_169 = _T_173 & enq_idxs_1[21]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_172 = _T_173 & enq_idxs_1[22]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_175 = _T_173 & enq_idxs_1[23]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_245 = do_enq & in_mask_2; // @[fetch-buffer.scala:82:16, :87:21, :144:20]
wire _T_178 = _T_245 & enq_idxs_2[0]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_181 = _T_245 & enq_idxs_2[1]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_184 = _T_245 & enq_idxs_2[2]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_187 = _T_245 & enq_idxs_2[3]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_190 = _T_245 & enq_idxs_2[4]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_193 = _T_245 & enq_idxs_2[5]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_196 = _T_245 & enq_idxs_2[6]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_199 = _T_245 & enq_idxs_2[7]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_202 = _T_245 & enq_idxs_2[8]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_205 = _T_245 & enq_idxs_2[9]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_208 = _T_245 & enq_idxs_2[10]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_211 = _T_245 & enq_idxs_2[11]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_214 = _T_245 & enq_idxs_2[12]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_217 = _T_245 & enq_idxs_2[13]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_220 = _T_245 & enq_idxs_2[14]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_223 = _T_245 & enq_idxs_2[15]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_226 = _T_245 & enq_idxs_2[16]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_229 = _T_245 & enq_idxs_2[17]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_232 = _T_245 & enq_idxs_2[18]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_235 = _T_245 & enq_idxs_2[19]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_238 = _T_245 & enq_idxs_2[20]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_241 = _T_245 & enq_idxs_2[21]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_244 = _T_245 & enq_idxs_2[22]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_247 = _T_245 & enq_idxs_2[23]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_317 = do_enq & in_mask_3; // @[fetch-buffer.scala:82:16, :87:21, :144:20]
wire _T_250 = _T_317 & enq_idxs_3[0]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_253 = _T_317 & enq_idxs_3[1]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_256 = _T_317 & enq_idxs_3[2]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_259 = _T_317 & enq_idxs_3[3]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_262 = _T_317 & enq_idxs_3[4]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_265 = _T_317 & enq_idxs_3[5]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_268 = _T_317 & enq_idxs_3[6]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_271 = _T_317 & enq_idxs_3[7]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_274 = _T_317 & enq_idxs_3[8]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_277 = _T_317 & enq_idxs_3[9]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_280 = _T_317 & enq_idxs_3[10]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_283 = _T_317 & enq_idxs_3[11]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_286 = _T_317 & enq_idxs_3[12]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_289 = _T_317 & enq_idxs_3[13]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_292 = _T_317 & enq_idxs_3[14]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_295 = _T_317 & enq_idxs_3[15]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_298 = _T_317 & enq_idxs_3[16]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_301 = _T_317 & enq_idxs_3[17]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_304 = _T_317 & enq_idxs_3[18]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_307 = _T_317 & enq_idxs_3[19]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_310 = _T_317 & enq_idxs_3[20]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_313 = _T_317 & enq_idxs_3[21]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_316 = _T_317 & enq_idxs_3[22]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_319 = _T_317 & enq_idxs_3[23]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_389 = do_enq & in_mask_4; // @[fetch-buffer.scala:82:16, :87:21, :144:20]
wire _T_322 = _T_389 & enq_idxs_4[0]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_325 = _T_389 & enq_idxs_4[1]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_328 = _T_389 & enq_idxs_4[2]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_331 = _T_389 & enq_idxs_4[3]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_334 = _T_389 & enq_idxs_4[4]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_337 = _T_389 & enq_idxs_4[5]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_340 = _T_389 & enq_idxs_4[6]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_343 = _T_389 & enq_idxs_4[7]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_346 = _T_389 & enq_idxs_4[8]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_349 = _T_389 & enq_idxs_4[9]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_352 = _T_389 & enq_idxs_4[10]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_355 = _T_389 & enq_idxs_4[11]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_358 = _T_389 & enq_idxs_4[12]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_361 = _T_389 & enq_idxs_4[13]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_364 = _T_389 & enq_idxs_4[14]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_367 = _T_389 & enq_idxs_4[15]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_370 = _T_389 & enq_idxs_4[16]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_373 = _T_389 & enq_idxs_4[17]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_376 = _T_389 & enq_idxs_4[18]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_379 = _T_389 & enq_idxs_4[19]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_382 = _T_389 & enq_idxs_4[20]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_385 = _T_389 & enq_idxs_4[21]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_388 = _T_389 & enq_idxs_4[22]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_391 = _T_389 & enq_idxs_4[23]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_461 = do_enq & in_mask_5; // @[fetch-buffer.scala:82:16, :87:21, :144:20]
wire _T_394 = _T_461 & enq_idxs_5[0]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_397 = _T_461 & enq_idxs_5[1]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_400 = _T_461 & enq_idxs_5[2]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_403 = _T_461 & enq_idxs_5[3]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_406 = _T_461 & enq_idxs_5[4]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_409 = _T_461 & enq_idxs_5[5]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_412 = _T_461 & enq_idxs_5[6]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_415 = _T_461 & enq_idxs_5[7]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_418 = _T_461 & enq_idxs_5[8]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_421 = _T_461 & enq_idxs_5[9]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_424 = _T_461 & enq_idxs_5[10]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_427 = _T_461 & enq_idxs_5[11]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_430 = _T_461 & enq_idxs_5[12]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_433 = _T_461 & enq_idxs_5[13]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_436 = _T_461 & enq_idxs_5[14]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_439 = _T_461 & enq_idxs_5[15]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_442 = _T_461 & enq_idxs_5[16]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_445 = _T_461 & enq_idxs_5[17]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_448 = _T_461 & enq_idxs_5[18]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_451 = _T_461 & enq_idxs_5[19]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_454 = _T_461 & enq_idxs_5[20]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_457 = _T_461 & enq_idxs_5[21]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_460 = _T_461 & enq_idxs_5[22]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_463 = _T_461 & enq_idxs_5[23]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_533 = do_enq & in_mask_6; // @[fetch-buffer.scala:82:16, :87:21, :144:20]
wire _T_466 = _T_533 & enq_idxs_6[0]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_469 = _T_533 & enq_idxs_6[1]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_472 = _T_533 & enq_idxs_6[2]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_475 = _T_533 & enq_idxs_6[3]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_478 = _T_533 & enq_idxs_6[4]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_481 = _T_533 & enq_idxs_6[5]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_484 = _T_533 & enq_idxs_6[6]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_487 = _T_533 & enq_idxs_6[7]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_490 = _T_533 & enq_idxs_6[8]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_493 = _T_533 & enq_idxs_6[9]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_496 = _T_533 & enq_idxs_6[10]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_499 = _T_533 & enq_idxs_6[11]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_502 = _T_533 & enq_idxs_6[12]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_505 = _T_533 & enq_idxs_6[13]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_508 = _T_533 & enq_idxs_6[14]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_511 = _T_533 & enq_idxs_6[15]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_514 = _T_533 & enq_idxs_6[16]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_517 = _T_533 & enq_idxs_6[17]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_520 = _T_533 & enq_idxs_6[18]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_523 = _T_533 & enq_idxs_6[19]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_526 = _T_533 & enq_idxs_6[20]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_529 = _T_533 & enq_idxs_6[21]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_532 = _T_533 & enq_idxs_6[22]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_535 = _T_533 & enq_idxs_6[23]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_605 = do_enq & in_mask_7; // @[fetch-buffer.scala:82:16, :87:21, :144:20]
wire _T_538 = _T_605 & enq_idxs_7[0]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_541 = _T_605 & enq_idxs_7[1]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_544 = _T_605 & enq_idxs_7[2]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_547 = _T_605 & enq_idxs_7[3]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_550 = _T_605 & enq_idxs_7[4]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_553 = _T_605 & enq_idxs_7[5]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_556 = _T_605 & enq_idxs_7[6]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_559 = _T_605 & enq_idxs_7[7]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_562 = _T_605 & enq_idxs_7[8]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_565 = _T_605 & enq_idxs_7[9]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_568 = _T_605 & enq_idxs_7[10]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_571 = _T_605 & enq_idxs_7[11]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_574 = _T_605 & enq_idxs_7[12]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_577 = _T_605 & enq_idxs_7[13]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_580 = _T_605 & enq_idxs_7[14]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_583 = _T_605 & enq_idxs_7[15]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_586 = _T_605 & enq_idxs_7[16]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_589 = _T_605 & enq_idxs_7[17]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_592 = _T_605 & enq_idxs_7[18]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_595 = _T_605 & enq_idxs_7[19]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_598 = _T_605 & enq_idxs_7[20]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_601 = _T_605 & enq_idxs_7[21]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_604 = _T_605 & enq_idxs_7[22]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
wire _T_607 = _T_605 & enq_idxs_7[23]; // @[fetch-buffer.scala:128:22, :144:{20,34,48}]
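  // Sequential update of the fetch-buffer uop RAM. For each slot, the
  // generated priority chain checks the write enables from input uop 7 down
  // to input uop 0 and captures that input's fields; each slot's edge_inst is
  // folded into the single expression that follows its per-field updates.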
always @(posedge clock) begin // @[fetch-buffer.scala:40:7]
if (_T_538) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_0_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_466) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_0_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_394) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_0_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_322) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_0_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_250) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_0_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_178) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_0_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_106) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_0_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_34) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_0_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_0_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_0_edge_inst <= ~(_T_538 | _T_466 | _T_394) & (_T_322 ? in_uops_4_edge_inst : ~(_T_250 | _T_178 | _T_106) & (_T_34 ? in_uops_0_edge_inst : fb_uop_ram_0_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
if (_T_541) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_1_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_469) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_1_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_397) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_1_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_325) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_1_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_253) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_1_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_181) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_1_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_109) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_1_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_37) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_1_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_1_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_1_edge_inst <= ~(_T_541 | _T_469 | _T_397) & (_T_325 ? in_uops_4_edge_inst : ~(_T_253 | _T_181 | _T_109) & (_T_37 ? in_uops_0_edge_inst : fb_uop_ram_1_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
if (_T_544) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_2_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_472) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_2_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_400) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_2_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_328) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_2_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_256) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_2_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_184) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_2_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_112) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_2_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_40) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_2_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_2_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_2_edge_inst <= ~(_T_544 | _T_472 | _T_400) & (_T_328 ? in_uops_4_edge_inst : ~(_T_256 | _T_184 | _T_112) & (_T_40 ? in_uops_0_edge_inst : fb_uop_ram_2_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
if (_T_547) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_3_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_475) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_3_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_403) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_3_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_331) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_3_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_259) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_3_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_187) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_3_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_115) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_3_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_43) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_3_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_3_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_3_edge_inst <= ~(_T_547 | _T_475 | _T_403) & (_T_331 ? in_uops_4_edge_inst : ~(_T_259 | _T_187 | _T_115) & (_T_43 ? in_uops_0_edge_inst : fb_uop_ram_3_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
if (_T_550) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_4_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_478) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_4_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_406) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_4_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_334) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_4_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_262) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_4_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_190) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_4_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_118) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_4_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_46) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_4_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_4_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_4_edge_inst <= ~(_T_550 | _T_478 | _T_406) & (_T_334 ? in_uops_4_edge_inst : ~(_T_262 | _T_190 | _T_118) & (_T_46 ? in_uops_0_edge_inst : fb_uop_ram_4_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
if (_T_553) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_5_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_481) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_5_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_409) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_5_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_337) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_5_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_265) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_5_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_193) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_5_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_121) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_5_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_49) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_5_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_5_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_5_edge_inst <= ~(_T_553 | _T_481 | _T_409) & (_T_337 ? in_uops_4_edge_inst : ~(_T_265 | _T_193 | _T_121) & (_T_49 ? in_uops_0_edge_inst : fb_uop_ram_5_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
if (_T_556) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_6_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_484) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_6_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_412) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_6_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_340) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_6_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_268) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_6_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_196) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_6_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_124) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_6_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_52) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_6_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_6_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_6_edge_inst <= ~(_T_556 | _T_484 | _T_412) & (_T_340 ? in_uops_4_edge_inst : ~(_T_268 | _T_196 | _T_124) & (_T_52 ? in_uops_0_edge_inst : fb_uop_ram_6_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
if (_T_559) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_7_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_487) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_7_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_415) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_7_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_343) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_7_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_271) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_7_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_199) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_7_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_127) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_7_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_55) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_7_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_7_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_7_edge_inst <= ~(_T_559 | _T_487 | _T_415) & (_T_343 ? in_uops_4_edge_inst : ~(_T_271 | _T_199 | _T_127) & (_T_55 ? in_uops_0_edge_inst : fb_uop_ram_7_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
if (_T_562) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_8_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_490) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_8_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_418) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_8_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_346) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_8_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_274) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_8_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_202) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_8_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_130) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_8_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_58) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_8_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_8_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_8_edge_inst <= ~(_T_562 | _T_490 | _T_418) & (_T_346 ? in_uops_4_edge_inst : ~(_T_274 | _T_202 | _T_130) & (_T_58 ? in_uops_0_edge_inst : fb_uop_ram_8_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
if (_T_565) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_9_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_493) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_9_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_421) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_9_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_349) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_9_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_277) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_9_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_205) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_9_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_133) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_9_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_61) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_9_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_9_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_9_edge_inst <= ~(_T_565 | _T_493 | _T_421) & (_T_349 ? in_uops_4_edge_inst : ~(_T_277 | _T_205 | _T_133) & (_T_61 ? in_uops_0_edge_inst : fb_uop_ram_9_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
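    // Fetch-buffer entry 10 write port: same slot-7-down-to-slot-0 priority mux, gated by _T_568 .. _T_64.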
if (_T_568) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_10_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_496) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_10_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_424) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_10_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_352) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_10_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_280) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_10_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_208) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_10_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_136) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_10_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_64) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_10_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_10_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_10_edge_inst <= ~(_T_568 | _T_496 | _T_424) & (_T_352 ? in_uops_4_edge_inst : ~(_T_280 | _T_208 | _T_136) & (_T_64 ? in_uops_0_edge_inst : fb_uop_ram_10_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
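    // Fetch-buffer entry 11 write port: same priority mux, gated by _T_571 .. _T_67.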
if (_T_571) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_11_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_499) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_11_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_427) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_11_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_355) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_11_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_283) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_11_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_211) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_11_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_139) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_11_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_67) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_11_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_11_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_11_edge_inst <= ~(_T_571 | _T_499 | _T_427) & (_T_355 ? in_uops_4_edge_inst : ~(_T_283 | _T_211 | _T_139) & (_T_67 ? in_uops_0_edge_inst : fb_uop_ram_11_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
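    // Fetch-buffer entry 12 write port: same priority mux, gated by _T_574 .. _T_70.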
if (_T_574) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_12_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_502) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_12_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_430) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_12_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_358) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_12_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_286) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_12_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_214) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_12_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_142) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_12_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_70) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_12_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_12_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_12_edge_inst <= ~(_T_574 | _T_502 | _T_430) & (_T_358 ? in_uops_4_edge_inst : ~(_T_286 | _T_214 | _T_142) & (_T_70 ? in_uops_0_edge_inst : fb_uop_ram_12_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
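    // Fetch-buffer entry 13 write port: same priority mux, gated by _T_577 .. _T_73.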
if (_T_577) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_13_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_505) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_13_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_433) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_13_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_361) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_13_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_289) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_13_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_217) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_13_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_145) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_13_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_73) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_13_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_13_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_13_edge_inst <= ~(_T_577 | _T_505 | _T_433) & (_T_361 ? in_uops_4_edge_inst : ~(_T_289 | _T_217 | _T_145) & (_T_73 ? in_uops_0_edge_inst : fb_uop_ram_13_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
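    // Fetch-buffer entry 14 write port: same priority mux, gated by _T_580 .. _T_76.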
if (_T_580) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_14_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_508) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_14_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_436) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_14_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_364) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_14_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_292) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_14_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_220) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_14_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_148) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_14_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_76) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_14_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_14_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_14_edge_inst <= ~(_T_580 | _T_508 | _T_436) & (_T_364 ? in_uops_4_edge_inst : ~(_T_292 | _T_220 | _T_148) & (_T_76 ? in_uops_0_edge_inst : fb_uop_ram_14_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
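    // Fetch-buffer entry 15 write port: same priority mux, gated by _T_583 down through the lower-slot enables.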
if (_T_583) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_15_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_511) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_15_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_439) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_15_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_367) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_15_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_295) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_15_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_223) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_15_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_151) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_15_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_79) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_15_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_15_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_15_edge_inst <= ~(_T_583 | _T_511 | _T_439) & (_T_367 ? in_uops_4_edge_inst : ~(_T_295 | _T_223 | _T_151) & (_T_79 ? in_uops_0_edge_inst : fb_uop_ram_15_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
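    // Slot 16: same write-select structure as slot 15, gated by this slot's own _T_* enables.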
if (_T_586) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_16_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_514) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_16_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_442) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_16_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_370) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_16_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_298) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_16_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_226) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_16_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_154) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_16_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_82) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_16_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_16_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_16_edge_inst <= ~(_T_586 | _T_514 | _T_442) & (_T_370 ? in_uops_4_edge_inst : ~(_T_298 | _T_226 | _T_154) & (_T_82 ? in_uops_0_edge_inst : fb_uop_ram_16_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
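    // Slot 17: write-select, same structure.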
if (_T_589) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_17_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_517) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_17_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_445) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_17_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_373) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_17_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_301) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_17_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_229) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_17_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_157) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_17_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_85) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_17_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_17_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_17_edge_inst <= ~(_T_589 | _T_517 | _T_445) & (_T_373 ? in_uops_4_edge_inst : ~(_T_301 | _T_229 | _T_157) & (_T_85 ? in_uops_0_edge_inst : fb_uop_ram_17_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
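    // Slot 18: write-select, same structure.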
if (_T_592) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_18_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_520) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_18_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_448) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_18_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_376) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_18_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_304) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_18_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_232) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_18_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_160) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_18_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_88) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_18_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_18_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_18_edge_inst <= ~(_T_592 | _T_520 | _T_448) & (_T_376 ? in_uops_4_edge_inst : ~(_T_304 | _T_232 | _T_160) & (_T_88 ? in_uops_0_edge_inst : fb_uop_ram_18_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
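    // Slot 19: write-select, same structure.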
if (_T_595) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_19_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_523) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_19_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_451) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_19_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_379) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_19_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_307) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_19_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_235) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_19_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_163) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_19_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_91) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_19_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_19_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_19_edge_inst <= ~(_T_595 | _T_523 | _T_451) & (_T_379 ? in_uops_4_edge_inst : ~(_T_307 | _T_235 | _T_163) & (_T_91 ? in_uops_0_edge_inst : fb_uop_ram_19_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
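    // Slot 20: write-select, same structure.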
if (_T_598) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_20_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_526) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_20_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_454) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_20_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_382) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_20_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_310) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_20_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_238) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_20_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_166) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_20_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_94) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_20_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_20_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_20_edge_inst <= ~(_T_598 | _T_526 | _T_454) & (_T_382 ? in_uops_4_edge_inst : ~(_T_310 | _T_238 | _T_166) & (_T_94 ? in_uops_0_edge_inst : fb_uop_ram_20_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
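    // Slot 21: write-select, same structure.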
if (_T_601) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_21_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_529) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_21_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_457) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_21_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_385) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_21_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_313) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_21_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_241) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_21_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_169) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_21_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_97) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_21_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_21_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_21_edge_inst <= ~(_T_601 | _T_529 | _T_457) & (_T_385 ? in_uops_4_edge_inst : ~(_T_313 | _T_241 | _T_169) & (_T_97 ? in_uops_0_edge_inst : fb_uop_ram_21_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
if (_T_604) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_22_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_532) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_22_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_460) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_22_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_388) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_22_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_316) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_22_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_244) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_22_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_172) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_22_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_100) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_22_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_22_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_22_edge_inst <= ~(_T_604 | _T_532 | _T_460) & (_T_388 ? in_uops_4_edge_inst : ~(_T_316 | _T_244 | _T_172) & (_T_100 ? in_uops_0_edge_inst : fb_uop_ram_22_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
if (_T_607) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_23_inst <= in_uops_7_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_inst <= in_uops_7_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_rvc <= in_uops_7_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_pc <= in_uops_7_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_sfb <= in_uops_7_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_ftq_idx <= in_uops_7_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_pc_lob <= in_uops_7_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_taken <= in_uops_7_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_pf_if <= in_uops_7_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_ae_if <= in_uops_7_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_debug_if <= in_uops_7_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_xcpt_if <= in_uops_7_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_fsrc <= in_uops_7_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_535) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_23_inst <= in_uops_6_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_inst <= in_uops_6_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_rvc <= in_uops_6_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_pc <= in_uops_6_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_sfb <= in_uops_6_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_ftq_idx <= in_uops_6_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_pc_lob <= in_uops_6_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_taken <= in_uops_6_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_pf_if <= in_uops_6_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_ae_if <= in_uops_6_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_debug_if <= in_uops_6_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_xcpt_if <= in_uops_6_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_fsrc <= in_uops_6_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_463) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_23_inst <= in_uops_5_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_inst <= in_uops_5_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_rvc <= in_uops_5_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_pc <= in_uops_5_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_sfb <= in_uops_5_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_ftq_idx <= in_uops_5_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_pc_lob <= in_uops_5_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_taken <= in_uops_5_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_pf_if <= in_uops_5_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_ae_if <= in_uops_5_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_debug_if <= in_uops_5_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_xcpt_if <= in_uops_5_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_fsrc <= in_uops_5_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_391) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_23_inst <= in_uops_4_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_inst <= in_uops_4_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_rvc <= in_uops_4_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_pc <= in_uops_4_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_sfb <= in_uops_4_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_ftq_idx <= in_uops_4_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_pc_lob <= in_uops_4_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_taken <= in_uops_4_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_pf_if <= in_uops_4_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_ae_if <= in_uops_4_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_debug_if <= in_uops_4_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_xcpt_if <= in_uops_4_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_fsrc <= in_uops_4_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_319) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_23_inst <= in_uops_3_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_inst <= in_uops_3_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_rvc <= in_uops_3_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_pc <= in_uops_3_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_sfb <= in_uops_3_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_ftq_idx <= in_uops_3_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_pc_lob <= in_uops_3_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_taken <= in_uops_3_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_pf_if <= in_uops_3_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_ae_if <= in_uops_3_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_debug_if <= in_uops_3_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_xcpt_if <= in_uops_3_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_fsrc <= in_uops_3_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_247) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_23_inst <= in_uops_2_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_inst <= in_uops_2_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_rvc <= in_uops_2_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_pc <= in_uops_2_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_sfb <= in_uops_2_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_ftq_idx <= in_uops_2_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_pc_lob <= in_uops_2_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_taken <= in_uops_2_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_pf_if <= in_uops_2_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_ae_if <= in_uops_2_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_debug_if <= in_uops_2_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_xcpt_if <= in_uops_2_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_fsrc <= in_uops_2_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_175) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_23_inst <= in_uops_1_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_inst <= in_uops_1_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_rvc <= in_uops_1_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_pc <= in_uops_1_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_sfb <= in_uops_1_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_ftq_idx <= in_uops_1_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_pc_lob <= in_uops_1_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_taken <= in_uops_1_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_pf_if <= in_uops_1_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_ae_if <= in_uops_1_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_debug_if <= in_uops_1_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_xcpt_if <= in_uops_1_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_fsrc <= in_uops_1_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
else if (_T_103) begin // @[fetch-buffer.scala:144:34]
fb_uop_ram_23_inst <= in_uops_0_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_inst <= in_uops_0_debug_inst; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_rvc <= in_uops_0_is_rvc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_pc <= in_uops_0_debug_pc; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_is_sfb <= in_uops_0_is_sfb; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_ftq_idx <= in_uops_0_ftq_idx; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_pc_lob <= in_uops_0_pc_lob; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_taken <= in_uops_0_taken; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_pf_if <= in_uops_0_xcpt_pf_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_xcpt_ae_if <= in_uops_0_xcpt_ae_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_debug_if <= in_uops_0_bp_debug_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_bp_xcpt_if <= in_uops_0_bp_xcpt_if; // @[fetch-buffer.scala:57:16, :88:21]
fb_uop_ram_23_debug_fsrc <= in_uops_0_debug_fsrc; // @[fetch-buffer.scala:57:16, :88:21]
end
fb_uop_ram_23_edge_inst <= ~(_T_607 | _T_535 | _T_463) & (_T_391 ? in_uops_4_edge_inst : ~(_T_319 | _T_247 | _T_175) & (_T_103 ? in_uops_0_edge_inst : fb_uop_ram_23_edge_inst)); // @[fetch-buffer.scala:57:16, :88:21, :144:{34,53}, :145:16]
if (reset) begin // @[fetch-buffer.scala:40:7]
head <= 8'h1; // @[fetch-buffer.scala:61:21]
tail <= 24'h1; // @[fetch-buffer.scala:62:21]
maybe_full <= 1'h0; // @[fetch-buffer.scala:64:27]
end
else begin // @[fetch-buffer.scala:40:7]
if (io_clear_0) begin // @[fetch-buffer.scala:40:7]
head <= 8'h1; // @[fetch-buffer.scala:61:21]
tail <= 24'h1; // @[fetch-buffer.scala:62:21]
end
else begin // @[fetch-buffer.scala:40:7]
if (do_deq) // @[fetch-buffer.scala:159:29]
head <= _head_T_2; // @[fetch-buffer.scala:61:21, :132:8]
if (do_enq) begin // @[fetch-buffer.scala:82:16]
if (in_mask_7) // @[fetch-buffer.scala:87:21]
tail <= {enq_idxs_7[22:0], enq_idxs_7[23]}; // @[fetch-buffer.scala:62:21, :128:22, :132:{8,12,24}]
else if (in_mask_6) // @[fetch-buffer.scala:87:21]
tail <= _T_26; // @[fetch-buffer.scala:62:21, :132:8]
else if (in_mask_5) // @[fetch-buffer.scala:87:21]
tail <= _T_22; // @[fetch-buffer.scala:62:21, :132:8]
else if (in_mask_4) // @[fetch-buffer.scala:87:21]
tail <= _T_18; // @[fetch-buffer.scala:62:21, :132:8]
else if (in_mask_3) // @[fetch-buffer.scala:87:21]
tail <= _T_14; // @[fetch-buffer.scala:62:21, :132:8]
else if (in_mask_2) // @[fetch-buffer.scala:87:21]
tail <= _T_10; // @[fetch-buffer.scala:62:21, :132:8]
else if (in_mask_1) // @[fetch-buffer.scala:87:21]
tail <= _T_6; // @[fetch-buffer.scala:62:21, :132:8]
else if (in_mask_0) // @[fetch-buffer.scala:87:21]
tail <= _T_2; // @[fetch-buffer.scala:62:21, :132:8]
end
end
maybe_full <= ~(io_clear_0 | do_deq) & (do_enq & (in_mask_0 | in_mask_1 | in_mask_2 | in_mask_3 | in_mask_4 | in_mask_5 | in_mask_6 | in_mask_7) | maybe_full); // @[fetch-buffer.scala:40:7, :64:27, :82:16, :87:21, :159:29, :176:17, :178:{27,33}, :179:18, :183:17, :185:16, :188:19, :191:16]
end
  end // always @(posedge)
assign io_enq_ready = io_enq_ready_0; // @[fetch-buffer.scala:40:7]
assign io_deq_valid = io_deq_valid_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_valid = io_deq_bits_uops_0_valid_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_inst = io_deq_bits_uops_0_bits_inst_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_debug_inst = io_deq_bits_uops_0_bits_debug_inst_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_is_rvc = io_deq_bits_uops_0_bits_is_rvc_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_debug_pc = io_deq_bits_uops_0_bits_debug_pc_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_is_sfb = io_deq_bits_uops_0_bits_is_sfb_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_ftq_idx = io_deq_bits_uops_0_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_edge_inst = io_deq_bits_uops_0_bits_edge_inst_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_pc_lob = io_deq_bits_uops_0_bits_pc_lob_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_taken = io_deq_bits_uops_0_bits_taken_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_xcpt_pf_if = io_deq_bits_uops_0_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_xcpt_ae_if = io_deq_bits_uops_0_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_bp_debug_if = io_deq_bits_uops_0_bits_bp_debug_if_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_bp_xcpt_if = io_deq_bits_uops_0_bits_bp_xcpt_if_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_0_bits_debug_fsrc = io_deq_bits_uops_0_bits_debug_fsrc_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_valid = io_deq_bits_uops_1_valid_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_inst = io_deq_bits_uops_1_bits_inst_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_debug_inst = io_deq_bits_uops_1_bits_debug_inst_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_is_rvc = io_deq_bits_uops_1_bits_is_rvc_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_debug_pc = io_deq_bits_uops_1_bits_debug_pc_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_is_sfb = io_deq_bits_uops_1_bits_is_sfb_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_ftq_idx = io_deq_bits_uops_1_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_edge_inst = io_deq_bits_uops_1_bits_edge_inst_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_pc_lob = io_deq_bits_uops_1_bits_pc_lob_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_taken = io_deq_bits_uops_1_bits_taken_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_xcpt_pf_if = io_deq_bits_uops_1_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_xcpt_ae_if = io_deq_bits_uops_1_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_bp_debug_if = io_deq_bits_uops_1_bits_bp_debug_if_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_bp_xcpt_if = io_deq_bits_uops_1_bits_bp_xcpt_if_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_1_bits_debug_fsrc = io_deq_bits_uops_1_bits_debug_fsrc_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_valid = io_deq_bits_uops_2_valid_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_inst = io_deq_bits_uops_2_bits_inst_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_debug_inst = io_deq_bits_uops_2_bits_debug_inst_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_is_rvc = io_deq_bits_uops_2_bits_is_rvc_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_debug_pc = io_deq_bits_uops_2_bits_debug_pc_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_is_sfb = io_deq_bits_uops_2_bits_is_sfb_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_ftq_idx = io_deq_bits_uops_2_bits_ftq_idx_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_edge_inst = io_deq_bits_uops_2_bits_edge_inst_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_pc_lob = io_deq_bits_uops_2_bits_pc_lob_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_taken = io_deq_bits_uops_2_bits_taken_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_xcpt_pf_if = io_deq_bits_uops_2_bits_xcpt_pf_if_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_xcpt_ae_if = io_deq_bits_uops_2_bits_xcpt_ae_if_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_bp_debug_if = io_deq_bits_uops_2_bits_bp_debug_if_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_bp_xcpt_if = io_deq_bits_uops_2_bits_bp_xcpt_if_0; // @[fetch-buffer.scala:40:7]
assign io_deq_bits_uops_2_bits_debug_fsrc = io_deq_bits_uops_2_bits_debug_fsrc_0; // @[fetch-buffer.scala:40:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
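// A minimal usage sketch of ShiftRegInit (the signal and register names below are
// illustrative, not taken from this file): delay a Bool by two cycles through registers
// that reset to false.B and pick up the suggested names flag_pipe_0 / flag_pipe_1.
object ShiftRegInitExample {
  def twoCycleDelay(flag: Bool): Bool =
    ShiftRegInit(flag, n = 2, init = false.B, name = Some("flag_pipe"))
}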
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
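// A minimal usage sketch of AsyncResetShiftReg (names are illustrative): a 3-deep,
// asynchronously reset delay line whose width is taken from the input and whose reset
// value is 0.
object AsyncResetShiftRegExample {
  def pipe3(in: UInt): UInt =
    AsyncResetShiftReg(in, 3, 0, Some("data_pipe"))
}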
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
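// A minimal usage sketch (signal names are illustrative): bring a single-bit level from
// another clock domain through the usual 3-deep chain of asynchronously reset
// synchronizer flops, initialized to 0.
object AsyncResetSynchronizerShiftRegExample {
  def sync3(levelFromOtherDomain: Bool): Bool =
    AsyncResetSynchronizerShiftReg(levelFromOtherDomain, 3, Some("level_sync"))
}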
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
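// A minimal usage sketch (names are illustrative): the classic unreset three-flop
// synchronizer for one asynchronous input bit.
object SynchronizerShiftRegExample {
  def sync3(asyncBit: Bool): Bool =
    SynchronizerShiftReg(asyncBit, 3, Some("bit_sync"))
}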
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
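// A minimal usage sketch of ClockCrossingReg (names are illustrative): capture a
// multi-bit word from another clock domain only while `en` indicates it is stable,
// with no reset value on the capture register.
object ClockCrossingRegExample {
  def capture(word: UInt, en: Bool): UInt =
    ClockCrossingReg(word, en, doInit = false, name = Some("capture_reg"))
}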
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
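// A small configuration sketch: an 8-deep crossing with 3 synchronizer stages and the
// reset-safety handshake enabled (these are the defaults), plus the single-entry
// variant that suits low-rate signals.
object AsyncQueueParamsExample {
  val deep = AsyncQueueParams(depth = 8, sync = 3, safe = true, narrow = false)
  val single = AsyncQueueParams.singleton(sync = 3)
}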
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
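// A worked sketch of the Gray coding used above, assuming a 3-bit counter: binary
// 0,1,2,3,4,5,6,7 encodes to 000,001,011,010,110,111,101,100 via b ^ (b >> 1), so
// consecutive values differ in exactly one bit and the pointer can be sampled safely
// in the other clock domain.
class GrayCounterExample extends Module {
  val io = IO(new Bundle {
    val enable = Input(Bool())
    val gray = Output(UInt(3.W))
  })
  io.gray := GrayCounter(3, io.enable, false.B, "example_binary")
}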
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
    // then reset is allowed to happen, but the write side cannot know that the
    // dequeue occurred.
// TODO: write some sort of sanity check assertion for users
    // that denotes: don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
    // that denotes: don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
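// A minimal crossing sketch built from the two helpers above (names are illustrative):
// the source-domain side folds a Decoupled stream into an AsyncBundle and the
// sink-domain side recovers a Decoupled stream from it; each helper must be elaborated
// under its own clock and reset.
object AsyncBundleCrossingExample {
  def cross[T <: Data](enq: ReadyValidIO[T],
                       enqClock: Clock, enqReset: Reset,
                       deqClock: Clock, deqReset: Reset): DecoupledIO[T] = {
    val async = withClockAndReset(enqClock, enqReset) { ToAsyncBundle(enq) }
    withClockAndReset(deqClock, deqReset) { FromAsyncBundle(async) }
  }
}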
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncQueueSink_Phit_14( // @[AsyncQueue.scala:136:7]
input clock, // @[AsyncQueue.scala:136:7]
input reset, // @[AsyncQueue.scala:136:7]
input io_deq_ready, // @[AsyncQueue.scala:139:14]
output io_deq_valid, // @[AsyncQueue.scala:139:14]
output [31:0] io_deq_bits_phit, // @[AsyncQueue.scala:139:14]
input [31:0] io_async_mem_0_phit, // @[AsyncQueue.scala:139:14]
input [31:0] io_async_mem_1_phit, // @[AsyncQueue.scala:139:14]
input [31:0] io_async_mem_2_phit, // @[AsyncQueue.scala:139:14]
input [31:0] io_async_mem_3_phit, // @[AsyncQueue.scala:139:14]
input [31:0] io_async_mem_4_phit, // @[AsyncQueue.scala:139:14]
input [31:0] io_async_mem_5_phit, // @[AsyncQueue.scala:139:14]
input [31:0] io_async_mem_6_phit, // @[AsyncQueue.scala:139:14]
input [31:0] io_async_mem_7_phit, // @[AsyncQueue.scala:139:14]
output [3:0] io_async_ridx, // @[AsyncQueue.scala:139:14]
input [3:0] io_async_widx, // @[AsyncQueue.scala:139:14]
output io_async_safe_ridx_valid, // @[AsyncQueue.scala:139:14]
input io_async_safe_widx_valid, // @[AsyncQueue.scala:139:14]
input io_async_safe_source_reset_n, // @[AsyncQueue.scala:139:14]
output io_async_safe_sink_reset_n // @[AsyncQueue.scala:139:14]
);
wire _source_extend_io_out; // @[AsyncQueue.scala:175:31]
wire _sink_valid_0_io_out; // @[AsyncQueue.scala:172:33]
wire io_deq_ready_0 = io_deq_ready; // @[AsyncQueue.scala:136:7]
wire [31:0] io_async_mem_0_phit_0 = io_async_mem_0_phit; // @[AsyncQueue.scala:136:7]
wire [31:0] io_async_mem_1_phit_0 = io_async_mem_1_phit; // @[AsyncQueue.scala:136:7]
wire [31:0] io_async_mem_2_phit_0 = io_async_mem_2_phit; // @[AsyncQueue.scala:136:7]
wire [31:0] io_async_mem_3_phit_0 = io_async_mem_3_phit; // @[AsyncQueue.scala:136:7]
wire [31:0] io_async_mem_4_phit_0 = io_async_mem_4_phit; // @[AsyncQueue.scala:136:7]
wire [31:0] io_async_mem_5_phit_0 = io_async_mem_5_phit; // @[AsyncQueue.scala:136:7]
wire [31:0] io_async_mem_6_phit_0 = io_async_mem_6_phit; // @[AsyncQueue.scala:136:7]
wire [31:0] io_async_mem_7_phit_0 = io_async_mem_7_phit; // @[AsyncQueue.scala:136:7]
wire [3:0] io_async_widx_0 = io_async_widx; // @[AsyncQueue.scala:136:7]
wire io_async_safe_widx_valid_0 = io_async_safe_widx_valid; // @[AsyncQueue.scala:136:7]
wire io_async_safe_source_reset_n_0 = io_async_safe_source_reset_n; // @[AsyncQueue.scala:136:7]
wire _ridx_T = reset; // @[AsyncQueue.scala:148:30]
wire _valid_reg_T = reset; // @[AsyncQueue.scala:165:35]
wire _ridx_reg_T = reset; // @[AsyncQueue.scala:168:34]
wire _sink_valid_0_reset_T = reset; // @[AsyncQueue.scala:177:35]
wire _sink_valid_1_reset_T = reset; // @[AsyncQueue.scala:178:35]
wire _source_extend_reset_T = reset; // @[AsyncQueue.scala:179:35]
wire _source_valid_reset_T = reset; // @[AsyncQueue.scala:180:34]
wire _io_async_safe_sink_reset_n_T = reset; // @[AsyncQueue.scala:193:32]
wire _io_deq_valid_T; // @[AsyncQueue.scala:166:29]
wire [31:0] _io_deq_bits_WIRE_phit; // @[SynchronizerReg.scala:211:26]
wire _io_async_safe_sink_reset_n_T_1; // @[AsyncQueue.scala:193:25]
wire [31:0] io_deq_bits_phit_0; // @[AsyncQueue.scala:136:7]
wire io_deq_valid_0; // @[AsyncQueue.scala:136:7]
wire io_async_safe_ridx_valid_0; // @[AsyncQueue.scala:136:7]
wire io_async_safe_sink_reset_n_0; // @[AsyncQueue.scala:136:7]
wire [3:0] io_async_ridx_0; // @[AsyncQueue.scala:136:7]
wire source_ready; // @[AsyncQueue.scala:147:30]
wire _ridx_T_1 = io_deq_ready_0 & io_deq_valid_0; // @[Decoupled.scala:51:35]
wire _ridx_T_2 = ~source_ready; // @[AsyncQueue.scala:147:30, :148:77]
wire [3:0] _ridx_incremented_T_2; // @[AsyncQueue.scala:53:23]
wire [3:0] ridx_incremented; // @[AsyncQueue.scala:51:27]
reg [3:0] ridx_ridx_bin; // @[AsyncQueue.scala:52:25]
wire [4:0] _ridx_incremented_T = {1'h0, ridx_ridx_bin} + {4'h0, _ridx_T_1}; // @[Decoupled.scala:51:35]
wire [3:0] _ridx_incremented_T_1 = _ridx_incremented_T[3:0]; // @[AsyncQueue.scala:53:43]
assign _ridx_incremented_T_2 = _ridx_T_2 ? 4'h0 : _ridx_incremented_T_1; // @[AsyncQueue.scala:52:25, :53:{23,43}, :148:77]
assign ridx_incremented = _ridx_incremented_T_2; // @[AsyncQueue.scala:51:27, :53:23]
wire [2:0] _ridx_T_3 = ridx_incremented[3:1]; // @[AsyncQueue.scala:51:27, :54:32]
wire [3:0] ridx = {ridx_incremented[3], ridx_incremented[2:0] ^ _ridx_T_3}; // @[AsyncQueue.scala:51:27, :54:{17,32}]
wire [3:0] widx; // @[ShiftReg.scala:48:24]
wire _valid_T = ridx != widx; // @[ShiftReg.scala:48:24]
wire valid = source_ready & _valid_T; // @[AsyncQueue.scala:147:30, :150:{28,36}]
wire [2:0] _index_T = ridx[2:0]; // @[AsyncQueue.scala:54:17, :156:43]
wire _index_T_1 = ridx[3]; // @[AsyncQueue.scala:54:17, :156:62]
wire [2:0] _index_T_2 = {_index_T_1, 2'h0}; // @[AsyncQueue.scala:156:{62,75}]
wire [2:0] index = _index_T ^ _index_T_2; // @[AsyncQueue.scala:156:{43,55,75}]
wire [7:0][31:0] _GEN = {{io_async_mem_7_phit_0}, {io_async_mem_6_phit_0}, {io_async_mem_5_phit_0}, {io_async_mem_4_phit_0}, {io_async_mem_3_phit_0}, {io_async_mem_2_phit_0}, {io_async_mem_1_phit_0}, {io_async_mem_0_phit_0}}; // @[SynchronizerReg.scala:209:18]
wire [31:0] _io_deq_bits_T; // @[SynchronizerReg.scala:211:26]
assign io_deq_bits_phit_0 = _io_deq_bits_WIRE_phit; // @[SynchronizerReg.scala:211:26]
wire [31:0] _io_deq_bits_WIRE_1; // @[SynchronizerReg.scala:211:26]
assign _io_deq_bits_T = _io_deq_bits_WIRE_1; // @[SynchronizerReg.scala:211:26]
assign _io_deq_bits_WIRE_phit = _io_deq_bits_T; // @[SynchronizerReg.scala:211:26]
reg valid_reg; // @[AsyncQueue.scala:165:56]
assign _io_deq_valid_T = valid_reg & source_ready; // @[AsyncQueue.scala:147:30, :165:56, :166:29]
assign io_deq_valid_0 = _io_deq_valid_T; // @[AsyncQueue.scala:136:7, :166:29]
reg [3:0] ridx_gray; // @[AsyncQueue.scala:168:55]
assign io_async_ridx_0 = ridx_gray; // @[AsyncQueue.scala:136:7, :168:55]
wire _sink_valid_0_reset_T_1 = ~io_async_safe_source_reset_n_0; // @[AsyncQueue.scala:136:7, :177:45]
wire _sink_valid_0_reset_T_2 = _sink_valid_0_reset_T | _sink_valid_0_reset_T_1; // @[AsyncQueue.scala:177:{35,42,45}]
wire _sink_valid_0_reset_T_3 = _sink_valid_0_reset_T_2; // @[AsyncQueue.scala:177:{42,66}]
wire _sink_valid_1_reset_T_1 = ~io_async_safe_source_reset_n_0; // @[AsyncQueue.scala:136:7, :177:45, :178:45]
wire _sink_valid_1_reset_T_2 = _sink_valid_1_reset_T | _sink_valid_1_reset_T_1; // @[AsyncQueue.scala:178:{35,42,45}]
wire _sink_valid_1_reset_T_3 = _sink_valid_1_reset_T_2; // @[AsyncQueue.scala:178:{42,66}]
wire _source_extend_reset_T_1 = ~io_async_safe_source_reset_n_0; // @[AsyncQueue.scala:136:7, :177:45, :179:45]
wire _source_extend_reset_T_2 = _source_extend_reset_T | _source_extend_reset_T_1; // @[AsyncQueue.scala:179:{35,42,45}]
wire _source_extend_reset_T_3 = _source_extend_reset_T_2; // @[AsyncQueue.scala:179:{42,66}]
assign _io_async_safe_sink_reset_n_T_1 = ~_io_async_safe_sink_reset_n_T; // @[AsyncQueue.scala:193:{25,32}]
assign io_async_safe_sink_reset_n_0 = _io_async_safe_sink_reset_n_T_1; // @[AsyncQueue.scala:136:7, :193:25]
always @(posedge clock or posedge _ridx_T) begin // @[AsyncQueue.scala:136:7, :148:30]
if (_ridx_T) // @[AsyncQueue.scala:136:7, :148:30]
ridx_ridx_bin <= 4'h0; // @[AsyncQueue.scala:52:25]
else // @[AsyncQueue.scala:136:7]
ridx_ridx_bin <= ridx_incremented; // @[AsyncQueue.scala:51:27, :52:25]
end // always @(posedge, posedge)
always @(posedge clock or posedge _valid_reg_T) begin // @[AsyncQueue.scala:136:7, :165:35]
if (_valid_reg_T) // @[AsyncQueue.scala:136:7, :165:35]
valid_reg <= 1'h0; // @[AsyncQueue.scala:165:56]
else // @[AsyncQueue.scala:136:7]
valid_reg <= valid; // @[AsyncQueue.scala:150:28, :165:56]
end // always @(posedge, posedge)
always @(posedge clock or posedge _ridx_reg_T) begin // @[AsyncQueue.scala:136:7, :168:34]
if (_ridx_reg_T) // @[AsyncQueue.scala:136:7, :168:34]
ridx_gray <= 4'h0; // @[AsyncQueue.scala:52:25, :168:55]
else // @[AsyncQueue.scala:136:7]
ridx_gray <= ridx; // @[AsyncQueue.scala:54:17, :168:55]
end // always @(posedge, posedge) |
Generate the Verilog code corresponding to the following Chisel files.
File UnsafeAXI4ToTL.scala:
package ara
import chisel3._
import chisel3.util._
import freechips.rocketchip.amba._
import freechips.rocketchip.amba.axi4._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle {
val data = UInt(dataWidth.W)
val resp = UInt(respWidth.W)
val last = Bool()
val user = BundleMap(userFields)
}
/** Parameters for [[BaseReservableListBuffer]] and all child classes.
*
* @param numEntries Total number of elements that can be stored in the 'data' RAM
* @param numLists Maximum number of linked lists
* @param numBeats Maximum number of beats per entry
*/
case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) {
// Avoid zero-width wires when we call 'log2Ceil'
val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries)
val listBits = if (numLists == 1) 1 else log2Ceil(numLists)
val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats)
}
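// For example, ReservableListBufferParameters(numEntries = 32, numLists = 16, numBeats = 8)
// gives entryBits = 5, listBits = 4 and beatBits = 3.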
case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName)
extends MixedAdapterNode(AXI4Imp, TLImp)(
dFn = { case mp =>
TLMasterPortParameters.v2(
masters = mp.masters.zipWithIndex.map { case (m, i) =>
// Support 'numTlTxns' read requests and 'numTlTxns' write requests at once.
val numSourceIds = numTlTxns * 2
TLMasterParameters.v2(
name = m.name,
sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds),
nodePath = m.nodePath
)
},
echoFields = mp.echoFields,
requestFields = AMBAProtField() +: mp.requestFields,
responseKeys = mp.responseKeys
)
},
uFn = { mp =>
AXI4SlavePortParameters(
slaves = mp.managers.map { m =>
val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits))
AXI4SlaveParameters(
address = m.address,
resources = m.resources,
regionType = m.regionType,
executable = m.executable,
nodePath = m.nodePath,
supportsWrite = m.supportsPutPartial.intersect(maxXfer),
supportsRead = m.supportsGet.intersect(maxXfer),
interleavedId = Some(0) // TL2 never interleaves D beats
)
},
beatBytes = mp.beatBytes,
minLatency = mp.minLatency,
responseFields = mp.responseFields,
requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt)
)
}
)
class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule {
require(numTlTxns >= 1)
require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2")
val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt)
lazy val module = new LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
edgeIn.master.masters.foreach { m =>
require(m.aligned, "AXI4ToTL requires aligned requests")
}
val numIds = edgeIn.master.endId
val beatBytes = edgeOut.slave.beatBytes
val maxTransfer = edgeOut.slave.maxTransfer
val maxBeats = maxTransfer / beatBytes
// Look for an Error device to redirect bad requests
val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError")
require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. One must be instantiated.")
val errorDev = errorDevs.maxBy(_.maxTransfer)
val errorDevAddr = errorDev.address.head.base
require(
errorDev.supportsPutPartial.contains(maxTransfer),
s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer"
)
require(
errorDev.supportsGet.contains(maxTransfer),
s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer"
)
// All of the read-response reordering logic.
val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields)
val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats)
val listBuffer = if (numTlTxns > 1) {
Module(new ReservableListBuffer(listBufData, listBufParams))
} else {
Module(new PassthroughListBuffer(listBufData, listBufParams))
}
// To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to
// 0 for read requests and 1 for write requests.
val isReadSourceBit = 0.U(1.W)
val isWriteSourceBit = 1.U(1.W)
/* Read request logic */
val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val rBytes1 = in.ar.bits.bytes1()
val rSize = OH1ToUInt(rBytes1)
val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize)
val rId = if (numTlTxns > 1) {
Cat(isReadSourceBit, listBuffer.ioReservedIndex)
} else {
isReadSourceBit
}
val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Indicates if there are still valid TileLink source IDs left to use.
val canIssueR = listBuffer.ioReserve.ready
listBuffer.ioReserve.bits := in.ar.bits.id
listBuffer.ioReserve.valid := in.ar.valid && rOut.ready
in.ar.ready := rOut.ready && canIssueR
rOut.valid := in.ar.valid && canIssueR
rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2
rOut.bits.user :<= in.ar.bits.user
rOut.bits.user.lift(AMBAProt).foreach { rProt =>
rProt.privileged := in.ar.bits.prot(0)
rProt.secure := !in.ar.bits.prot(1)
rProt.fetch := in.ar.bits.prot(2)
rProt.bufferable := in.ar.bits.cache(0)
rProt.modifiable := in.ar.bits.cache(1)
rProt.readalloc := in.ar.bits.cache(2)
rProt.writealloc := in.ar.bits.cache(3)
}
/* Write request logic */
// Strip off the MSB, which identifies the transaction as read vs write.
val strippedResponseSourceId = if (numTlTxns > 1) {
out.d.bits.source((out.d.bits.source).getWidth - 2, 0)
} else {
// When there's only 1 TileLink transaction allowed for read/write, then this field is always 0.
0.U(1.W)
}
// Track when a write request burst is in progress.
val writeBurstBusy = RegInit(false.B)
when(in.w.fire) {
writeBurstBusy := !in.w.bits.last
}
val usedWriteIds = RegInit(0.U(numTlTxns.W))
val canIssueW = !usedWriteIds.andR
val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W))
val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W))
usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet
// Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't
// change mid-burst.
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W))
val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy
val freeWriteIdIndex = OHToUInt(freeWriteIdOH)
freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds
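// Worked example with numTlTxns = 4: if usedWriteIds = b0101 (IDs 0 and 2 busy), then
// ~usedWriteIds = b1010 and leftOR(b1010) = b1110; in 4 bits, ~(b1110 << 1) = b0011 and
// b0011 & b1010 = b0010, i.e. ID 1 -- the lowest free write ID -- is selected.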
val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle)))
val wBytes1 = in.aw.bits.bytes1()
val wSize = OH1ToUInt(wBytes1)
val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize)
val wId = if (numTlTxns > 1) {
Cat(isWriteSourceBit, freeWriteIdIndex)
} else {
isWriteSourceBit
}
val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0))
// Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain
// asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but
// the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb
// bits during a W-channel burst.
in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW
in.w.ready := wOut.ready && in.aw.valid && canIssueW
wOut.valid := in.aw.valid && in.w.valid && canIssueW
wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2
in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ }
wOut.bits.user :<= in.aw.bits.user
wOut.bits.user.lift(AMBAProt).foreach { wProt =>
wProt.privileged := in.aw.bits.prot(0)
wProt.secure := !in.aw.bits.prot(1)
wProt.fetch := in.aw.bits.prot(2)
wProt.bufferable := in.aw.bits.cache(0)
wProt.modifiable := in.aw.bits.cache(1)
wProt.readalloc := in.aw.bits.cache(2)
wProt.writealloc := in.aw.bits.cache(3)
}
// Merge the AXI4 read/write requests into the TL-A channel.
TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut))
/* Read/write response logic */
val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle)))
val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle)))
val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY)
val dHasData = edgeOut.hasData(out.d.bits)
val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d)
val dNumBeats1 = edgeOut.numBeats1(out.d.bits)
// Handle cases where writeack arrives before write is done
val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U
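// If the ack arrives while its write ID is still unmarked (the final W beat hasn't fired
// yet), 'writeEarlyAck' holds off both 'okB.valid' and 'out.d.ready' below until
// 'edgeOut.done(wOut)' records the ID in 'usedWriteIds'.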
out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck)
listBuffer.ioDataOut.ready := okR.ready
okR.valid := listBuffer.ioDataOut.valid
okB.valid := out.d.valid && !dHasData && !writeEarlyAck
listBuffer.ioResponse.valid := out.d.valid && dHasData
listBuffer.ioResponse.bits.index := strippedResponseSourceId
listBuffer.ioResponse.bits.data.data := out.d.bits.data
listBuffer.ioResponse.bits.data.resp := dResp
listBuffer.ioResponse.bits.data.last := dLast
listBuffer.ioResponse.bits.data.user :<= out.d.bits.user
listBuffer.ioResponse.bits.count := dCount
listBuffer.ioResponse.bits.numBeats1 := dNumBeats1
okR.bits.id := listBuffer.ioDataOut.bits.listIndex
okR.bits.data := listBuffer.ioDataOut.bits.payload.data
okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp
okR.bits.last := listBuffer.ioDataOut.bits.payload.last
okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user
// Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write
// response, mark the write transaction as complete.
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W))
val writeResponseId = writeIdMap.read(strippedResponseSourceId)
when(wOut.fire) {
writeIdMap.write(freeWriteIdIndex, in.aw.bits.id)
}
when(edgeOut.done(wOut)) {
usedWriteIdsSet := freeWriteIdOH
}
when(okB.fire) {
usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns)
}
okB.bits.id := writeResponseId
okB.bits.resp := dResp
okB.bits.user :<= out.d.bits.user
// AXI4 needs irrevocable behaviour
in.r <> Queue.irrevocable(okR, 1, flow = true)
in.b <> Queue.irrevocable(okB, 1, flow = true)
// Unused channels
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
/* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */
def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = {
val lReqType = reqType.toLowerCase
when(a.valid) {
assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U)
// Narrow transfers and FIXED bursts must be single-beat bursts.
when(a.bits.len =/= 0.U) {
assert(
a.bits.size === log2Ceil(beatBytes).U,
s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)",
1.U << a.bits.size,
a.bits.len + 1.U
)
assert(
a.bits.burst =/= AXI4Parameters.BURST_FIXED,
s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)",
a.bits.len + 1.U
)
}
// Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in
// particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink
// Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts.
}
}
checkRequest(in.ar, "Read")
checkRequest(in.aw, "Write")
}
}
}
object UnsafeAXI4ToTL {
def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = {
val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt))
axi42tl.node
}
}
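// Typical diplomacy wiring (a sketch; 'tlNode' and 'axiMasterNode' are placeholder names):
//   tlNode := UnsafeAXI4ToTL(numTlTxns = 4) := AXI4Fragmenter() := axiMasterNode
// An AXI4Fragmenter (or equivalent) is assumed upstream, since this adapter requires
// aligned requests (see the 'require(m.aligned, ...)' check above).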
/* ReservableListBuffer logic, and associated classes. */
class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle {
val index = UInt(params.entryBits.W)
val count = UInt(params.beatBits.W)
val numBeats1 = UInt(params.beatBits.W)
}
class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle {
val listIndex = UInt(params.listBits.W)
}
/** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */
abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends Module {
require(params.numEntries > 0)
require(params.numLists > 0)
val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W))))
val ioReservedIndex = IO(Output(UInt(params.entryBits.W)))
val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params))))
val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params)))
}
/** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve
* linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the
* 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a
* given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order.
*
* ==Constructor==
* @param gen Chisel type of linked list data element
* @param params Other parameters
*
* ==Module IO==
* @param ioReserve Index of list to reserve a new element in
* @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire'
* @param ioResponse Payload containing response data and linked-list-entry index
* @param ioDataOut Payload containing data read from response linked list and linked list index
*/
class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
val valid = RegInit(0.U(params.numLists.W))
val head = Mem(params.numLists, UInt(params.entryBits.W))
val tail = Mem(params.numLists, UInt(params.entryBits.W))
val used = RegInit(0.U(params.numEntries.W))
val next = Mem(params.numEntries, UInt(params.entryBits.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }
val dataIsPresent = RegInit(0.U(params.numEntries.W))
val beats = Mem(params.numEntries, UInt(params.beatBits.W))
// The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower.
val dataMemReadEnable = WireDefault(false.B)
val dataMemWriteEnable = WireDefault(false.B)
assert(!(dataMemReadEnable && dataMemWriteEnable))
// 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the
// lowest-index entry in the 'data' RAM which is free.
val freeOH = Wire(UInt(params.numEntries.W))
val freeIndex = OHToUInt(freeOH)
freeOH := ~(leftOR(~used) << 1) & ~used
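// Same lowest-cleared-bit trick as the write-ID allocator in UnsafeAXI4ToTL: e.g.
// used = b0101 yields freeOH = b0010, so entry 1 (the lowest free 'data' RAM slot) is reserved.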
ioReservedIndex := freeIndex
val validSet = WireDefault(0.U(params.numLists.W))
val validClr = WireDefault(0.U(params.numLists.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
val dataIsPresentSet = WireDefault(0.U(params.numEntries.W))
val dataIsPresentClr = WireDefault(0.U(params.numEntries.W))
valid := (valid & ~validClr) | validSet
used := (used & ~usedClr) | usedSet
dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet
/* Reservation logic signals */
val reserveTail = Wire(UInt(params.entryBits.W))
val reserveIsValid = Wire(Bool())
/* Response logic signals */
val responseIndex = Wire(UInt(params.entryBits.W))
val responseListIndex = Wire(UInt(params.listBits.W))
val responseHead = Wire(UInt(params.entryBits.W))
val responseTail = Wire(UInt(params.entryBits.W))
val nextResponseHead = Wire(UInt(params.entryBits.W))
val nextDataIsPresent = Wire(Bool())
val isResponseInOrder = Wire(Bool())
val isEndOfList = Wire(Bool())
val isLastBeat = Wire(Bool())
val isLastResponseBeat = Wire(Bool())
val isLastUnwindBeat = Wire(Bool())
/* Reservation logic */
reserveTail := tail.read(ioReserve.bits)
reserveIsValid := valid(ioReserve.bits)
ioReserve.ready := !used.andR
// When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we
// actually start a new list, rather than appending to a list that's about to disappear.
val reserveResponseSameList = ioReserve.bits === responseListIndex
val appendToAndDestroyList =
ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat
when(ioReserve.fire) {
validSet := UIntToOH(ioReserve.bits, params.numLists)
usedSet := freeOH
when(reserveIsValid && !appendToAndDestroyList) {
next.write(reserveTail, freeIndex)
}.otherwise {
head.write(ioReserve.bits, freeIndex)
}
tail.write(ioReserve.bits, freeIndex)
map.write(freeIndex, ioReserve.bits)
}
/* Response logic */
// The majority of the response logic (reading from and writing to the various RAMs) is common between the
// response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid).
// The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the
// 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and
// response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after
// two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker.
responseHead := head.read(responseListIndex)
responseTail := tail.read(responseListIndex)
nextResponseHead := next.read(responseIndex)
nextDataIsPresent := dataIsPresent(nextResponseHead)
// Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since
// there isn't a next element in the linked list.
isResponseInOrder := responseHead === responseIndex
isEndOfList := responseHead === responseTail
isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1
// When a response's last beat is sent to the output channel, mark it as completed. This can happen in two
// situations:
// 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM
// reservation was never needed.
// 2. An entry is read out of the 'data' SRAM (within the unwind FSM).
when(ioDataOut.fire && isLastBeat) {
// Mark the reservation as no-longer-used.
usedClr := UIntToOH(responseIndex, params.numEntries)
// If the response is in-order, then we're popping an element from this linked list.
when(isEndOfList) {
// Once we pop the last element from a linked list, mark it as no-longer-present.
validClr := UIntToOH(responseListIndex, params.numLists)
}.otherwise {
// Move the linked list's head pointer to the new head pointer.
head.write(responseListIndex, nextResponseHead)
}
}
// If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding.
when(ioResponse.fire && !isResponseInOrder) {
dataMemWriteEnable := true.B
when(isLastResponseBeat) {
dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries)
beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1)
}
}
// Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to.
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats)
(responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) =>
when(select && dataMemWriteEnable) {
seqMem.write(ioResponse.bits.index, ioResponse.bits.data)
}
}
/* Response unwind logic */
// Unwind FSM state definitions
val sIdle :: sUnwinding :: Nil = Enum(2)
val unwindState = RegInit(sIdle)
val busyUnwinding = unwindState === sUnwinding
val startUnwind = Wire(Bool())
val stopUnwind = Wire(Bool())
when(startUnwind) {
unwindState := sUnwinding
}.elsewhen(stopUnwind) {
unwindState := sIdle
}
assert(!(startUnwind && stopUnwind))
// Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to
// become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is
// invalid.
//
// Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to
// worry about overwriting the 'data' SRAM's output when we start the unwind FSM.
startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent
// Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of
// two things happens:
// 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent)
// 2. There are no more outstanding responses in this list (isEndOfList)
//
// Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are
// passing from 'ioResponse' to 'ioDataOut'.
stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList)
val isUnwindBurstOver = Wire(Bool())
val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable)
// Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of
// beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we
// increment 'beatCounter' until it reaches 'unwindBeats1'.
val unwindBeats1 = Reg(UInt(params.beatBits.W))
val nextBeatCounter = Wire(UInt(params.beatBits.W))
val beatCounter = RegNext(nextBeatCounter)
isUnwindBurstOver := beatCounter === unwindBeats1
when(startNewBurst) {
unwindBeats1 := beats.read(nextResponseHead)
nextBeatCounter := 0.U
}.elsewhen(dataMemReadEnable) {
nextBeatCounter := beatCounter + 1.U
}.otherwise {
nextBeatCounter := beatCounter
}
// When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next
// entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which
// happens at the start of reading a new stored burst).
val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst)
responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index)
// Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the
// SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead
// holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'.
val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex)
// The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. The SRAM's output stays valid
// until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle).
val unwindDataIsValid = RegInit(false.B)
when(dataMemReadEnable) {
unwindDataIsValid := true.B
}.elsewhen(ioDataOut.fire) {
unwindDataIsValid := false.B
}
isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid
// Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats.
isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat)
// Select which SRAM to read from based on the beat counter.
val dataOutputVec = Wire(Vec(params.numBeats, gen))
val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats)
(nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) =>
dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable)
}
// Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured
// by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading
// from.
val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable)
// Mark 'data' burst entries as no-longer-present as they get read out of the SRAM.
when(dataMemReadEnable) {
dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries)
}
// As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue
// a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know
// we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be
// consumed by the output channel).
val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready
dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem)
// While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need
// 'responseListIndex' to be coherent for the entire unwind process.
val rawResponseListIndex = map.read(responseIndex)
val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst)
responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex)
// Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are
// just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that
// could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be
// single-ported.
ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding
// Either pass an in-order response to the output channel, or data read from the unwind FSM.
ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder)
ioDataOut.bits.listIndex := responseListIndex
ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data)
// It's an error to get a response that isn't associated with a valid linked list.
when(ioResponse.fire || unwindDataIsValid) {
assert(
valid(responseListIndex),
"No linked list exists at index %d, mapped from %d",
responseListIndex,
responseIndex
)
}
when(busyUnwinding && dataMemReadEnable) {
assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order")
}
}
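// Example flow (a sketch): reserving list 2 twice hands out entries 0 and 1. If the response
// for entry 1 arrives first, it is stashed in the 'data' SRAM; once entry 0 has passed straight
// through to 'ioDataOut', the unwind FSM replays entry 1, so the list is still drained in order.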
/** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1.
*
* Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to
* reorder any responses, or store any linked lists.
*/
class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters)
extends BaseReservableListBuffer(gen, params) {
require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1")
val used = RegInit(0.U(params.numEntries.W))
val map = Mem(params.numEntries, UInt(params.listBits.W))
val usedSet = WireDefault(0.U(params.numEntries.W))
val usedClr = WireDefault(0.U(params.numEntries.W))
used := (used & ~usedClr) | usedSet
ioReserve.ready := used === 0.U
// Store which list index was reserved, we need to return this value when we get a response.
when(ioReserve.fire) {
usedSet := 1.U
map.write(0.U, ioReserve.bits)
}
// There's only one valid linked list entry, which is at index 0.
ioReservedIndex := 0.U
val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1
// Mark the linked list as empty when we get the last beat in a response.
// Note that 'ioResponse.fire === ioDataOut.fire'.
when(ioResponse.fire && isLastResponseBeat) {
usedClr := 1.U
}
// Always pass the response data straight through, since we never need to reorder the response data.
ioDataOut.bits.listIndex := map.read(0.U)
ioDataOut.bits.payload := ioResponse.bits.data
ioDataOut.valid := ioResponse.valid
ioResponse.ready := ioDataOut.ready
}
| module dataMems_468( // @[UnsafeAXI4ToTL.scala:365:62]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [66:0] R0_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [66:0] W0_data
);
dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (W0_en),
.W0_clk (W0_clk),
.W0_data (W0_data)
); // @[UnsafeAXI4ToTL.scala:365:62]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
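// Usage sketch (signal names are illustrative only): a multi-bit value from another clock
// domain can be synchronized with a 3-deep chain, e.g.
//   val widx_synced = AsyncResetSynchronizerShiftReg(io.async_widx, 3, Some("widx_sync"))
// which elaborates one primitive shift-register chain per bit, as in the
// AsyncResetSynchronizerShiftReg_w4_d3_i0 module generated below.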
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerShiftReg_w4_d3_i0_10( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input [3:0] io_d, // @[ShiftReg.scala:36:14]
output [3:0] io_q // @[ShiftReg.scala:36:14]
);
wire [3:0] io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_2 = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_4 = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_6 = reset; // @[SynchronizerReg.scala:86:21]
wire [3:0] _io_q_T; // @[SynchronizerReg.scala:90:14]
wire [3:0] io_q_0; // @[SynchronizerReg.scala:80:7]
wire _output_T_1 = io_d_0[0]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire _output_T_3 = io_d_0[1]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_1; // @[ShiftReg.scala:48:24]
wire _output_T_5 = io_d_0[2]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_2; // @[ShiftReg.scala:48:24]
wire _output_T_7 = io_d_0[3]; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_3; // @[ShiftReg.scala:48:24]
wire [1:0] io_q_lo = {output_1, output_0}; // @[SynchronizerReg.scala:90:14]
wire [1:0] io_q_hi = {output_3, output_2}; // @[SynchronizerReg.scala:90:14]
assign _io_q_T = {io_q_hi, io_q_lo}; // @[SynchronizerReg.scala:90:14]
assign io_q_0 = _io_q_T; // @[SynchronizerReg.scala:80:7, :90:14]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_80 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_81 output_chain_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_2), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_3), // @[SynchronizerReg.scala:87:41]
.io_q (output_1)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_82 output_chain_2 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_4), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_5), // @[SynchronizerReg.scala:87:41]
.io_q (output_2)
); // @[ShiftReg.scala:45:23]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_83 output_chain_3 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T_6), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_7), // @[SynchronizerReg.scala:87:41]
.io_q (output_3)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File decode.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
package boom.v3.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket.Instructions32
import freechips.rocketchip.rocket.CustomInstructions._
import freechips.rocketchip.rocket.RVCExpander
import freechips.rocketchip.rocket.{CSR,Causes}
import freechips.rocketchip.util.{uintToBitPat,UIntIsOneOf}
import FUConstants._
import boom.v3.common._
import boom.v3.util._
// scalastyle:off
/**
* Abstract trait giving defaults and other relevant values to different Decode constants/
*/
abstract trait DecodeConstants
extends freechips.rocketchip.rocket.constants.ScalarOpConstants
with freechips.rocketchip.rocket.constants.MemoryOpConstants
{
val xpr64 = Y // TODO inform this from xLen
val DC2 = BitPat.dontCare(2) // Makes the listing below more readable
def decode_default: List[BitPat] =
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec? rs1 regtype | | | uses_stq | | |
// | | | micro-code | rs2 type| | | | is_amo | | |
// | | | | iq-type func unit | | | | | | | is_fence | | |
// | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
// | | | | | | | | | | | | | | | | | | | | | | | |
List(N, N, X, uopX , IQT_INT, FU_X , RT_X , DC2 ,DC2 ,X, IS_X, X, X, X, X, N, M_X, DC2, X, X, N, N, X, CSR.X)
val table: Array[(BitPat, List[BitPat])]
}
// scalastyle:on
/**
* Decoded control signals
*/
class CtrlSigs extends Bundle
{
val legal = Bool()
val fp_val = Bool()
val fp_single = Bool()
val uopc = UInt(UOPC_SZ.W)
val iq_type = UInt(IQT_SZ.W)
val fu_code = UInt(FUC_SZ.W)
val dst_type = UInt(2.W)
val rs1_type = UInt(2.W)
val rs2_type = UInt(2.W)
val frs3_en = Bool()
val imm_sel = UInt(IS_X.getWidth.W)
val uses_ldq = Bool()
val uses_stq = Bool()
val is_amo = Bool()
val is_fence = Bool()
val is_fencei = Bool()
val mem_cmd = UInt(freechips.rocketchip.rocket.M_SZ.W)
val wakeup_delay = UInt(2.W)
val bypassable = Bool()
val is_br = Bool()
val is_sys_pc2epc = Bool()
val inst_unique = Bool()
val flush_on_commit = Bool()
val csr_cmd = UInt(freechips.rocketchip.rocket.CSR.SZ.W)
val rocc = Bool()
def decode(inst: UInt, table: Iterable[(BitPat, List[BitPat])]) = {
val decoder = freechips.rocketchip.rocket.DecodeLogic(inst, XDecode.decode_default, table)
val sigs =
Seq(legal, fp_val, fp_single, uopc, iq_type, fu_code, dst_type, rs1_type,
rs2_type, frs3_en, imm_sel, uses_ldq, uses_stq, is_amo,
is_fence, is_fencei, mem_cmd, wakeup_delay, bypassable,
is_br, is_sys_pc2epc, inst_unique, flush_on_commit, csr_cmd)
sigs zip decoder map {case(s,d) => s := d}
rocc := false.B
this
}
}
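// Usage sketch (the decode-stage wiring lives elsewhere): an instruction is matched against
// the combined tables, e.g.
//   val cs = Wire(new CtrlSigs).decode(inst, XDecode.table ++ X64Decode.table)
// falling back to 'XDecode.decode_default' (legal = N) when no pattern matches.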
// scalastyle:off
/**
* Decode constants for RV32
*/
object X32Decode extends DecodeConstants
{
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec? rs1 regtype | | | uses_stq | | |
// | | | micro-code | rs2 type| | | | is_amo | | |
// | | | | iq-type func unit | | | | | | | is_fence | | |
// | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
val table: Array[(BitPat, List[BitPat])] = Array(// | | | | | | | | | | | | | | | | | |
Instructions32.SLLI ->
List(Y, N, X, uopSLLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
Instructions32.SRLI ->
List(Y, N, X, uopSRLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
Instructions32.SRAI ->
List(Y, N, X, uopSRAI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N)
)
}
/**
* Decode constants for RV64
*/
object X64Decode extends DecodeConstants
{
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec? rs1 regtype | | | uses_stq | | |
// | | | micro-code | rs2 type| | | | is_amo | | |
// | | | | iq-type func unit | | | | | | | is_fence | | |
// | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
val table: Array[(BitPat, List[BitPat])] = Array(// | | | | | | | | | | | | | | | | | |
LD -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
LWU -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
SD -> List(Y, N, X, uopSTA , IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
SLLI -> List(Y, N, X, uopSLLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRLI -> List(Y, N, X, uopSRLI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRAI -> List(Y, N, X, uopSRAI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
ADDIW -> List(Y, N, X, uopADDIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SLLIW -> List(Y, N, X, uopSLLIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRAIW -> List(Y, N, X, uopSRAIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRLIW -> List(Y, N, X, uopSRLIW, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
ADDW -> List(Y, N, X, uopADDW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SUBW -> List(Y, N, X, uopSUBW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SLLW -> List(Y, N, X, uopSLLW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRAW -> List(Y, N, X, uopSRAW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRLW -> List(Y, N, X, uopSRLW , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N)
)
}
/**
* Overall Decode constants
*/
object XDecode extends DecodeConstants
{
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec? rs1 regtype | | | uses_stq | | |
// | | | micro-code | rs2 type| | | | is_amo | | |
// | | | | iq-type func unit | | | | | | | is_fence | | |
// | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
val table: Array[(BitPat, List[BitPat])] = Array(// | | | | | | | | | | | | | | | | | |
LW -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
LH -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
LHU -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
LB -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
LBU -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM , RT_FIX, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 3.U, N, N, N, N, N, CSR.N),
SW -> List(Y, N, X, uopSTA , IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
SH -> List(Y, N, X, uopSTA , IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
SB -> List(Y, N, X, uopSTA , IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
LUI -> List(Y, N, X, uopLUI , IQT_INT, FU_ALU , RT_FIX, RT_X , RT_X , N, IS_U, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
ADDI -> List(Y, N, X, uopADDI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
ANDI -> List(Y, N, X, uopANDI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
ORI -> List(Y, N, X, uopORI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
XORI -> List(Y, N, X, uopXORI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SLTI -> List(Y, N, X, uopSLTI , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SLTIU -> List(Y, N, X, uopSLTIU, IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SLL -> List(Y, N, X, uopSLL , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
ADD -> List(Y, N, X, uopADD , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SUB -> List(Y, N, X, uopSUB , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SLT -> List(Y, N, X, uopSLT , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SLTU -> List(Y, N, X, uopSLTU , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
AND -> List(Y, N, X, uopAND , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
OR -> List(Y, N, X, uopOR , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
XOR -> List(Y, N, X, uopXOR , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRA -> List(Y, N, X, uopSRA , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_I, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
SRL -> List(Y, N, X, uopSRL , IQT_INT, FU_ALU , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 1.U, Y, N, N, N, N, CSR.N),
MUL -> List(Y, N, X, uopMUL , IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
MULH -> List(Y, N, X, uopMULH , IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
MULHU -> List(Y, N, X, uopMULHU, IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
MULHSU -> List(Y, N, X, uopMULHSU,IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
MULW -> List(Y, N, X, uopMULW , IQT_INT, FU_MUL , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
DIV -> List(Y, N, X, uopDIV , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
DIVU -> List(Y, N, X, uopDIVU , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
REM -> List(Y, N, X, uopREM , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
REMU -> List(Y, N, X, uopREMU , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
DIVW -> List(Y, N, X, uopDIVW , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
DIVUW -> List(Y, N, X, uopDIVUW, IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
REMW -> List(Y, N, X, uopREMW , IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
REMUW -> List(Y, N, X, uopREMUW, IQT_INT, FU_DIV , RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
AUIPC -> List(Y, N, X, uopAUIPC, IQT_INT, FU_JMP , RT_FIX, RT_X , RT_X , N, IS_U, N, N, N, N, N, M_X , 1.U, N, N, N, N, N, CSR.N), // use BRU for the PC read
JAL -> List(Y, N, X, uopJAL , IQT_INT, FU_JMP , RT_FIX, RT_X , RT_X , N, IS_J, N, N, N, N, N, M_X , 1.U, N, N, N, N, N, CSR.N),
JALR -> List(Y, N, X, uopJALR , IQT_INT, FU_JMP , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 1.U, N, N, N, N, N, CSR.N),
BEQ -> List(Y, N, X, uopBEQ , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N),
BNE -> List(Y, N, X, uopBNE , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N),
BGE -> List(Y, N, X, uopBGE , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N),
BGEU -> List(Y, N, X, uopBGEU , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N),
BLT -> List(Y, N, X, uopBLT , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N),
BLTU -> List(Y, N, X, uopBLTU , IQT_INT, FU_ALU , RT_X , RT_FIX, RT_FIX, N, IS_B, N, N, N, N, N, M_X , 0.U, N, Y, N, N, N, CSR.N),
// I-type, the immediate12 holds the CSR register.
CSRRW -> List(Y, N, X, uopCSRRW, IQT_INT, FU_CSR , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.W),
CSRRS -> List(Y, N, X, uopCSRRS, IQT_INT, FU_CSR , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.S),
CSRRC -> List(Y, N, X, uopCSRRC, IQT_INT, FU_CSR , RT_FIX, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.C),
CSRRWI -> List(Y, N, X, uopCSRRWI,IQT_INT, FU_CSR , RT_FIX, RT_PAS, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.W),
CSRRSI -> List(Y, N, X, uopCSRRSI,IQT_INT, FU_CSR , RT_FIX, RT_PAS, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.S),
CSRRCI -> List(Y, N, X, uopCSRRCI,IQT_INT, FU_CSR , RT_FIX, RT_PAS, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.C),
SFENCE_VMA->List(Y,N, X, uopSFENCE,IQT_MEM, FU_MEM , RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N,M_SFENCE,0.U,N, N, N, Y, Y, CSR.N),
ECALL -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, Y, Y, Y, CSR.I),
EBREAK -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, Y, Y, Y, CSR.I),
SRET -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.I),
MRET -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.I),
DRET -> List(Y, N, X, uopERET ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.I),
WFI -> List(Y, N, X, uopWFI ,IQT_INT, FU_CSR , RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, Y, Y, CSR.I),
FENCE_I -> List(Y, N, X, uopNOP , IQT_INT, FU_X , RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, Y, M_X , 0.U, N, N, N, Y, Y, CSR.N),
FENCE -> List(Y, N, X, uopFENCE, IQT_INT, FU_MEM , RT_X , RT_X , RT_X , N, IS_X, N, Y, N, Y, N, M_X , 0.U, N, N, N, Y, Y, CSR.N), // TODO PERF make fence higher performance
// currently serializes pipeline
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec? rs1 regtype | | | uses_stq | | |
// | | | micro-code | rs2 type| | | | is_amo | | |
// | | | | iq-type func unit | | | | | | | is_fence | | |
// | | | | | | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
// A-type | | | | | | | | | | | | | | | | | | | | | | | |
AMOADD_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_ADD, 0.U,N, N, N, Y, Y, CSR.N), // TODO make AMOs higher performance
AMOXOR_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_XOR, 0.U,N, N, N, Y, Y, CSR.N),
AMOSWAP_W->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_SWAP,0.U,N, N, N, Y, Y, CSR.N),
AMOAND_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_AND, 0.U,N, N, N, Y, Y, CSR.N),
AMOOR_W -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_OR, 0.U,N, N, N, Y, Y, CSR.N),
AMOMIN_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MIN, 0.U,N, N, N, Y, Y, CSR.N),
AMOMINU_W->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MINU,0.U,N, N, N, Y, Y, CSR.N),
AMOMAX_W-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAX, 0.U,N, N, N, Y, Y, CSR.N),
AMOMAXU_W->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAXU,0.U,N, N, N, Y, Y, CSR.N),
AMOADD_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_ADD, 0.U,N, N, N, Y, Y, CSR.N),
AMOXOR_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_XOR, 0.U,N, N, N, Y, Y, CSR.N),
AMOSWAP_D->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_SWAP,0.U,N, N, N, Y, Y, CSR.N),
AMOAND_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_AND, 0.U,N, N, N, Y, Y, CSR.N),
AMOOR_D -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_OR, 0.U,N, N, N, Y, Y, CSR.N),
AMOMIN_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MIN, 0.U,N, N, N, Y, Y, CSR.N),
AMOMINU_D->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MINU,0.U,N, N, N, Y, Y, CSR.N),
AMOMAX_D-> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAX, 0.U,N, N, N, Y, Y, CSR.N),
AMOMAXU_D->List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XA_MAXU,0.U,N, N, N, Y, Y, CSR.N),
LR_W -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_X , N, IS_X, Y, N, N, N, N, M_XLR , 0.U,N, N, N, Y, Y, CSR.N),
LR_D -> List(Y, N, X, uopLD , IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_X , N, IS_X, Y, N, N, N, N, M_XLR , 0.U,N, N, N, Y, Y, CSR.N),
SC_W -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XSC , 0.U,N, N, N, Y, Y, CSR.N),
SC_D -> List(Y, N, X, uopAMO_AG, IQT_MEM, FU_MEM, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, Y, Y, N, N, M_XSC , 0.U,N, N, N, Y, Y, CSR.N)
)
}
/**
* FP Decode constants
*/
object FDecode extends DecodeConstants
{
val table: Array[(BitPat, List[BitPat])] = Array(
// frs3_en wakeup_delay
// | imm sel | bypassable (aka, known/fixed latency)
// | | uses_ldq | | is_br
// is val inst? rs1 regtype | | | uses_stq | | |
// | is fp inst? | rs2 type| | | | is_amo | | |
// | | is dst single-prec? | | | | | | | is_fence | | |
// | | | micro-opcode | | | | | | | | is_fencei | | | is breakpoint or ecall
// | | | | iq_type func dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | unit regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
FLW -> List(Y, Y, Y, uopLD , IQT_MEM, FU_MEM, RT_FLT, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 0.U, N, N, N, N, N, CSR.N),
FLD -> List(Y, Y, N, uopLD , IQT_MEM, FU_MEM, RT_FLT, RT_FIX, RT_X , N, IS_I, Y, N, N, N, N, M_XRD, 0.U, N, N, N, N, N, CSR.N),
FSW -> List(Y, Y, Y, uopSTA , IQT_MFP,FU_F2IMEM,RT_X , RT_FIX, RT_FLT, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N), // sort of a lie; broken into two micro-ops
FSD -> List(Y, Y, N, uopSTA , IQT_MFP,FU_F2IMEM,RT_X , RT_FIX, RT_FLT, N, IS_S, N, Y, N, N, N, M_XWR, 0.U, N, N, N, N, N, CSR.N),
FCLASS_S-> List(Y, Y, Y, uopFCLASS_S,IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCLASS_D-> List(Y, Y, N, uopFCLASS_D,IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMV_W_X -> List(Y, Y, Y, uopFMV_W_X, IQT_INT, FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMV_D_X -> List(Y, Y, N, uopFMV_D_X, IQT_INT, FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMV_X_W -> List(Y, Y, Y, uopFMV_X_W, IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMV_X_D -> List(Y, Y, N, uopFMV_X_D, IQT_FP , FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSGNJ_S -> List(Y, Y, Y, uopFSGNJ_S, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSGNJ_D -> List(Y, Y, N, uopFSGNJ_D, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSGNJX_S-> List(Y, Y, Y, uopFSGNJ_S, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSGNJX_D-> List(Y, Y, N, uopFSGNJ_D, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSGNJN_S-> List(Y, Y, Y, uopFSGNJ_S, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSGNJN_D-> List(Y, Y, N, uopFSGNJ_D, IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
// FP to FP
FCVT_S_D-> List(Y, Y, Y, uopFCVT_S_D,IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_D_S-> List(Y, Y, N, uopFCVT_D_S,IQT_FP , FU_FPU, RT_FLT, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
// Int to FP
FCVT_S_W-> List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_S_WU->List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_S_L-> List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_S_LU->List(Y, Y, Y, uopFCVT_S_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_D_W-> List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_D_WU->List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_D_L-> List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_D_LU->List(Y, Y, N, uopFCVT_D_X, IQT_INT,FU_I2F, RT_FLT, RT_FIX, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
// FP to Int
FCVT_W_S-> List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_WU_S->List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_L_S-> List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_LU_S->List(Y, Y, Y, uopFCVT_X_S, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_W_D-> List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_WU_D->List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_L_D-> List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FCVT_LU_D->List(Y, Y, N, uopFCVT_X_D, IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_X , N, IS_I, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
// "fp_single" is used for wb_data formatting (and debugging)
FEQ_S ->List(Y, Y, Y, uopCMPR_S , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FLT_S ->List(Y, Y, Y, uopCMPR_S , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FLE_S ->List(Y, Y, Y, uopCMPR_S , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FEQ_D ->List(Y, Y, N, uopCMPR_D , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FLT_D ->List(Y, Y, N, uopCMPR_D , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FLE_D ->List(Y, Y, N, uopCMPR_D , IQT_FP, FU_F2I, RT_FIX, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMIN_S ->List(Y, Y, Y,uopFMINMAX_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMAX_S ->List(Y, Y, Y,uopFMINMAX_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMIN_D ->List(Y, Y, N,uopFMINMAX_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMAX_D ->List(Y, Y, N,uopFMINMAX_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FADD_S ->List(Y, Y, Y, uopFADD_S , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSUB_S ->List(Y, Y, Y, uopFSUB_S , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMUL_S ->List(Y, Y, Y, uopFMUL_S , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FADD_D ->List(Y, Y, N, uopFADD_D , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSUB_D ->List(Y, Y, N, uopFSUB_D , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMUL_D ->List(Y, Y, N, uopFMUL_D , IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMADD_S ->List(Y, Y, Y, uopFMADD_S, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMSUB_S ->List(Y, Y, Y, uopFMSUB_S, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FNMADD_S ->List(Y, Y, Y, uopFNMADD_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FNMSUB_S ->List(Y, Y, Y, uopFNMSUB_S,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMADD_D ->List(Y, Y, N, uopFMADD_D, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FMSUB_D ->List(Y, Y, N, uopFMSUB_D, IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FNMADD_D ->List(Y, Y, N, uopFNMADD_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FNMSUB_D ->List(Y, Y, N, uopFNMSUB_D,IQT_FP, FU_FPU, RT_FLT, RT_FLT, RT_FLT, Y, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N)
)
}
/**
* FP Divide SquareRoot Constants
*/
object FDivSqrtDecode extends DecodeConstants
{
val table: Array[(BitPat, List[BitPat])] = Array(
// frs3_en wakeup_delay
// | imm sel | bypassable (aka, known/fixed latency)
// | | uses_ldq | | is_br
// is val inst? rs1 regtype | | | uses_stq | | |
// | is fp inst? | rs2 type| | | | is_amo | | |
// | | is dst single-prec? | | | | | | | is_fence | | |
// | | | micro-opcode | | | | | | | | is_fencei | | | is breakpoint or ecall
// | | | | iq-type func dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | unit regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
FDIV_S ->List(Y, Y, Y, uopFDIV_S , IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FDIV_D ->List(Y, Y, N, uopFDIV_D , IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_FLT, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSQRT_S ->List(Y, Y, Y, uopFSQRT_S, IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
FSQRT_D ->List(Y, Y, N, uopFSQRT_D, IQT_FP, FU_FDV, RT_FLT, RT_FLT, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N)
)
}
//scalastyle:on
/**
* RoCC initial decode
*/
object RoCCDecode extends DecodeConstants
{
// Note: We use FU_CSR since CSR instructions cannot co-execute with RoCC instructions
// frs3_en wakeup_delay
// is val inst? | imm sel | bypassable (aka, known/fixed latency)
// | is fp inst? | | uses_ldq | | is_br
// | | is single-prec rs1 regtype | | | uses_stq | | |
// | | | | rs2 type| | | | is_amo | | |
// | | | micro-code func unit | | | | | | | is_fence | | |
// | | | | iq-type | | | | | | | | | is_fencei | | | is breakpoint or ecall?
// | | | | | | dst | | | | | | | | | mem | | | | is unique? (clear pipeline for it)
// | | | | | | regtype | | | | | | | | | cmd | | | | | flush on commit
// | | | | | | | | | | | | | | | | | | | | | | | csr cmd
// | | | | | | | | | | | | | | | | | | | | | | | |
val table: Array[(BitPat, List[BitPat])] = Array(// | | | | | | | | | | | | | | | | | | |
CUSTOM0 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM0_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM0_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM0_RD ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM0_RD_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM0_RD_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM1_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM1_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM1_RD ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM1_RD_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM1_RD_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM2_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM2_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM2_RD ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM2_RD_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM2_RD_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM3 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM3_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM3_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_X , RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM3_RD ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_X , RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM3_RD_RS1 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_X , N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N),
CUSTOM3_RD_RS1_RS2 ->List(Y, N, X, uopROCC , IQT_INT, FU_CSR, RT_FIX, RT_FIX, RT_FIX, N, IS_X, N, N, N, N, N, M_X , 0.U, N, N, N, N, N, CSR.N)
)
}
/**
* IO bundle for the Decode unit
*/
class DecodeUnitIo(implicit p: Parameters) extends BoomBundle
{
val enq = new Bundle { val uop = Input(new MicroOp()) }
val deq = new Bundle { val uop = Output(new MicroOp()) }
// from CSRFile
val status = Input(new freechips.rocketchip.rocket.MStatus())
val csr_decode = Flipped(new freechips.rocketchip.rocket.CSRDecodeIO)
val interrupt = Input(Bool())
val interrupt_cause = Input(UInt(xLen.W))
}
/**
* Decode unit that takes in a single instruction and generates a MicroOp.
*/
class DecodeUnit(implicit p: Parameters) extends BoomModule
with freechips.rocketchip.rocket.constants.MemoryOpConstants
{
val io = IO(new DecodeUnitIo)
val uop = Wire(new MicroOp())
uop := io.enq.uop
var decode_table = XDecode.table
if (usingFPU) decode_table ++= FDecode.table
if (usingFPU && usingFDivSqrt) decode_table ++= FDivSqrtDecode.table
if (usingRoCC) decode_table ++= RoCCDecode.table
decode_table ++= (if (xLen == 64) X64Decode.table else X32Decode.table)
val inst = uop.inst
val cs = Wire(new CtrlSigs()).decode(inst, decode_table)
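// CtrlSigs.decode (defined elsewhere in this package) matches `inst` against `decode_table`,
// presumably via rocket-chip's DecodeLogic, filling every control field and falling back to
// the default "illegal" row when nothing matches.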
// Exception Handling
io.csr_decode.inst := inst
val csr_en = cs.csr_cmd.isOneOf(CSR.S, CSR.C, CSR.W)
val csr_ren = cs.csr_cmd.isOneOf(CSR.S, CSR.C) && uop.lrs1 === 0.U
val system_insn = cs.csr_cmd === CSR.I
val sfence = cs.uopc === uopSFENCE
val cs_legal = cs.legal
// dontTouch(cs_legal)
val id_illegal_insn = !cs_legal ||
cs.fp_val && io.csr_decode.fp_illegal || // TODO check for illegal rm mode: (io.fpu.illegal_rm)
cs.rocc && io.csr_decode.rocc_illegal ||
cs.is_amo && !io.status.isa('a'-'a') ||
(cs.fp_val && !cs.fp_single) && !io.status.isa('d'-'a') ||
csr_en && (io.csr_decode.read_illegal || !csr_ren && io.csr_decode.write_illegal) ||
((sfence || system_insn) && io.csr_decode.system_illegal)
// cs.div && !csr.io.status.isa('m'-'a') || TODO check for illegal div instructions
def checkExceptions(x: Seq[(Bool, UInt)]) =
(x.map(_._1).reduce(_||_), PriorityMux(x))
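// The list below is priority-ordered: the reduce ORs the valid bits together, while
// PriorityMux returns the cause of the first asserted entry, so interrupts outrank
// breakpoints, which outrank fetch faults, which outrank illegal-instruction.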
val (xcpt_valid, xcpt_cause) = checkExceptions(List(
(io.interrupt && !io.enq.uop.is_sfb, io.interrupt_cause), // Disallow interrupts while we are handling a SFB
(uop.bp_debug_if, (CSR.debugTriggerCause).U),
(uop.bp_xcpt_if, (Causes.breakpoint).U),
(uop.xcpt_pf_if, (Causes.fetch_page_fault).U),
(uop.xcpt_ae_if, (Causes.fetch_access).U),
(id_illegal_insn, (Causes.illegal_instruction).U)))
uop.exception := xcpt_valid
uop.exc_cause := xcpt_cause
//-------------------------------------------------------------
uop.uopc := cs.uopc
uop.iq_type := cs.iq_type
uop.fu_code := cs.fu_code
// x-registers placed in 0-31, f-registers placed in 32-63.
// This allows us to straight-up compare register specifiers and not need to
// verify the rtypes (e.g., bypassing in rename).
uop.ldst := inst(RD_MSB,RD_LSB)
uop.lrs1 := inst(RS1_MSB,RS1_LSB)
uop.lrs2 := inst(RS2_MSB,RS2_LSB)
uop.lrs3 := inst(RS3_MSB,RS3_LSB)
uop.ldst_val := cs.dst_type =/= RT_X && !(uop.ldst === 0.U && uop.dst_rtype === RT_FIX)
uop.dst_rtype := cs.dst_type
uop.lrs1_rtype := cs.rs1_type
uop.lrs2_rtype := cs.rs2_type
uop.frs3_en := cs.frs3_en
uop.ldst_is_rs1 := uop.is_sfb_shadow
// SFB optimization
when (uop.is_sfb_shadow && cs.rs2_type === RT_X) {
uop.lrs2_rtype := RT_FIX
uop.lrs2 := inst(RD_MSB,RD_LSB)
uop.ldst_is_rs1 := false.B
} .elsewhen (uop.is_sfb_shadow && cs.uopc === uopADD && inst(RS1_MSB,RS1_LSB) === 0.U) {
uop.uopc := uopMOV
uop.lrs1 := inst(RD_MSB, RD_LSB)
uop.ldst_is_rs1 := true.B
}
when (uop.is_sfb_br) {
uop.fu_code := FU_JMP
}
uop.fp_val := cs.fp_val
uop.fp_single := cs.fp_single // TODO use this signal instead of the FPU decode's table signal?
uop.mem_cmd := cs.mem_cmd
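// For SFENCE.VMA / flush commands, mem_size instead packs two flags: whether rs2 (the ASID)
// and rs1 (the virtual address) are non-zero. For ordinary loads/stores it is the size field
// inst(13,12), and mem_signed comes from funct3 bit 2 (inst(14)), which is set only for the
// unsigned loads LBU/LHU/LWU.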
uop.mem_size := Mux(cs.mem_cmd.isOneOf(M_SFENCE, M_FLUSH_ALL), Cat(uop.lrs2 =/= 0.U, uop.lrs1 =/= 0.U), inst(13,12))
uop.mem_signed := !inst(14)
uop.uses_ldq := cs.uses_ldq
uop.uses_stq := cs.uses_stq
uop.is_amo := cs.is_amo
uop.is_fence := cs.is_fence
uop.is_fencei := cs.is_fencei
uop.is_sys_pc2epc := cs.is_sys_pc2epc
uop.is_unique := cs.inst_unique
uop.flush_on_commit := cs.flush_on_commit || (csr_en && !csr_ren && io.csr_decode.write_flush)
uop.bypassable := cs.bypassable
//-------------------------------------------------------------
// immediates
// repackage the immediate, and then pass the fewest number of bits around
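// imm_packed is 20 bits: Cat(inst(31,25), inst(11,7) for S/B-types else inst(24,20), inst(19,12)).
// Later immediate generation re-expands it according to imm_sel, so the uop carries only the
// union of immediate source bits rather than the full 32-bit instruction.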
val di24_20 = Mux(cs.imm_sel === IS_B || cs.imm_sel === IS_S, inst(11,7), inst(24,20))
uop.imm_packed := Cat(inst(31,25), di24_20, inst(19,12))
//-------------------------------------------------------------
uop.is_br := cs.is_br
uop.is_jal := (uop.uopc === uopJAL)
uop.is_jalr := (uop.uopc === uopJALR)
// uop.is_jump := cs.is_jal || (uop.uopc === uopJALR)
// uop.is_ret := (uop.uopc === uopJALR) &&
// (uop.ldst === X0) &&
// (uop.lrs1 === RA)
// uop.is_call := (uop.uopc === uopJALR || uop.uopc === uopJAL) &&
// (uop.ldst === RA)
//-------------------------------------------------------------
io.deq.uop := uop
}
/**
* Smaller decode unit used by the Frontend to classify branch and jump
* instructions (and SFB-shadowable ops).
* Accepts only EXPANDED RVC instructions.
*/
class BranchDecodeSignals(implicit p: Parameters) extends BoomBundle
{
val is_ret = Bool()
val is_call = Bool()
val target = UInt(vaddrBitsExtended.W)
val cfi_type = UInt(CFI_SZ.W)
// Is this branch a short forwards jump?
val sfb_offset = Valid(UInt(log2Ceil(icBlockBytes).W))
// Is this instruction allowed to be inside a sfb?
val shadowable = Bool()
}
class BranchDecode(implicit p: Parameters) extends BoomModule
{
val io = IO(new Bundle {
val inst = Input(UInt(32.W))
val pc = Input(UInt(vaddrBitsExtended.W))
val out = Output(new BranchDecodeSignals)
})
val bpd_csignals =
freechips.rocketchip.rocket.DecodeLogic(io.inst,
List[BitPat](N, N, N, N, X),
//// is br?
//// | is jal?
//// | | is jalr?
//// | | |
//// | | | shadowable
//// | | | | has_rs2
//// | | | | |
Array[(BitPat, List[BitPat])](
JAL -> List(N, Y, N, N, X),
JALR -> List(N, N, Y, N, X),
BEQ -> List(Y, N, N, N, X),
BNE -> List(Y, N, N, N, X),
BGE -> List(Y, N, N, N, X),
BGEU -> List(Y, N, N, N, X),
BLT -> List(Y, N, N, N, X),
BLTU -> List(Y, N, N, N, X),
SLLI -> List(N, N, N, Y, N),
SRLI -> List(N, N, N, Y, N),
SRAI -> List(N, N, N, Y, N),
ADDIW -> List(N, N, N, Y, N),
SLLIW -> List(N, N, N, Y, N),
SRAIW -> List(N, N, N, Y, N),
SRLIW -> List(N, N, N, Y, N),
ADDW -> List(N, N, N, Y, Y),
SUBW -> List(N, N, N, Y, Y),
SLLW -> List(N, N, N, Y, Y),
SRAW -> List(N, N, N, Y, Y),
SRLW -> List(N, N, N, Y, Y),
LUI -> List(N, N, N, Y, N),
ADDI -> List(N, N, N, Y, N),
ANDI -> List(N, N, N, Y, N),
ORI -> List(N, N, N, Y, N),
XORI -> List(N, N, N, Y, N),
SLTI -> List(N, N, N, Y, N),
SLTIU -> List(N, N, N, Y, N),
SLL -> List(N, N, N, Y, Y),
ADD -> List(N, N, N, Y, Y),
SUB -> List(N, N, N, Y, Y),
SLT -> List(N, N, N, Y, Y),
SLTU -> List(N, N, N, Y, Y),
AND -> List(N, N, N, Y, Y),
OR -> List(N, N, N, Y, Y),
XOR -> List(N, N, N, Y, Y),
SRA -> List(N, N, N, Y, Y),
SRL -> List(N, N, N, Y, Y)
))
val cs_is_br = bpd_csignals(0)(0)
val cs_is_jal = bpd_csignals(1)(0)
val cs_is_jalr = bpd_csignals(2)(0)
val cs_is_shadowable = bpd_csignals(3)(0)
val cs_has_rs2 = bpd_csignals(4)(0)
io.out.is_call := (cs_is_jal || cs_is_jalr) && GetRd(io.inst) === RA
io.out.is_ret := cs_is_jalr && GetRs1(io.inst) === BitPat("b00?01") && GetRd(io.inst) === X0
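// The BitPat b00?01 above matches x1 and x5, the two link registers that the RISC-V calling
// convention designates for return-address (RAS) hints.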
io.out.target := Mux(cs_is_br, ComputeBranchTarget(io.pc, io.inst, xLen),
ComputeJALTarget(io.pc, io.inst, xLen))
io.out.cfi_type :=
Mux(cs_is_jalr,
CFI_JALR,
Mux(cs_is_jal,
CFI_JAL,
Mux(cs_is_br,
CFI_BR,
CFI_X)))
val br_offset = Cat(io.inst(7), io.inst(30,25), io.inst(11,8), 0.U(1.W))
// A branch can be an SFB only if it points forwards (inst(31) clear, i.e. positive offset),
// the offset is nonzero, and it is smaller than one instruction cache block (icBlockBytes)
io.out.sfb_offset.valid := cs_is_br && !io.inst(31) && br_offset =/= 0.U && (br_offset >> log2Ceil(icBlockBytes)) === 0.U
io.out.sfb_offset.bits := br_offset
io.out.shadowable := cs_is_shadowable && (
!cs_has_rs2 ||
(GetRs1(io.inst) === GetRd(io.inst)) ||
(io.inst === ADD && GetRs1(io.inst) === X0)
)
}
/**
* Track the current "branch mask", and give out the branch mask to each micro-op in Decode
* (each micro-op in the machine has a branch mask which says which branches it
* is being speculated under).
*
* @param pl_width pipeline width for the processor
*/
class BranchMaskGenerationLogic(val pl_width: Int)(implicit p: Parameters) extends BoomModule
{
val io = IO(new Bundle {
// guess if the uop is a branch (we'll catch this later)
val is_branch = Input(Vec(pl_width, Bool()))
// lock in that it's actually a branch and will fire, so we update
// the branch_masks.
val will_fire = Input(Vec(pl_width, Bool()))
// give out tag immediately (needed in rename)
// mask can come later in the cycle
val br_tag = Output(Vec(pl_width, UInt(brTagSz.W)))
val br_mask = Output(Vec(pl_width, UInt(maxBrCount.W)))
// tell decoders the branch mask has filled up, but on the granularity
// of an individual micro-op (so some micro-ops can go through)
val is_full = Output(Vec(pl_width, Bool()))
val brupdate = Input(new BrUpdateInfo())
val flush_pipeline = Input(Bool())
val debug_branch_mask = Output(UInt(maxBrCount.W))
})
val branch_mask = RegInit(0.U(maxBrCount.W))
//-------------------------------------------------------------
// Give out the branch tag to each branch micro-op
var allocate_mask = branch_mask
val tag_masks = Wire(Vec(pl_width, UInt(maxBrCount.W)))
for (w <- 0 until pl_width) {
// TODO this is a loss of performance as we're blocking branches based on potentially fake branches
io.is_full(w) := (allocate_mask === ~(0.U(maxBrCount.W))) && io.is_branch(w)
// find br_tag and compute next br_mask
val new_br_tag = Wire(UInt(brTagSz.W))
new_br_tag := 0.U
tag_masks(w) := 0.U
for (i <- maxBrCount-1 to 0 by -1) {
when (~allocate_mask(i)) {
new_br_tag := i.U
tag_masks(w) := (1.U << i.U)
}
}
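// The down-counting loop above relies on Chisel last-connect semantics: the last `when` that
// fires has the smallest index, so the lowest free bit of allocate_mask supplies the new
// branch tag and its one-hot tag_mask.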
io.br_tag(w) := new_br_tag
allocate_mask = Mux(io.is_branch(w), tag_masks(w) | allocate_mask, allocate_mask)
}
//-------------------------------------------------------------
// Give out the branch mask to each micro-op
// (kill off the bits that corresponded to branches that aren't going to fire)
var curr_mask = branch_mask
for (w <- 0 until pl_width) {
io.br_mask(w) := GetNewBrMask(io.brupdate, curr_mask)
curr_mask = Mux(io.will_fire(w), tag_masks(w) | curr_mask, curr_mask)
}
//-------------------------------------------------------------
// Update the current branch_mask
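// On a mispredict, only the bits in the offending uop's own br_mask survive (the branches it
// was itself speculated under); otherwise all bits are kept. GetNewBrMask additionally clears
// branches that resolved correctly this cycle.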
when (io.flush_pipeline) {
branch_mask := 0.U
} .otherwise {
val mask = Mux(io.brupdate.b2.mispredict,
io.brupdate.b2.uop.br_mask,
~(0.U(maxBrCount.W)))
branch_mask := GetNewBrMask(io.brupdate, curr_mask) & mask
}
io.debug_branch_mask := branch_mask
}
File consts.scala:
//******************************************************************************
// Copyright (c) 2011 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Constants
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v3.common.constants
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util.Str
import freechips.rocketchip.rocket.RVCExpander
/**
* Mixin for issue queue types
*/
trait IQType
{
val IQT_SZ = 3
val IQT_INT = 1.U(IQT_SZ.W)
val IQT_MEM = 2.U(IQT_SZ.W)
val IQT_FP = 4.U(IQT_SZ.W)
val IQT_MFP = 6.U(IQT_SZ.W)
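// IQT_MFP is the bitwise OR of IQT_MEM and IQT_FP; FP stores (FSW/FSD) are tagged with it,
// presumably so they can be steered into both the memory and FP issue queues.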
}
/**
* Mixin for scalar operation constants
*/
trait ScalarOpConstants
{
val X = BitPat("b?")
val Y = BitPat("b1")
val N = BitPat("b0")
//************************************
// Extra Constants
// Which branch predictor predicted us
val BSRC_SZ = 2
val BSRC_1 = 0.U(BSRC_SZ.W) // 1-cycle branch pred
val BSRC_2 = 1.U(BSRC_SZ.W) // 2-cycle branch pred
val BSRC_3 = 2.U(BSRC_SZ.W) // 3-cycle branch pred
val BSRC_C = 3.U(BSRC_SZ.W) // core branch resolution
//************************************
// Control Signals
// CFI types
val CFI_SZ = 3
val CFI_X = 0.U(CFI_SZ.W) // Not a CFI instruction
val CFI_BR = 1.U(CFI_SZ.W) // Branch
val CFI_JAL = 2.U(CFI_SZ.W) // JAL
val CFI_JALR = 3.U(CFI_SZ.W) // JALR
// PC Select Signal
val PC_PLUS4 = 0.U(2.W) // PC + 4
val PC_BRJMP = 1.U(2.W) // brjmp_target
val PC_JALR = 2.U(2.W) // jump_reg_target
// Branch Type
val BR_N = 0.U(4.W) // Next
val BR_NE = 1.U(4.W) // Branch on NotEqual
val BR_EQ = 2.U(4.W) // Branch on Equal
val BR_GE = 3.U(4.W) // Branch on Greater/Equal
val BR_GEU = 4.U(4.W) // Branch on Greater/Equal Unsigned
val BR_LT = 5.U(4.W) // Branch on Less Than
val BR_LTU = 6.U(4.W) // Branch on Less Than Unsigned
val BR_J = 7.U(4.W) // Jump
val BR_JR = 8.U(4.W) // Jump Register
// RS1 Operand Select Signal
val OP1_RS1 = 0.U(2.W) // Register Source #1
val OP1_ZERO= 1.U(2.W)
val OP1_PC = 2.U(2.W)
val OP1_X = BitPat("b??")
// RS2 Operand Select Signal
val OP2_RS2 = 0.U(3.W) // Register Source #2
val OP2_IMM = 1.U(3.W) // immediate
val OP2_ZERO= 2.U(3.W) // constant 0
val OP2_NEXT= 3.U(3.W) // constant 2/4 (for PC+2/4)
val OP2_IMMC= 4.U(3.W) // for CSR imm found in RS1
val OP2_X = BitPat("b???")
// Register File Write Enable Signal
val REN_0 = false.B
val REN_1 = true.B
// Is 32b Word or 64b Doubleword?
val SZ_DW = 1
val DW_X = true.B // Bool(xLen==64)
val DW_32 = false.B
val DW_64 = true.B
val DW_XPR = true.B // Bool(xLen==64)
// Memory Enable Signal
val MEN_0 = false.B
val MEN_1 = true.B
val MEN_X = false.B
// Immediate Extend Select
val IS_I = 0.U(3.W) // I-Type (LD,ALU)
val IS_S = 1.U(3.W) // S-Type (ST)
val IS_B = 2.U(3.W) // SB-Type (BR)
val IS_U = 3.U(3.W) // U-Type (LUI/AUIPC)
val IS_J = 4.U(3.W) // UJ-Type (J/JAL)
val IS_X = BitPat("b???")
// Decode Stage Control Signals
val RT_FIX = 0.U(2.W)
val RT_FLT = 1.U(2.W)
val RT_PAS = 3.U(2.W) // pass-through (prs1 := lrs1, etc)
val RT_X = 2.U(2.W) // not-a-register (but shouldn't get a busy-bit, etc.)
// TODO rename RT_NAR
// Micro-op opcodes
// TODO change micro-op opcodes into using enum
val UOPC_SZ = 7
val uopX = BitPat.dontCare(UOPC_SZ)
val uopNOP = 0.U(UOPC_SZ.W)
val uopLD = 1.U(UOPC_SZ.W)
val uopSTA = 2.U(UOPC_SZ.W) // store address generation
val uopSTD = 3.U(UOPC_SZ.W) // store data generation
val uopLUI = 4.U(UOPC_SZ.W)
val uopADDI = 5.U(UOPC_SZ.W)
val uopANDI = 6.U(UOPC_SZ.W)
val uopORI = 7.U(UOPC_SZ.W)
val uopXORI = 8.U(UOPC_SZ.W)
val uopSLTI = 9.U(UOPC_SZ.W)
val uopSLTIU= 10.U(UOPC_SZ.W)
val uopSLLI = 11.U(UOPC_SZ.W)
val uopSRAI = 12.U(UOPC_SZ.W)
val uopSRLI = 13.U(UOPC_SZ.W)
val uopSLL = 14.U(UOPC_SZ.W)
val uopADD = 15.U(UOPC_SZ.W)
val uopSUB = 16.U(UOPC_SZ.W)
val uopSLT = 17.U(UOPC_SZ.W)
val uopSLTU = 18.U(UOPC_SZ.W)
val uopAND = 19.U(UOPC_SZ.W)
val uopOR = 20.U(UOPC_SZ.W)
val uopXOR = 21.U(UOPC_SZ.W)
val uopSRA = 22.U(UOPC_SZ.W)
val uopSRL = 23.U(UOPC_SZ.W)
val uopBEQ = 24.U(UOPC_SZ.W)
val uopBNE = 25.U(UOPC_SZ.W)
val uopBGE = 26.U(UOPC_SZ.W)
val uopBGEU = 27.U(UOPC_SZ.W)
val uopBLT = 28.U(UOPC_SZ.W)
val uopBLTU = 29.U(UOPC_SZ.W)
val uopCSRRW= 30.U(UOPC_SZ.W)
val uopCSRRS= 31.U(UOPC_SZ.W)
val uopCSRRC= 32.U(UOPC_SZ.W)
val uopCSRRWI=33.U(UOPC_SZ.W)
val uopCSRRSI=34.U(UOPC_SZ.W)
val uopCSRRCI=35.U(UOPC_SZ.W)
val uopJ = 36.U(UOPC_SZ.W)
val uopJAL = 37.U(UOPC_SZ.W)
val uopJALR = 38.U(UOPC_SZ.W)
val uopAUIPC= 39.U(UOPC_SZ.W)
//val uopSRET = 40.U(UOPC_SZ.W)
val uopCFLSH= 41.U(UOPC_SZ.W)
val uopFENCE= 42.U(UOPC_SZ.W)
val uopADDIW= 43.U(UOPC_SZ.W)
val uopADDW = 44.U(UOPC_SZ.W)
val uopSUBW = 45.U(UOPC_SZ.W)
val uopSLLIW= 46.U(UOPC_SZ.W)
val uopSLLW = 47.U(UOPC_SZ.W)
val uopSRAIW= 48.U(UOPC_SZ.W)
val uopSRAW = 49.U(UOPC_SZ.W)
val uopSRLIW= 50.U(UOPC_SZ.W)
val uopSRLW = 51.U(UOPC_SZ.W)
val uopMUL = 52.U(UOPC_SZ.W)
val uopMULH = 53.U(UOPC_SZ.W)
val uopMULHU= 54.U(UOPC_SZ.W)
val uopMULHSU=55.U(UOPC_SZ.W)
val uopMULW = 56.U(UOPC_SZ.W)
val uopDIV = 57.U(UOPC_SZ.W)
val uopDIVU = 58.U(UOPC_SZ.W)
val uopREM = 59.U(UOPC_SZ.W)
val uopREMU = 60.U(UOPC_SZ.W)
val uopDIVW = 61.U(UOPC_SZ.W)
val uopDIVUW= 62.U(UOPC_SZ.W)
val uopREMW = 63.U(UOPC_SZ.W)
val uopREMUW= 64.U(UOPC_SZ.W)
val uopFENCEI = 65.U(UOPC_SZ.W)
// = 66.U(UOPC_SZ.W)
val uopAMO_AG = 67.U(UOPC_SZ.W) // AMO-address gen (use normal STD for datagen)
val uopFMV_W_X = 68.U(UOPC_SZ.W)
val uopFMV_D_X = 69.U(UOPC_SZ.W)
val uopFMV_X_W = 70.U(UOPC_SZ.W)
val uopFMV_X_D = 71.U(UOPC_SZ.W)
val uopFSGNJ_S = 72.U(UOPC_SZ.W)
val uopFSGNJ_D = 73.U(UOPC_SZ.W)
val uopFCVT_S_D = 74.U(UOPC_SZ.W)
val uopFCVT_D_S = 75.U(UOPC_SZ.W)
val uopFCVT_S_X = 76.U(UOPC_SZ.W)
val uopFCVT_D_X = 77.U(UOPC_SZ.W)
val uopFCVT_X_S = 78.U(UOPC_SZ.W)
val uopFCVT_X_D = 79.U(UOPC_SZ.W)
val uopCMPR_S = 80.U(UOPC_SZ.W)
val uopCMPR_D = 81.U(UOPC_SZ.W)
val uopFCLASS_S = 82.U(UOPC_SZ.W)
val uopFCLASS_D = 83.U(UOPC_SZ.W)
val uopFMINMAX_S = 84.U(UOPC_SZ.W)
val uopFMINMAX_D = 85.U(UOPC_SZ.W)
// = 86.U(UOPC_SZ.W)
val uopFADD_S = 87.U(UOPC_SZ.W)
val uopFSUB_S = 88.U(UOPC_SZ.W)
val uopFMUL_S = 89.U(UOPC_SZ.W)
val uopFADD_D = 90.U(UOPC_SZ.W)
val uopFSUB_D = 91.U(UOPC_SZ.W)
val uopFMUL_D = 92.U(UOPC_SZ.W)
val uopFMADD_S = 93.U(UOPC_SZ.W)
val uopFMSUB_S = 94.U(UOPC_SZ.W)
val uopFNMADD_S = 95.U(UOPC_SZ.W)
val uopFNMSUB_S = 96.U(UOPC_SZ.W)
val uopFMADD_D = 97.U(UOPC_SZ.W)
val uopFMSUB_D = 98.U(UOPC_SZ.W)
val uopFNMADD_D = 99.U(UOPC_SZ.W)
val uopFNMSUB_D = 100.U(UOPC_SZ.W)
val uopFDIV_S = 101.U(UOPC_SZ.W)
val uopFDIV_D = 102.U(UOPC_SZ.W)
val uopFSQRT_S = 103.U(UOPC_SZ.W)
val uopFSQRT_D = 104.U(UOPC_SZ.W)
val uopWFI = 105.U(UOPC_SZ.W) // pass uop down the CSR pipeline
val uopERET = 106.U(UOPC_SZ.W) // pass uop down the CSR pipeline, also is ERET
val uopSFENCE = 107.U(UOPC_SZ.W)
val uopROCC = 108.U(UOPC_SZ.W)
val uopMOV = 109.U(UOPC_SZ.W) // conditional mov decoded from "add rd, x0, rs2"
// The Bubble Instruction (Machine generated NOP)
// Insert (XOR x0,x0,x0) which is different from software compiler
// generated NOPs which are (ADDI x0, x0, 0).
// Reasoning for this is to let visualizers and stat-trackers differentiate
// between software NOPs and machine-generated Bubbles in the pipeline.
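// Sanity check of the encoding: 0x4033 = 0000000_00000_00000_100_00000_0110011, i.e.
// funct7=0, rs2=x0, rs1=x0, funct3=0b100 (XOR), rd=x0, opcode=OP -- exactly XOR x0,x0,x0.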
val BUBBLE = (0x4033).U(32.W)
def NullMicroOp()(implicit p: Parameters): boom.v3.common.MicroOp = {
val uop = Wire(new boom.v3.common.MicroOp)
uop := DontCare // Overridden in the following lines
uop.uopc := uopNOP // maybe not required, but helps on asserts that try to catch spurious behavior
uop.bypassable := false.B
uop.fp_val := false.B
uop.uses_stq := false.B
uop.uses_ldq := false.B
uop.pdst := 0.U
uop.dst_rtype := RT_X
val cs = Wire(new boom.v3.common.CtrlSignals())
cs := DontCare // Overridden in the following lines
cs.br_type := BR_N
cs.csr_cmd := freechips.rocketchip.rocket.CSR.N
cs.is_load := false.B
cs.is_sta := false.B
cs.is_std := false.B
uop.ctrl := cs
uop
}
}
/**
* Mixin for RISCV constants
*/
trait RISCVConstants
{
// abstract out instruction decode magic numbers
val RD_MSB = 11
val RD_LSB = 7
val RS1_MSB = 19
val RS1_LSB = 15
val RS2_MSB = 24
val RS2_LSB = 20
val RS3_MSB = 31
val RS3_LSB = 27
val CSR_ADDR_MSB = 31
val CSR_ADDR_LSB = 20
val CSR_ADDR_SZ = 12
// location of shamt bit 5 (used to check for illegal shift amounts in SRAIW, etc.)
val SHAMT_5_BIT = 25
val LONGEST_IMM_SZ = 20
val X0 = 0.U
val RA = 1.U // return address register
// memory consistency model
// The C/C++ atomics MCM requires that two loads to the same address maintain program order.
// The Cortex A9 does NOT enforce load/load ordering (which leads to buggy behavior).
val MCM_ORDER_DEPENDENT_LOADS = true
val jal_opc = (0x6f).U
val jalr_opc = (0x67).U
def GetUop(inst: UInt): UInt = inst(6,0)
def GetRd (inst: UInt): UInt = inst(RD_MSB,RD_LSB)
def GetRs1(inst: UInt): UInt = inst(RS1_MSB,RS1_LSB)
def ExpandRVC(inst: UInt)(implicit p: Parameters): UInt = {
val rvc_exp = Module(new RVCExpander)
rvc_exp.io.in := inst
Mux(rvc_exp.io.rvc, rvc_exp.io.out.bits, inst)
}
// Note: Accepts only EXPANDED rvc instructions
def ComputeBranchTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = {
val b_imm32 = Cat(Fill(20,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
((pc.asSInt + b_imm32.asSInt).asSInt & (-2).S).asUInt
}
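// The Cat above follows the B-type immediate layout: inst(31) is imm[12] (and the sign),
// inst(7) is imm[11], inst(30,25) is imm[10:5], inst(11,8) is imm[4:1], and imm[0] is zero.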
// Note: Accepts only EXPANDED rvc instructions
def ComputeJALTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = {
val j_imm32 = Cat(Fill(12,inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
((pc.asSInt + j_imm32.asSInt).asSInt & (-2).S).asUInt
}
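// Same idea for the J-type layout: inst(31) is imm[20] (and the sign), inst(19,12) is
// imm[19:12], inst(20) is imm[11], inst(30,21) is imm[10:1], and imm[0] is zero.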
// Note: Accepts only EXPANDED rvc instructions
def GetCfiType(inst: UInt)(implicit p: Parameters): UInt = {
val bdecode = Module(new boom.v3.exu.BranchDecode)
bdecode.io.inst := inst
bdecode.io.pc := 0.U
bdecode.io.out.cfi_type
}
}
/**
* Mixin for exception cause constants
*/
trait ExcCauseConstants
{
// a memory disambiguation misspeculation occurred
val MINI_EXCEPTION_MEM_ORDERING = 16.U
val MINI_EXCEPTION_CSR_REPLAY = 17.U
require (!freechips.rocketchip.rocket.Causes.all.contains(16))
require (!freechips.rocketchip.rocket.Causes.all.contains(17))
}
| module BranchDecode_12( // @[decode.scala:623:7]
input clock, // @[decode.scala:623:7]
input reset, // @[decode.scala:623:7]
input [31:0] io_inst, // @[decode.scala:625:14]
input [39:0] io_pc, // @[decode.scala:625:14]
output io_out_is_ret, // @[decode.scala:625:14]
output io_out_is_call, // @[decode.scala:625:14]
output [39:0] io_out_target, // @[decode.scala:625:14]
output [2:0] io_out_cfi_type, // @[decode.scala:625:14]
output io_out_sfb_offset_valid, // @[decode.scala:625:14]
output [5:0] io_out_sfb_offset_bits, // @[decode.scala:625:14]
output io_out_shadowable // @[decode.scala:625:14]
);
wire [31:0] io_inst_0 = io_inst; // @[decode.scala:623:7]
wire [39:0] io_pc_0 = io_pc; // @[decode.scala:623:7]
wire [31:0] bpd_csignals_decoded_plaInput = io_inst_0; // @[pla.scala:77:22]
wire _io_out_is_ret_T_6; // @[decode.scala:695:72]
wire [39:0] _io_out_target_T = io_pc_0; // @[decode.scala:623:7]
wire [39:0] _io_out_target_T_8 = io_pc_0; // @[decode.scala:623:7]
wire _io_out_is_call_T_3; // @[decode.scala:694:47]
wire [39:0] _io_out_target_T_16; // @[decode.scala:697:23]
wire [2:0] _io_out_cfi_type_T_2; // @[decode.scala:700:8]
wire _io_out_sfb_offset_valid_T_7; // @[decode.scala:710:76]
wire _io_out_shadowable_T_11; // @[decode.scala:712:41]
wire io_out_sfb_offset_valid_0; // @[decode.scala:623:7]
wire [5:0] io_out_sfb_offset_bits_0; // @[decode.scala:623:7]
wire io_out_is_ret_0; // @[decode.scala:623:7]
wire io_out_is_call_0; // @[decode.scala:623:7]
wire [39:0] io_out_target_0; // @[decode.scala:623:7]
wire [2:0] io_out_cfi_type_0; // @[decode.scala:623:7]
wire io_out_shadowable_0; // @[decode.scala:623:7]
wire [31:0] bpd_csignals_decoded_invInputs = ~bpd_csignals_decoded_plaInput; // @[pla.scala:77:22, :78:21]
wire [4:0] bpd_csignals_decoded_invMatrixOutputs; // @[pla.scala:120:37]
wire [4:0] bpd_csignals_decoded; // @[pla.scala:81:23]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_1 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_2 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_3 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_4 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_5 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_6 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_7 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_8 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_9 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_10 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_11 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_12 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_13 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_14 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_15 = bpd_csignals_decoded_plaInput[0]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_1 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_2 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_3 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_4 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_5 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_6 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_7 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_8 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_9 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_10 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_11 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_12 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_13 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_14 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_15 = bpd_csignals_decoded_plaInput[1]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_1 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_2 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_3 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_4 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_6 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_9 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_10 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_11 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_12 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_13 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_14 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_15 = bpd_csignals_decoded_invInputs[2]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_3 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_5 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_6 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_7 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_9 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_11 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_12 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_13 = bpd_csignals_decoded_invInputs[3]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_1 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_2 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_3 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_4 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_5 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_9 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_10 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_11 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_13 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_14 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_15 = bpd_csignals_decoded_plaInput[4]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_1 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_9 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_11 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_13 = bpd_csignals_decoded_invInputs[5]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_1 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_2 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_3 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_4 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_5 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_9 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_10 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_11 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_13 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_14 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_15 = bpd_csignals_decoded_invInputs[6]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7 = bpd_csignals_decoded_invInputs[12]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_1 = bpd_csignals_decoded_invInputs[12]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_2 = bpd_csignals_decoded_invInputs[12]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_6 = bpd_csignals_decoded_invInputs[12]; // @[pla.scala:78:21, :91:29]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo = {bpd_csignals_decoded_andMatrixOutputs_lo_hi, bpd_csignals_decoded_andMatrixOutputs_lo_lo}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi = {bpd_csignals_decoded_andMatrixOutputs_hi_hi, bpd_csignals_decoded_andMatrixOutputs_hi_lo}; // @[pla.scala:98:53]
wire [7:0] _bpd_csignals_decoded_andMatrixOutputs_T = {bpd_csignals_decoded_andMatrixOutputs_hi, bpd_csignals_decoded_andMatrixOutputs_lo}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_5_2 = &_bpd_csignals_decoded_andMatrixOutputs_T; // @[pla.scala:98:{53,70}]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_1 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_2 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_4 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_5 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_4 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_5 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_8 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_7 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_12 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_13 = bpd_csignals_decoded_invInputs[13]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8 = bpd_csignals_decoded_invInputs[14]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_1 = bpd_csignals_decoded_invInputs[14]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_4 = bpd_csignals_decoded_invInputs[14]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_3 = bpd_csignals_decoded_invInputs[14]; // @[pla.scala:78:21, :91:29]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_1}; // @[pla.scala:91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_1, bpd_csignals_decoded_andMatrixOutputs_lo_lo_1}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_1}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_1}; // @[pla.scala:90:45, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_1}; // @[pla.scala:91:29, :98:53]
wire [4:0] bpd_csignals_decoded_andMatrixOutputs_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_1, bpd_csignals_decoded_andMatrixOutputs_hi_lo_1}; // @[pla.scala:98:53]
wire [8:0] _bpd_csignals_decoded_andMatrixOutputs_T_1 = {bpd_csignals_decoded_andMatrixOutputs_hi_1, bpd_csignals_decoded_andMatrixOutputs_lo_1}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_9_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_1; // @[pla.scala:98:{53,70}]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_2 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_3 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_4 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_5 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_6 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_7 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_8 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_12 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_15 = bpd_csignals_decoded_plaInput[5]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_3 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_3 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_6 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_7 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_8 = bpd_csignals_decoded_invInputs[25]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_2 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_2 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_4 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_5 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_5 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_6 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_7 = bpd_csignals_decoded_invInputs[26]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_1 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_2 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_3 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_4 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_5 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_6 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_7 = bpd_csignals_decoded_invInputs[27]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_1 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_2 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_3 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_4 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_5 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_6 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_7 = bpd_csignals_decoded_invInputs[28]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_1 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_2 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_3 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_4 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_5 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_6 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_7 = bpd_csignals_decoded_invInputs[29]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_1 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_1 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_2 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_3 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_4 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_5 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_6 = bpd_csignals_decoded_invInputs[31]; // @[pla.scala:78:21, :91:29]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13}; // @[pla.scala:91:29, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9}; // @[pla.scala:91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo}; // @[pla.scala:98:53]
wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_2, bpd_csignals_decoded_andMatrixOutputs_lo_lo_2}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_2}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_2}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_2}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_2}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_1, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo}; // @[pla.scala:98:53]
wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_2, bpd_csignals_decoded_andMatrixOutputs_hi_lo_2}; // @[pla.scala:98:53]
wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_2 = {bpd_csignals_decoded_andMatrixOutputs_hi_2, bpd_csignals_decoded_andMatrixOutputs_lo_2}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_14_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_2; // @[pla.scala:98:{53,70}]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_1 = bpd_csignals_decoded_invInputs[30]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_2 = bpd_csignals_decoded_invInputs[30]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_3 = bpd_csignals_decoded_invInputs[30]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_4 = bpd_csignals_decoded_invInputs[30]; // @[pla.scala:78:21, :91:29]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_1}; // @[pla.scala:91:29, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_1}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_1}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_2}; // @[pla.scala:91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_1, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_1}; // @[pla.scala:98:53]
wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_3, bpd_csignals_decoded_andMatrixOutputs_lo_lo_3}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_3}; // @[pla.scala:90:45, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_1, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_3}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_3}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_3}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_2, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_1}; // @[pla.scala:98:53]
wire [6:0] bpd_csignals_decoded_andMatrixOutputs_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_3, bpd_csignals_decoded_andMatrixOutputs_hi_lo_3}; // @[pla.scala:98:53]
wire [13:0] _bpd_csignals_decoded_andMatrixOutputs_T_3 = {bpd_csignals_decoded_andMatrixOutputs_hi_3, bpd_csignals_decoded_andMatrixOutputs_lo_3}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_0_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_3; // @[pla.scala:98:{53,70}]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_2}; // @[pla.scala:91:29, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_1}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_2, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_2}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_2}; // @[pla.scala:91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_2, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_2}; // @[pla.scala:98:53]
wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_4, bpd_csignals_decoded_andMatrixOutputs_lo_lo_4}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_1 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_4}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_4}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_2, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_1}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_4}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_4}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_3, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_2}; // @[pla.scala:98:53]
wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_4, bpd_csignals_decoded_andMatrixOutputs_hi_lo_4}; // @[pla.scala:98:53]
wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_4 = {bpd_csignals_decoded_andMatrixOutputs_hi_4, bpd_csignals_decoded_andMatrixOutputs_lo_4}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_2_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_4; // @[pla.scala:98:{53,70}]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_5 = bpd_csignals_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_7 = bpd_csignals_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_8 = bpd_csignals_decoded_plaInput[2]; // @[pla.scala:77:22, :90:45]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_5}; // @[pla.scala:90:45, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_5}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_5}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_5}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_5, bpd_csignals_decoded_andMatrixOutputs_hi_lo_5}; // @[pla.scala:98:53]
wire [6:0] _bpd_csignals_decoded_andMatrixOutputs_T_5 = {bpd_csignals_decoded_andMatrixOutputs_hi_5, bpd_csignals_decoded_andMatrixOutputs_lo_5}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_12_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_5; // @[pla.scala:98:{53,70}]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_6 = bpd_csignals_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_7 = bpd_csignals_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_8 = bpd_csignals_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_12 = bpd_csignals_decoded_invInputs[4]; // @[pla.scala:78:21, :91:29]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_6 = bpd_csignals_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_7 = bpd_csignals_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_8 = bpd_csignals_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_12 = bpd_csignals_decoded_plaInput[6]; // @[pla.scala:77:22, :90:45]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_5}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_6}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_6, bpd_csignals_decoded_andMatrixOutputs_lo_lo_5}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_6}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_6}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_6, bpd_csignals_decoded_andMatrixOutputs_hi_lo_6}; // @[pla.scala:98:53]
wire [7:0] _bpd_csignals_decoded_andMatrixOutputs_T_6 = {bpd_csignals_decoded_andMatrixOutputs_hi_6, bpd_csignals_decoded_andMatrixOutputs_lo_6}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_6_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_6; // @[pla.scala:98:{53,70}]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_3}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_7}; // @[pla.scala:90:45, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_6}; // @[pla.scala:91:29, :98:53]
wire [4:0] bpd_csignals_decoded_andMatrixOutputs_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_7, bpd_csignals_decoded_andMatrixOutputs_lo_lo_6}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_7}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_7}; // @[pla.scala:90:45, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_7}; // @[pla.scala:90:45, :98:53]
wire [4:0] bpd_csignals_decoded_andMatrixOutputs_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_7, bpd_csignals_decoded_andMatrixOutputs_hi_lo_7}; // @[pla.scala:98:53]
wire [9:0] _bpd_csignals_decoded_andMatrixOutputs_T_7 = {bpd_csignals_decoded_andMatrixOutputs_hi_7, bpd_csignals_decoded_andMatrixOutputs_lo_7}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_15_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_7; // @[pla.scala:98:{53,70}]
wire _bpd_csignals_decoded_orMatrixOutputs_T_4 = bpd_csignals_decoded_andMatrixOutputs_15_2; // @[pla.scala:98:70, :114:36]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_8 = bpd_csignals_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_10 = bpd_csignals_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_14 = bpd_csignals_decoded_plaInput[3]; // @[pla.scala:77:22, :90:45]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_8}; // @[pla.scala:90:45, :91:29, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_8 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_8}; // @[pla.scala:90:45, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_8}; // @[pla.scala:90:45, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_8}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_8, bpd_csignals_decoded_andMatrixOutputs_hi_lo_8}; // @[pla.scala:98:53]
wire [6:0] _bpd_csignals_decoded_andMatrixOutputs_T_8 = {bpd_csignals_decoded_andMatrixOutputs_hi_8, bpd_csignals_decoded_andMatrixOutputs_lo_8}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_11_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_8; // @[pla.scala:98:{53,70}]
wire _bpd_csignals_decoded_orMatrixOutputs_T_5 = bpd_csignals_decoded_andMatrixOutputs_11_2; // @[pla.scala:98:70, :114:36]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_7 = bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_10 = bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_11 = bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_14 = bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_15 = bpd_csignals_decoded_plaInput[12]; // @[pla.scala:77:22, :90:45]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_3}; // @[pla.scala:91:29, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_2}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_3, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_3}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_4}; // @[pla.scala:91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_9 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_4, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_3}; // @[pla.scala:98:53]
wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_9 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_9, bpd_csignals_decoded_andMatrixOutputs_lo_lo_7}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_2 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_7}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_9}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_9 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_3, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_2}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_9}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_9}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_9 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_5, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_3}; // @[pla.scala:98:53]
wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_9 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_9, bpd_csignals_decoded_andMatrixOutputs_hi_lo_9}; // @[pla.scala:98:53]
wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_9 = {bpd_csignals_decoded_andMatrixOutputs_hi_9, bpd_csignals_decoded_andMatrixOutputs_lo_9}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_3_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_9; // @[pla.scala:98:{53,70}]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_4}; // @[pla.scala:91:29, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_8 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_3}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_4, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_4}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_5}; // @[pla.scala:91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_10 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_5, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_4}; // @[pla.scala:98:53]
wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_10 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_10, bpd_csignals_decoded_andMatrixOutputs_lo_lo_8}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_3 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_8}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_10}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_10 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_4, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_3}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_10}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_10, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_10}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_10 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_6, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_4}; // @[pla.scala:98:53]
wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_10 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_10, bpd_csignals_decoded_andMatrixOutputs_hi_lo_10}; // @[pla.scala:98:53]
wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_10 = {bpd_csignals_decoded_andMatrixOutputs_hi_10, bpd_csignals_decoded_andMatrixOutputs_lo_10}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_7_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_10; // @[pla.scala:98:{53,70}]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_9 = bpd_csignals_decoded_plaInput[13]; // @[pla.scala:77:22, :90:45]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_9 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_11, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_9}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_11 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_11, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_11}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_11 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_11, bpd_csignals_decoded_andMatrixOutputs_lo_lo_9}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_11 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_11, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_11}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_11 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_11, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_11}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_11 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_11, bpd_csignals_decoded_andMatrixOutputs_hi_lo_11}; // @[pla.scala:98:53]
wire [7:0] _bpd_csignals_decoded_andMatrixOutputs_T_11 = {bpd_csignals_decoded_andMatrixOutputs_hi_11, bpd_csignals_decoded_andMatrixOutputs_lo_11}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_1_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_11; // @[pla.scala:98:{53,70}]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_10 = bpd_csignals_decoded_plaInput[14]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_6 = bpd_csignals_decoded_plaInput[14]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_8 = bpd_csignals_decoded_plaInput[14]; // @[pla.scala:77:22, :90:45]
wire bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_9 = bpd_csignals_decoded_plaInput[14]; // @[pla.scala:77:22, :90:45]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_10 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_10}; // @[pla.scala:90:45, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_12 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_12}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_12 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_12, bpd_csignals_decoded_andMatrixOutputs_lo_lo_10}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_12 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_12}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_12 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_12, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_12}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_12 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_12, bpd_csignals_decoded_andMatrixOutputs_hi_lo_12}; // @[pla.scala:98:53]
wire [7:0] _bpd_csignals_decoded_andMatrixOutputs_T_12 = {bpd_csignals_decoded_andMatrixOutputs_hi_12, bpd_csignals_decoded_andMatrixOutputs_lo_12}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_13_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_12; // @[pla.scala:98:{53,70}]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_5}; // @[pla.scala:91:29, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_11 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_4}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_5, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_5}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_6}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_13 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_6, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_5}; // @[pla.scala:98:53]
wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_13 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_13, bpd_csignals_decoded_andMatrixOutputs_lo_lo_11}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_4 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_13, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_11}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_13, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_13}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_13 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_5, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_4}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_13, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_13}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_13, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_13}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_13 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_7, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_5}; // @[pla.scala:98:53]
wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_13 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_13, bpd_csignals_decoded_andMatrixOutputs_hi_lo_13}; // @[pla.scala:98:53]
wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_13 = {bpd_csignals_decoded_andMatrixOutputs_hi_13, bpd_csignals_decoded_andMatrixOutputs_lo_13}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_4_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_13; // @[pla.scala:98:{53,70}]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_6}; // @[pla.scala:91:29, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_12 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_5}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_6, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_6}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_8, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_7}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_14 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_7, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_6}; // @[pla.scala:98:53]
wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_14 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_14, bpd_csignals_decoded_andMatrixOutputs_lo_lo_12}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_5 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_14, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_12}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_14, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_14}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_14 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_6, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_5}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_14, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_14}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_14, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_14}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_14 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_8, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_6}; // @[pla.scala:98:53]
wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_14 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_14, bpd_csignals_decoded_andMatrixOutputs_hi_lo_14}; // @[pla.scala:98:53]
wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_14 = {bpd_csignals_decoded_andMatrixOutputs_hi_14, bpd_csignals_decoded_andMatrixOutputs_lo_14}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_8_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_14; // @[pla.scala:98:{53,70}]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_12_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_13_7}; // @[pla.scala:91:29, :98:53]
wire [2:0] bpd_csignals_decoded_andMatrixOutputs_lo_lo_13 = {bpd_csignals_decoded_andMatrixOutputs_lo_lo_hi_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_14_6}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_10_7, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_11_7}; // @[pla.scala:91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_8 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_8_9, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_9_8}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_lo_hi_15 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_hi_8, bpd_csignals_decoded_andMatrixOutputs_lo_hi_lo_7}; // @[pla.scala:98:53]
wire [6:0] bpd_csignals_decoded_andMatrixOutputs_lo_15 = {bpd_csignals_decoded_andMatrixOutputs_lo_hi_15, bpd_csignals_decoded_andMatrixOutputs_lo_lo_13}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_6 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_6_15, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_7_13}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_4_15, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_5_15}; // @[pla.scala:90:45, :91:29, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_lo_15 = {bpd_csignals_decoded_andMatrixOutputs_hi_lo_hi_7, bpd_csignals_decoded_andMatrixOutputs_hi_lo_lo_6}; // @[pla.scala:98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_7 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_2_15, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_3_15}; // @[pla.scala:90:45, :91:29, :98:53]
wire [1:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_9 = {bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_0_15, bpd_csignals_decoded_andMatrixOutputs_andMatrixInput_1_15}; // @[pla.scala:90:45, :98:53]
wire [3:0] bpd_csignals_decoded_andMatrixOutputs_hi_hi_15 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_hi_9, bpd_csignals_decoded_andMatrixOutputs_hi_hi_lo_7}; // @[pla.scala:98:53]
wire [7:0] bpd_csignals_decoded_andMatrixOutputs_hi_15 = {bpd_csignals_decoded_andMatrixOutputs_hi_hi_15, bpd_csignals_decoded_andMatrixOutputs_hi_lo_15}; // @[pla.scala:98:53]
wire [14:0] _bpd_csignals_decoded_andMatrixOutputs_T_15 = {bpd_csignals_decoded_andMatrixOutputs_hi_15, bpd_csignals_decoded_andMatrixOutputs_lo_15}; // @[pla.scala:98:53]
wire bpd_csignals_decoded_andMatrixOutputs_10_2 = &_bpd_csignals_decoded_andMatrixOutputs_T_15; // @[pla.scala:98:{53,70}]
wire [1:0] bpd_csignals_decoded_orMatrixOutputs_lo = {bpd_csignals_decoded_andMatrixOutputs_2_2, bpd_csignals_decoded_andMatrixOutputs_10_2}; // @[pla.scala:98:70, :114:19]
wire [1:0] bpd_csignals_decoded_orMatrixOutputs_hi = {bpd_csignals_decoded_andMatrixOutputs_14_2, bpd_csignals_decoded_andMatrixOutputs_0_2}; // @[pla.scala:98:70, :114:19]
wire [3:0] _bpd_csignals_decoded_orMatrixOutputs_T = {bpd_csignals_decoded_orMatrixOutputs_hi, bpd_csignals_decoded_orMatrixOutputs_lo}; // @[pla.scala:114:19]
wire _bpd_csignals_decoded_orMatrixOutputs_T_1 = |_bpd_csignals_decoded_orMatrixOutputs_T; // @[pla.scala:114:{19,36}]
wire [1:0] bpd_csignals_decoded_orMatrixOutputs_lo_lo = {bpd_csignals_decoded_andMatrixOutputs_8_2, bpd_csignals_decoded_andMatrixOutputs_10_2}; // @[pla.scala:98:70, :114:19]
wire [1:0] bpd_csignals_decoded_orMatrixOutputs_lo_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_7_2, bpd_csignals_decoded_andMatrixOutputs_1_2}; // @[pla.scala:98:70, :114:19]
wire [2:0] bpd_csignals_decoded_orMatrixOutputs_lo_hi = {bpd_csignals_decoded_orMatrixOutputs_lo_hi_hi, bpd_csignals_decoded_andMatrixOutputs_4_2}; // @[pla.scala:98:70, :114:19]
wire [4:0] bpd_csignals_decoded_orMatrixOutputs_lo_1 = {bpd_csignals_decoded_orMatrixOutputs_lo_hi, bpd_csignals_decoded_orMatrixOutputs_lo_lo}; // @[pla.scala:114:19]
wire [1:0] bpd_csignals_decoded_orMatrixOutputs_hi_lo_hi = {bpd_csignals_decoded_andMatrixOutputs_0_2, bpd_csignals_decoded_andMatrixOutputs_12_2}; // @[pla.scala:98:70, :114:19]
wire [2:0] bpd_csignals_decoded_orMatrixOutputs_hi_lo = {bpd_csignals_decoded_orMatrixOutputs_hi_lo_hi, bpd_csignals_decoded_andMatrixOutputs_3_2}; // @[pla.scala:98:70, :114:19]
wire [1:0] bpd_csignals_decoded_orMatrixOutputs_hi_hi_hi = {bpd_csignals_decoded_andMatrixOutputs_5_2, bpd_csignals_decoded_andMatrixOutputs_9_2}; // @[pla.scala:98:70, :114:19]
wire [2:0] bpd_csignals_decoded_orMatrixOutputs_hi_hi = {bpd_csignals_decoded_orMatrixOutputs_hi_hi_hi, bpd_csignals_decoded_andMatrixOutputs_14_2}; // @[pla.scala:98:70, :114:19]
wire [5:0] bpd_csignals_decoded_orMatrixOutputs_hi_1 = {bpd_csignals_decoded_orMatrixOutputs_hi_hi, bpd_csignals_decoded_orMatrixOutputs_hi_lo}; // @[pla.scala:114:19]
wire [10:0] _bpd_csignals_decoded_orMatrixOutputs_T_2 = {bpd_csignals_decoded_orMatrixOutputs_hi_1, bpd_csignals_decoded_orMatrixOutputs_lo_1}; // @[pla.scala:114:19]
wire _bpd_csignals_decoded_orMatrixOutputs_T_3 = |_bpd_csignals_decoded_orMatrixOutputs_T_2; // @[pla.scala:114:{19,36}]
wire [1:0] _bpd_csignals_decoded_orMatrixOutputs_T_6 = {bpd_csignals_decoded_andMatrixOutputs_6_2, bpd_csignals_decoded_andMatrixOutputs_13_2}; // @[pla.scala:98:70, :114:19]
wire _bpd_csignals_decoded_orMatrixOutputs_T_7 = |_bpd_csignals_decoded_orMatrixOutputs_T_6; // @[pla.scala:114:{19,36}]
wire [1:0] bpd_csignals_decoded_orMatrixOutputs_lo_2 = {_bpd_csignals_decoded_orMatrixOutputs_T_3, _bpd_csignals_decoded_orMatrixOutputs_T_1}; // @[pla.scala:102:36, :114:36]
wire [1:0] bpd_csignals_decoded_orMatrixOutputs_hi_hi_1 = {_bpd_csignals_decoded_orMatrixOutputs_T_7, _bpd_csignals_decoded_orMatrixOutputs_T_5}; // @[pla.scala:102:36, :114:36]
wire [2:0] bpd_csignals_decoded_orMatrixOutputs_hi_2 = {bpd_csignals_decoded_orMatrixOutputs_hi_hi_1, _bpd_csignals_decoded_orMatrixOutputs_T_4}; // @[pla.scala:102:36, :114:36]
wire [4:0] bpd_csignals_decoded_orMatrixOutputs = {bpd_csignals_decoded_orMatrixOutputs_hi_2, bpd_csignals_decoded_orMatrixOutputs_lo_2}; // @[pla.scala:102:36]
wire _bpd_csignals_decoded_invMatrixOutputs_T = bpd_csignals_decoded_orMatrixOutputs[0]; // @[pla.scala:102:36, :124:31]
wire _bpd_csignals_decoded_invMatrixOutputs_T_1 = bpd_csignals_decoded_orMatrixOutputs[1]; // @[pla.scala:102:36, :124:31]
wire _bpd_csignals_decoded_invMatrixOutputs_T_2 = bpd_csignals_decoded_orMatrixOutputs[2]; // @[pla.scala:102:36, :124:31]
wire _bpd_csignals_decoded_invMatrixOutputs_T_3 = bpd_csignals_decoded_orMatrixOutputs[3]; // @[pla.scala:102:36, :124:31]
wire _bpd_csignals_decoded_invMatrixOutputs_T_4 = bpd_csignals_decoded_orMatrixOutputs[4]; // @[pla.scala:102:36, :124:31]
wire [1:0] bpd_csignals_decoded_invMatrixOutputs_lo = {_bpd_csignals_decoded_invMatrixOutputs_T_1, _bpd_csignals_decoded_invMatrixOutputs_T}; // @[pla.scala:120:37, :124:31]
wire [1:0] bpd_csignals_decoded_invMatrixOutputs_hi_hi = {_bpd_csignals_decoded_invMatrixOutputs_T_4, _bpd_csignals_decoded_invMatrixOutputs_T_3}; // @[pla.scala:120:37, :124:31]
wire [2:0] bpd_csignals_decoded_invMatrixOutputs_hi = {bpd_csignals_decoded_invMatrixOutputs_hi_hi, _bpd_csignals_decoded_invMatrixOutputs_T_2}; // @[pla.scala:120:37, :124:31]
assign bpd_csignals_decoded_invMatrixOutputs = {bpd_csignals_decoded_invMatrixOutputs_hi, bpd_csignals_decoded_invMatrixOutputs_lo}; // @[pla.scala:120:37]
assign bpd_csignals_decoded = bpd_csignals_decoded_invMatrixOutputs; // @[pla.scala:81:23, :120:37]
wire bpd_csignals_0 = bpd_csignals_decoded[4]; // @[pla.scala:81:23]
wire cs_is_br = bpd_csignals_0; // @[Decode.scala:50:77]
wire bpd_csignals_1 = bpd_csignals_decoded[3]; // @[pla.scala:81:23]
wire cs_is_jal = bpd_csignals_1; // @[Decode.scala:50:77]
wire bpd_csignals_2 = bpd_csignals_decoded[2]; // @[pla.scala:81:23]
wire cs_is_jalr = bpd_csignals_2; // @[Decode.scala:50:77]
wire bpd_csignals_3 = bpd_csignals_decoded[1]; // @[pla.scala:81:23]
wire cs_is_shadowable = bpd_csignals_3; // @[Decode.scala:50:77]
wire bpd_csignals_4 = bpd_csignals_decoded[0]; // @[pla.scala:81:23]
wire cs_has_rs2 = bpd_csignals_4; // @[Decode.scala:50:77]
wire _io_out_is_call_T = cs_is_jal | cs_is_jalr; // @[decode.scala:689:34, :690:35, :694:32]
wire [4:0] _io_out_is_call_T_1 = io_inst_0[11:7]; // @[decode.scala:623:7]
wire [4:0] _io_out_is_ret_T_4 = io_inst_0[11:7]; // @[decode.scala:623:7]
wire [4:0] _io_out_shadowable_T_2 = io_inst_0[11:7]; // @[decode.scala:623:7]
wire _io_out_is_call_T_2 = _io_out_is_call_T_1 == 5'h1; // @[decode.scala:694:65]
assign _io_out_is_call_T_3 = _io_out_is_call_T & _io_out_is_call_T_2; // @[decode.scala:694:{32,47,65}]
assign io_out_is_call_0 = _io_out_is_call_T_3; // @[decode.scala:623:7, :694:47]
wire [4:0] _io_out_is_ret_T = io_inst_0[19:15]; // @[decode.scala:623:7]
wire [4:0] _io_out_shadowable_T_1 = io_inst_0[19:15]; // @[decode.scala:623:7]
wire [4:0] _io_out_shadowable_T_7 = io_inst_0[19:15]; // @[decode.scala:623:7]
wire [4:0] _io_out_is_ret_T_1 = _io_out_is_ret_T & 5'h1B; // @[decode.scala:695:51]
wire _io_out_is_ret_T_2 = _io_out_is_ret_T_1 == 5'h1; // @[decode.scala:695:51]
wire _io_out_is_ret_T_3 = cs_is_jalr & _io_out_is_ret_T_2; // @[decode.scala:690:35, :695:{32,51}]
wire _io_out_is_ret_T_5 = _io_out_is_ret_T_4 == 5'h0; // @[decode.scala:695:90]
assign _io_out_is_ret_T_6 = _io_out_is_ret_T_3 & _io_out_is_ret_T_5; // @[decode.scala:695:{32,72,90}]
assign io_out_is_ret_0 = _io_out_is_ret_T_6; // @[decode.scala:623:7, :695:72]
wire _io_out_target_b_imm32_T = io_inst_0[31]; // @[decode.scala:623:7]
wire _io_out_target_j_imm32_T = io_inst_0[31]; // @[decode.scala:623:7]
wire _io_out_sfb_offset_valid_T = io_inst_0[31]; // @[decode.scala:623:7, :710:50]
wire [19:0] _io_out_target_b_imm32_T_1 = {20{_io_out_target_b_imm32_T}}; // @[consts.scala:337:{27,35}]
wire _io_out_target_b_imm32_T_2 = io_inst_0[7]; // @[decode.scala:623:7]
wire _br_offset_T = io_inst_0[7]; // @[decode.scala:623:7, :708:30]
wire [5:0] _io_out_target_b_imm32_T_3 = io_inst_0[30:25]; // @[decode.scala:623:7]
wire [5:0] _io_out_target_j_imm32_T_4 = io_inst_0[30:25]; // @[decode.scala:623:7]
wire [5:0] _br_offset_T_1 = io_inst_0[30:25]; // @[decode.scala:623:7, :708:42]
wire [3:0] _io_out_target_b_imm32_T_4 = io_inst_0[11:8]; // @[decode.scala:623:7]
wire [3:0] _br_offset_T_2 = io_inst_0[11:8]; // @[decode.scala:623:7, :708:58]
wire [4:0] io_out_target_b_imm32_lo = {_io_out_target_b_imm32_T_4, 1'h0}; // @[consts.scala:337:{22,68}]
wire [20:0] io_out_target_b_imm32_hi_hi = {_io_out_target_b_imm32_T_1, _io_out_target_b_imm32_T_2}; // @[consts.scala:337:{22,27,46}]
wire [26:0] io_out_target_b_imm32_hi = {io_out_target_b_imm32_hi_hi, _io_out_target_b_imm32_T_3}; // @[consts.scala:337:{22,55}]
wire [31:0] io_out_target_b_imm32 = {io_out_target_b_imm32_hi, io_out_target_b_imm32_lo}; // @[consts.scala:337:22]
wire [31:0] _io_out_target_T_1 = io_out_target_b_imm32; // @[consts.scala:337:22, :338:27]
wire [40:0] _io_out_target_T_2 = {_io_out_target_T[39], _io_out_target_T} + {{9{_io_out_target_T_1[31]}}, _io_out_target_T_1}; // @[consts.scala:338:{10,17,27}]
wire [39:0] _io_out_target_T_3 = _io_out_target_T_2[39:0]; // @[consts.scala:338:17]
wire [39:0] _io_out_target_T_4 = _io_out_target_T_3; // @[consts.scala:338:17]
wire [39:0] _io_out_target_T_5 = _io_out_target_T_4 & 40'hFFFFFFFFFE; // @[consts.scala:338:{17,42}]
wire [39:0] _io_out_target_T_6 = _io_out_target_T_5; // @[consts.scala:338:42]
wire [39:0] _io_out_target_T_7 = _io_out_target_T_6; // @[consts.scala:338:{42,52}]
wire [11:0] _io_out_target_j_imm32_T_1 = {12{_io_out_target_j_imm32_T}}; // @[consts.scala:343:{27,35}]
wire [7:0] _io_out_target_j_imm32_T_2 = io_inst_0[19:12]; // @[decode.scala:623:7]
wire _io_out_target_j_imm32_T_3 = io_inst_0[20]; // @[decode.scala:623:7]
wire [3:0] _io_out_target_j_imm32_T_5 = io_inst_0[24:21]; // @[decode.scala:623:7]
wire [9:0] io_out_target_j_imm32_lo_hi = {_io_out_target_j_imm32_T_4, _io_out_target_j_imm32_T_5}; // @[consts.scala:343:{22,69,82}]
wire [10:0] io_out_target_j_imm32_lo = {io_out_target_j_imm32_lo_hi, 1'h0}; // @[consts.scala:343:22]
wire [19:0] io_out_target_j_imm32_hi_hi = {_io_out_target_j_imm32_T_1, _io_out_target_j_imm32_T_2}; // @[consts.scala:343:{22,27,46}]
wire [20:0] io_out_target_j_imm32_hi = {io_out_target_j_imm32_hi_hi, _io_out_target_j_imm32_T_3}; // @[consts.scala:343:{22,59}]
wire [31:0] io_out_target_j_imm32 = {io_out_target_j_imm32_hi, io_out_target_j_imm32_lo}; // @[consts.scala:343:22]
wire [31:0] _io_out_target_T_9 = io_out_target_j_imm32; // @[consts.scala:343:22, :344:27]
wire [40:0] _io_out_target_T_10 = {_io_out_target_T_8[39], _io_out_target_T_8} + {{9{_io_out_target_T_9[31]}}, _io_out_target_T_9}; // @[consts.scala:344:{10,17,27}]
wire [39:0] _io_out_target_T_11 = _io_out_target_T_10[39:0]; // @[consts.scala:344:17]
wire [39:0] _io_out_target_T_12 = _io_out_target_T_11; // @[consts.scala:344:17]
wire [39:0] _io_out_target_T_13 = _io_out_target_T_12 & 40'hFFFFFFFFFE; // @[consts.scala:344:{17,42}]
wire [39:0] _io_out_target_T_14 = _io_out_target_T_13; // @[consts.scala:344:42]
wire [39:0] _io_out_target_T_15 = _io_out_target_T_14; // @[consts.scala:344:{42,52}]
assign _io_out_target_T_16 = cs_is_br ? _io_out_target_T_7 : _io_out_target_T_15; // @[decode.scala:688:33, :697:23]
assign io_out_target_0 = _io_out_target_T_16; // @[decode.scala:623:7, :697:23]
wire [2:0] _io_out_cfi_type_T = {2'h0, cs_is_br}; // @[decode.scala:688:33, :704:8]
wire [2:0] _io_out_cfi_type_T_1 = cs_is_jal ? 3'h2 : _io_out_cfi_type_T; // @[decode.scala:689:34, :702:8, :704:8]
assign _io_out_cfi_type_T_2 = cs_is_jalr ? 3'h3 : _io_out_cfi_type_T_1; // @[decode.scala:690:35, :700:8, :702:8]
assign io_out_cfi_type_0 = _io_out_cfi_type_T_2; // @[decode.scala:623:7, :700:8]
wire [4:0] br_offset_lo = {_br_offset_T_2, 1'h0}; // @[decode.scala:708:{22,58}]
wire [6:0] br_offset_hi = {_br_offset_T, _br_offset_T_1}; // @[decode.scala:708:{22,30,42}]
wire [11:0] br_offset = {br_offset_hi, br_offset_lo}; // @[decode.scala:708:22]
wire _io_out_sfb_offset_valid_T_1 = ~_io_out_sfb_offset_valid_T; // @[decode.scala:710:{42,50}]
wire _io_out_sfb_offset_valid_T_2 = cs_is_br & _io_out_sfb_offset_valid_T_1; // @[decode.scala:688:33, :710:{39,42}]
wire _io_out_sfb_offset_valid_T_3 = |br_offset; // @[decode.scala:708:22, :710:68]
wire _io_out_sfb_offset_valid_T_4 = _io_out_sfb_offset_valid_T_2 & _io_out_sfb_offset_valid_T_3; // @[decode.scala:710:{39,55,68}]
wire [5:0] _io_out_sfb_offset_valid_T_5 = br_offset[11:6]; // @[decode.scala:708:22, :710:90]
wire _io_out_sfb_offset_valid_T_6 = _io_out_sfb_offset_valid_T_5 == 6'h0; // @[decode.scala:710:{90,117}]
assign _io_out_sfb_offset_valid_T_7 = _io_out_sfb_offset_valid_T_4 & _io_out_sfb_offset_valid_T_6; // @[decode.scala:710:{55,76,117}]
assign io_out_sfb_offset_valid_0 = _io_out_sfb_offset_valid_T_7; // @[decode.scala:623:7, :710:76]
assign io_out_sfb_offset_bits_0 = br_offset[5:0]; // @[decode.scala:623:7, :708:22, :711:27]
wire _io_out_shadowable_T = ~cs_has_rs2; // @[decode.scala:692:35, :713:5]
wire _io_out_shadowable_T_3 = _io_out_shadowable_T_1 == _io_out_shadowable_T_2; // @[decode.scala:714:22]
wire _io_out_shadowable_T_4 = _io_out_shadowable_T | _io_out_shadowable_T_3; // @[decode.scala:713:{5,17}, :714:22]
wire [31:0] _io_out_shadowable_T_5 = io_inst_0 & 32'hFE00707F; // @[decode.scala:623:7, :715:14]
wire _io_out_shadowable_T_6 = _io_out_shadowable_T_5 == 32'h33; // @[decode.scala:715:14]
wire _io_out_shadowable_T_8 = _io_out_shadowable_T_7 == 5'h0; // @[decode.scala:695:90, :715:41]
wire _io_out_shadowable_T_9 = _io_out_shadowable_T_6 & _io_out_shadowable_T_8; // @[decode.scala:715:{14,22,41}]
wire _io_out_shadowable_T_10 = _io_out_shadowable_T_4 | _io_out_shadowable_T_9; // @[decode.scala:713:17, :714:42, :715:22]
assign _io_out_shadowable_T_11 = cs_is_shadowable & _io_out_shadowable_T_10; // @[decode.scala:691:41, :712:41, :714:42]
assign io_out_shadowable_0 = _io_out_shadowable_T_11; // @[decode.scala:623:7, :712:41]
assign io_out_is_ret = io_out_is_ret_0; // @[decode.scala:623:7]
assign io_out_is_call = io_out_is_call_0; // @[decode.scala:623:7]
assign io_out_target = io_out_target_0; // @[decode.scala:623:7]
assign io_out_cfi_type = io_out_cfi_type_0; // @[decode.scala:623:7]
assign io_out_sfb_offset_valid = io_out_sfb_offset_valid_0; // @[decode.scala:623:7]
assign io_out_sfb_offset_bits = io_out_sfb_offset_bits_0; // @[decode.scala:623:7]
assign io_out_shadowable = io_out_shadowable_0; // @[decode.scala:623:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File EgressUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
class EgressUnit(coupleSAVA: Boolean, combineSAST: Boolean, inParams: Seq[ChannelParams], ingressParams: Seq[IngressChannelParams], cParam: EgressChannelParams)
(implicit p: Parameters) extends AbstractOutputUnit(inParams, ingressParams, cParam)(p) {
class EgressUnitIO extends AbstractOutputUnitIO(inParams, ingressParams, cParam) {
val out = Decoupled(new EgressFlit(cParam.payloadBits))
}
val io = IO(new EgressUnitIO)
val channel_empty = RegInit(true.B)
val flow = Reg(new FlowRoutingBundle)
val q = Module(new Queue(new EgressFlit(cParam.payloadBits), 3 - (if (combineSAST) 1 else 0), flow=true))
q.io.enq.valid := io.in(0).valid
q.io.enq.bits.head := io.in(0).bits.head
q.io.enq.bits.tail := io.in(0).bits.tail
val flows = cParam.possibleFlows.toSeq
if (flows.size == 0) {
q.io.enq.bits.ingress_id := 0.U(1.W)
} else {
q.io.enq.bits.ingress_id := Mux1H(
flows.map(f => (f.ingressNode.U === io.in(0).bits.flow.ingress_node &&
f.ingressNodeId.U === io.in(0).bits.flow.ingress_node_id)),
flows.map(f => f.ingressId.U(ingressIdBits.W))
)
}
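// Editor's note (illustrative addition, not part of the original file): Mux1H above is a
// one-hot mux, so exactly one flow-match predicate is expected to be true per flit. A
// minimal sketch of the same idiom, with a hypothetical selector `key`:
//   val sel = Seq(key === 0.U, key === 1.U, key === 2.U) // one-hot select vector
//   val out = Mux1H(sel, Seq(10.U, 20.U, 30.U))          // yields 20.U when key === 1.U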
q.io.enq.bits.payload := io.in(0).bits.payload
io.out <> q.io.deq
assert(!(q.io.enq.valid && !q.io.enq.ready))
io.credit_available(0) := q.io.count === 0.U
io.channel_status(0).occupied := !channel_empty
io.channel_status(0).flow := flow
when (io.credit_alloc(0).alloc && io.credit_alloc(0).tail) {
channel_empty := true.B
if (coupleSAVA) io.channel_status(0).occupied := false.B
}
when (io.allocs(0).alloc) {
channel_empty := false.B
flow := io.allocs(0).flow
}
}
| module EgressUnit_66( // @[EgressUnit.scala:12:7]
input clock, // @[EgressUnit.scala:12:7]
input reset, // @[EgressUnit.scala:12:7]
input io_in_0_valid, // @[EgressUnit.scala:18:14]
input io_in_0_bits_head, // @[EgressUnit.scala:18:14]
input io_in_0_bits_tail, // @[EgressUnit.scala:18:14]
input [72:0] io_in_0_bits_payload, // @[EgressUnit.scala:18:14]
input [3:0] io_in_0_bits_flow_ingress_node, // @[EgressUnit.scala:18:14]
input [1:0] io_in_0_bits_flow_ingress_node_id, // @[EgressUnit.scala:18:14]
output io_credit_available_0, // @[EgressUnit.scala:18:14]
output io_channel_status_0_occupied, // @[EgressUnit.scala:18:14]
input io_allocs_0_alloc, // @[EgressUnit.scala:18:14]
input io_credit_alloc_0_alloc, // @[EgressUnit.scala:18:14]
input io_credit_alloc_0_tail, // @[EgressUnit.scala:18:14]
input io_out_ready, // @[EgressUnit.scala:18:14]
output io_out_valid, // @[EgressUnit.scala:18:14]
output io_out_bits_head, // @[EgressUnit.scala:18:14]
output io_out_bits_tail, // @[EgressUnit.scala:18:14]
output [72:0] io_out_bits_payload // @[EgressUnit.scala:18:14]
);
wire _q_io_enq_ready; // @[EgressUnit.scala:22:17]
wire [1:0] _q_io_count; // @[EgressUnit.scala:22:17]
reg channel_empty; // @[EgressUnit.scala:20:30]
wire _q_io_enq_bits_ingress_id_T_3 = io_in_0_bits_flow_ingress_node == 4'h0; // @[EgressUnit.scala:31:39] |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
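// Editor's illustrative examples (not part of the original file), for a 3-bit input:
//   "b101".U(3.W).sextTo(5) // => "b11101".U (sign bit replicated)
//   "b101".U(3.W).padTo(5)  // => "b00101".U (zero-extended)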
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
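// Editor's note (illustrative, not part of the original file): the SInt shift amount
// encodes the direction in its sign bit. For example, with n = -2.S(3.W):
//   x << n // behaves like x >> 2 (shift left by n(1,0) = 2, then right by 1 << 2 = 4)
//   x >> n // behaves like x << 2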
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
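// Editor's illustrative examples (not part of the original file), on a 4-bit value:
//   "b0011".U(4.W).rotateRight(1) // => "b1001".U
//   "b0011".U(4.W).rotateLeft(1)  // => "b0110".U
// The UInt-amount variants build the same result out of log2Ceil(width) staged muxes.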
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
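// Editor's illustrative examples (not part of the original file), with n = 7 (not a power of 2):
//   5.U.addWrap(4.U, 7) // => 2.U, i.e. (5 + 4) % 7
//   2.U.subWrap(4.U, 7) // => 5.U, i.e. (2 - 4) mod 7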
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
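// Editor's illustrative examples (not part of the original file): OH1 is a thermometer
// encoding with `x` low bits set, so
//   UIntToOH1(2.U, 4)       // => "b0011".U
//   OH1ToOH("b0011".U(4.W)) // => "b00100".U (one-hot at position 2)
//   OH1ToUInt("b0011".U)    // => 2.U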
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
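// Editor's illustrative examples (not part of the original file), on a 5-bit value:
//   leftOR("b00100".U(5.W))  // => "b11100".U (1s propagate toward the MSB)
//   rightOR("b00100".U(5.W)) // => "b00111".U (1s propagate toward the LSB)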
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
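// Editor's illustrative example (not part of the original file): unlike Seq.groupBy, the
// result preserves first-appearance order of the keys, so it is stable across runs:
//   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) // => Seq(1 -> List(1, 3), 0 -> List(2, 4))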
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File SingleVCAllocator.scala:
package constellation.router
import chisel3._
import chisel3.util._
import chisel3.util.random.{LFSR}
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{ChannelRoutingInfo, FlowRoutingBundle}
// Allocates 1 VC per cycle
abstract class SingleVCAllocator(vP: VCAllocatorParams)(implicit p: Parameters) extends VCAllocator(vP)(p) {
// get single input
val mask = RegInit(0.U(allInParams.size.W))
val in_arb_reqs = Wire(Vec(allInParams.size, MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })))
val in_arb_vals = Wire(Vec(allInParams.size, Bool()))
val in_arb_filter = PriorityEncoderOH(Cat(in_arb_vals.asUInt, in_arb_vals.asUInt & ~mask))
val in_arb_sel = (in_arb_filter(allInParams.size-1,0) | (in_arb_filter >> allInParams.size))
when (in_arb_vals.orR) {
mask := Mux1H(in_arb_sel, (0 until allInParams.size).map { w => ~(0.U((w+1).W)) })
}
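// Editor's note (illustrative, not part of the original file): the Cat/PriorityEncoderOH
// pair above implements rotating (round-robin) priority. A worked 2-input example:
//   in_arb_vals = "b11", mask = "b01" => Cat(vals, vals & ~mask) = "b1110"
//   PriorityEncoderOH(...) = "b0010", so in_arb_sel = "b10" (input 1 wins)
//   mask then becomes "b11", so on the next cycle input 0 wins even though input 1 still requests.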
for (i <- 0 until allInParams.size) {
(0 until allOutParams.size).map { m =>
(0 until allOutParams(m).nVirtualChannels).map { n =>
in_arb_reqs(i)(m)(n) := io.req(i).bits.vc_sel(m)(n) && !io.channel_status(m)(n).occupied
}
}
in_arb_vals(i) := io.req(i).valid && in_arb_reqs(i).map(_.orR).toSeq.orR
}
// Input arbitration
io.req.foreach(_.ready := false.B)
val in_alloc = Wire(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val in_flow = Mux1H(in_arb_sel, io.req.map(_.bits.flow).toSeq)
val in_vc = Mux1H(in_arb_sel, io.req.map(_.bits.in_vc).toSeq)
val in_vc_sel = Mux1H(in_arb_sel, in_arb_reqs)
in_alloc := Mux(in_arb_vals.orR,
inputAllocPolicy(in_flow, in_vc_sel, OHToUInt(in_arb_sel), in_vc, io.req.map(_.fire).toSeq.orR),
0.U.asTypeOf(in_alloc))
// send allocation to inputunits
for (i <- 0 until allInParams.size) {
io.req(i).ready := in_arb_sel(i)
for (m <- 0 until allOutParams.size) {
(0 until allOutParams(m).nVirtualChannels).map { n =>
io.resp(i).vc_sel(m)(n) := in_alloc(m)(n)
}
}
assert(PopCount(io.resp(i).vc_sel.asUInt) <= 1.U)
}
// send allocation to output units
for (i <- 0 until allOutParams.size) {
(0 until allOutParams(i).nVirtualChannels).map { j =>
io.out_allocs(i)(j).alloc := in_alloc(i)(j)
io.out_allocs(i)(j).flow := in_flow
}
}
}
File VCAllocator.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import freechips.rocketchip.rocket.{DecodeLogic}
import constellation.channel._
import constellation.noc.{HasNoCParams}
import constellation.routing.{FlowRoutingBundle, FlowRoutingInfo, ChannelRoutingInfo}
class VCAllocReq(
val inParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams])
(implicit val p: Parameters) extends Bundle
with HasRouterOutputParams
with HasNoCParams {
val flow = new FlowRoutingBundle
val in_vc = UInt(log2Ceil(inParam.nVirtualChannels).W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
}
class VCAllocResp(val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
}
case class VCAllocatorParams(
routerParams: RouterParams,
inParams: Seq[ChannelParams],
outParams: Seq[ChannelParams],
ingressParams: Seq[IngressChannelParams],
egressParams: Seq[EgressChannelParams])
abstract class VCAllocator(val vP: VCAllocatorParams)(implicit val p: Parameters) extends Module
with HasRouterParams
with HasRouterInputParams
with HasRouterOutputParams
with HasNoCParams {
val routerParams = vP.routerParams
val inParams = vP.inParams
val outParams = vP.outParams
val ingressParams = vP.ingressParams
val egressParams = vP.egressParams
val io = IO(new Bundle {
val req = MixedVec(allInParams.map { u =>
Flipped(Decoupled(new VCAllocReq(u, outParams, egressParams)))
})
val resp = MixedVec(allInParams.map { u =>
Output(new VCAllocResp(outParams, egressParams))
})
val channel_status = MixedVec(allOutParams.map { u =>
Vec(u.nVirtualChannels, Input(new OutputChannelStatus)) })
val out_allocs = MixedVec(allOutParams.map { u =>
Vec(u.nVirtualChannels, Output(new OutputChannelAlloc)) })
})
val nOutChannels = allOutParams.map(_.nVirtualChannels).sum
def inputAllocPolicy(
flow: FlowRoutingBundle, vc_sel: MixedVec[Vec[Bool]],
inId: UInt, inVId: UInt, fire: Bool): MixedVec[Vec[Bool]]
def outputAllocPolicy(
out: ChannelRoutingInfo,
flows: Seq[FlowRoutingBundle], reqs: Seq[Bool], fire: Bool): Vec[Bool]
}
File ISLIP.scala:
package constellation.router
import chisel3._
import chisel3.util._
import chisel3.util.random.{LFSR}
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{ChannelRoutingInfo, FlowRoutingBundle}
trait ISLIP { this: VCAllocator =>
def islip(in: UInt, fire: Bool): UInt = {
val w = in.getWidth
if (w > 1) {
val mask = RegInit(0.U(w.W))
val full = Cat(in, in & ~mask)
val oh = PriorityEncoderOH(full)
val sel = (oh(w-1,0) | (oh >> w))
when (fire) {
mask := MuxCase(0.U, (0 until w).map { i =>
sel(i) -> ~(0.U((i+1).W))
})
}
sel
} else {
in
}
}
def inputAllocPolicy(flow: FlowRoutingBundle, vc_sel: MixedVec[Vec[Bool]], inId: UInt, inVId: UInt, fire: Bool) = {
islip(vc_sel.asUInt, fire).asTypeOf(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool())}))
}
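// Editor's note (illustrative, not part of the original file): asUInt/asTypeOf simply
// flatten and re-bundle the per-VC request bits; bit 0 of the flat UInt maps to element 0
// of the first output Vec. A minimal sketch of the same round trip:
//   val flat = "b0110".U(4.W)
//   val v    = flat.asTypeOf(Vec(4, Bool())) // v(1) and v(2) are true, v(0) and v(3) are false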
def outputAllocPolicy(channel: ChannelRoutingInfo, flows: Seq[FlowRoutingBundle], reqs: Seq[Bool], fire: Bool) = {
islip(VecInit(reqs).asUInt, fire).asTypeOf(Vec(allInParams.size, Bool()))
}
}
class ISLIPMultiVCAllocator(vP: VCAllocatorParams)(implicit p: Parameters) extends MultiVCAllocator(vP)(p)
with ISLIP
class RotatingSingleVCAllocator(vP: VCAllocatorParams)(implicit p: Parameters) extends SingleVCAllocator(vP)(p)
with ISLIP
| module RotatingSingleVCAllocator_38( // @[ISLIP.scala:43:7]
input clock, // @[ISLIP.scala:43:7]
input reset, // @[ISLIP.scala:43:7]
output io_req_1_ready, // @[VCAllocator.scala:49:14]
input io_req_1_valid, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_1, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_4, // @[VCAllocator.scala:49:14]
output io_req_0_ready, // @[VCAllocator.scala:49:14]
input io_req_0_valid, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_1, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_4, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_1, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_4, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_1, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_4, // @[VCAllocator.scala:49:14]
input io_channel_status_1_1_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_4_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_1_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_4_occupied, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_1_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_4_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_1_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_4_alloc // @[VCAllocator.scala:49:14]
);
wire in_arb_vals_1; // @[SingleVCAllocator.scala:32:39]
wire in_arb_vals_0; // @[SingleVCAllocator.scala:32:39]
reg [1:0] mask; // @[SingleVCAllocator.scala:16:21]
wire [1:0] _in_arb_filter_T_3 = {in_arb_vals_1, in_arb_vals_0} & ~mask; // @[SingleVCAllocator.scala:16:21, :19:{57,84,86}, :32:39]
wire [3:0] in_arb_filter = _in_arb_filter_T_3[0] ? 4'h1 : _in_arb_filter_T_3[1] ? 4'h2 : in_arb_vals_0 ? 4'h4 : {in_arb_vals_1, 3'h0}; // @[OneHot.scala:85:71]
wire [1:0] in_arb_sel = in_arb_filter[1:0] | in_arb_filter[3:2]; // @[Mux.scala:50:70]
wire _GEN = in_arb_vals_0 | in_arb_vals_1; // @[package.scala:81:59]
wire in_arb_reqs_0_1_1 = io_req_0_bits_vc_sel_1_1 & ~io_channel_status_1_1_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_1_4 = io_req_0_bits_vc_sel_1_4 & ~io_channel_status_1_4_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
assign in_arb_vals_0 = io_req_0_valid & (in_arb_reqs_0_1_1 | in_arb_reqs_0_1_4); // @[package.scala:81:59]
wire in_arb_reqs_1_0_1 = io_req_1_bits_vc_sel_0_1 & ~io_channel_status_0_1_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_4 = io_req_1_bits_vc_sel_0_4 & ~io_channel_status_0_4_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
assign in_arb_vals_1 = io_req_1_valid & (in_arb_reqs_1_0_1 | in_arb_reqs_1_0_4); // @[package.scala:81:59]
wire _in_vc_sel_T_6 = in_arb_sel[1] & in_arb_reqs_1_0_1; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_15 = in_arb_sel[1] & in_arb_reqs_1_0_4; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_20 = in_arb_sel[0] & in_arb_reqs_0_1_1; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_29 = in_arb_sel[0] & in_arb_reqs_0_1_4; // @[Mux.scala:30:73, :32:36]
reg [9:0] mask_1; // @[ISLIP.scala:17:25]
wire [9:0] _full_T_1 = {_in_vc_sel_T_29, 2'h0, _in_vc_sel_T_20, 1'h0, _in_vc_sel_T_15, 2'h0, _in_vc_sel_T_6, 1'h0} & ~mask_1; // @[Mux.scala:30:73]
wire [19:0] oh = _full_T_1[0] ? 20'h1 : _full_T_1[1] ? 20'h2 : _full_T_1[2] ? 20'h4 : _full_T_1[3] ? 20'h8 : _full_T_1[4] ? 20'h10 : _full_T_1[5] ? 20'h20 : _full_T_1[6] ? 20'h40 : _full_T_1[7] ? 20'h80 : _full_T_1[8] ? 20'h100 : _full_T_1[9] ? 20'h200 : _in_vc_sel_T_6 ? 20'h800 : _in_vc_sel_T_15 ? 20'h4000 : _in_vc_sel_T_20 ? 20'h10000 : {_in_vc_sel_T_29, 19'h0}; // @[OneHot.scala:85:71]
wire [9:0] sel = oh[9:0] | oh[19:10]; // @[Mux.scala:50:70]
wire in_alloc_1_1 = _GEN & sel[6]; // @[package.scala:81:59]
wire in_alloc_1_4 = _GEN & sel[9]; // @[package.scala:81:59]
wire in_alloc_0_1 = _GEN & sel[1]; // @[package.scala:81:59]
wire in_alloc_0_4 = _GEN & sel[4]; // @[package.scala:81:59] |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
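// Editor's illustrative usage sketch (not part of the original file; node names are
// hypothetical). Inside a LazyModule, a buffer stage is typically spliced into a
// diplomatic connection:
//   device.node := TLBuffer(BufferParams.default) := client.node
//   device.node := TLBuffer.chainNode(2, Some("membus_buffers")) := client.node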
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and the unconnected [[Dangle]]s from this module
* and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
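// Editor's illustrative sketch (not part of the original file; all names are hypothetical):
// a LazyRawModuleImp subclass normally drives childClock/childReset from whatever clock
// domain it owns, for example from its own IO:
//   class MyDomainImp(outer: MyDomain) extends LazyRawModuleImp(outer) {
//     override def provideImplicitClockToLazyChildren = true
//     val io = IO(new Bundle { val clk = Input(Clock()); val rst = Input(AsyncReset()) })
//     childClock := io.clk
//     childReset := io.rst
//   }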
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: the source is processed by a function and the result is passed on to others
* - Arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* The visited [[BaseNode]]s, indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
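// Informal worked example for the @example above: if `d` additionally appears on the left
// of a `:*=` (say `d :*= x`), then allSink == 1 and allSource == 0 for the set {a, b, c, d},
// so flexOffset == 1 >= 0 and every flex edge in that set resolves in the sink direction.
// Had a member instead appeared on the right of a `:=*`, the set would resolve as
// source-directed; mixing both is rejected by the require above.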
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice while resolving `:*=`-style operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
 * connections, which must be resolved to determine how many actual edges they correspond to. We also need to build
 * up the ranges of edges which correspond to each binding operator, so that we can apply the correct edge
 * parameters and later build up correct bundle connections.
 *
 * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
 * operator).
 * [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort (binding
 * operator).
 * [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= bar`.
 * [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`.
 */
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// resolveStar relies on the node subclass to implement the cardinality-resolution algorithm.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
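// Informal worked example (assumed bindings, not taken from this design): for a node N with
//   N :*= a   // one BIND_STAR inward binding
//   b := N    // BIND_ONCE outward binding
//   c := N    // BIND_ONCE outward binding
// we get iStars = 1, oStars = 0, iKnown = 0, oKnown = 2. resolveStar then decides how many
// edges the single `:*=` stands for (2 for a typical adapter/identity-style node, so the
// inward edge count matches the two outward edges), and the scans above turn the per-binding
// counts into contiguous (start, end) ranges in iPortMapping and oPortMapping.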
/** Sequence of outward ports (one per resolved outward edge).
 *
 * This should be accessed only after all star bindings are resolved.
 *
 * Each element is: `j` Port index of this binding in the [[iPortMapping]] of the node on the other side of the
 * binding. `n` Instance of the [[InwardNode]] on the other end. `p` View of [[Parameters]] where this connection
 * was made. `s` Source info where this connection was made in the source code.
 */
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of inward ports (one per resolved inward edge).
 *
 * This should be accessed only after all star bindings are resolved.
 *
 * `j` Port index of this binding in the [[oPortMapping]] of the node on the other side of the binding. `n` Instance
 * of the [[OutwardNode]] on the other end. `p` View of [[Parameters]] where this connection was made. `s`
 * [[SourceInfo]] where this connection was made in the source code.
 */
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query the range of port indices that this binding occupies on the node on the other side
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have a non-None iForward/oForward) have in_degree == out_degree,
// so an Eulerian path exists and the trace algorithms below terminate.
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
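// Informal example: if this node's output is bound to an EphemeralNode `e`, which is in turn
// bound to a real node `b`, then oDirectPorts points at `e` and oTrace follows e.iForward
// until it lands on `b`. The resolved oPorts below therefore reference `b` directly, and the
// ephemeral hop disappears from the final graph.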
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need access to the edges of a foreign Node, use this method (`in`/`out` create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s that describe the connections from this node's outputs to other nodes' inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s that describe the connections into this node's inputs from other nodes' outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessor to the result of negotiation. Should only be used within [[LazyModuleImp]] code, or after the
* [[LazyModuleImp]]'s instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessor to the result of negotiation. Should only be used within [[LazyModuleImp]] code, or after the
* [[LazyModuleImp]]'s instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
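// Usage sketch (hypothetical node and bundle type, for illustration only): inside a
// LazyModuleImp the negotiated result is typically consumed as
//
//   lazy val module = new LazyModuleImp(this) {
//     val (bundle, edge) = node.out(0) // hardware Bundle plus its negotiated edge parameters
//     bundle.a.valid := false.B        // drive the diplomatic Bundle like any other Chisel IO
//   }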
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
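// Informal example: `x := y` records the edge on both sides as BIND_ONCE. For `x :*= y`,
// the inward side (x) stores BIND_STAR while the outward side (y) stores the mirrored
// BIND_QUERY, which is what later lets y count `x.iStar` when computing its own oKnown
// during star resolution.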
/** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module TLBuffer_a32d64s6k1z4u( // @[Buffer.scala:40:9]
input clock, // @[Buffer.scala:40:9]
input reset, // @[Buffer.scala:40:9]
output auto_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_user_amba_prot_bufferable, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_user_amba_prot_modifiable, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_user_amba_prot_readalloc, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_user_amba_prot_writealloc, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_user_amba_prot_privileged, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_user_amba_prot_secure, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_user_amba_prot_fetch, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_user_amba_prot_bufferable, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_user_amba_prot_modifiable, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_user_amba_prot_readalloc, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_user_amba_prot_writealloc, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_user_amba_prot_privileged, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_user_amba_prot_secure, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_user_amba_prot_fetch, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire auto_in_a_valid_0 = auto_in_a_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_opcode_0 = auto_in_a_bits_opcode; // @[Buffer.scala:40:9]
wire [2:0] auto_in_a_bits_param_0 = auto_in_a_bits_param; // @[Buffer.scala:40:9]
wire [3:0] auto_in_a_bits_size_0 = auto_in_a_bits_size; // @[Buffer.scala:40:9]
wire [5:0] auto_in_a_bits_source_0 = auto_in_a_bits_source; // @[Buffer.scala:40:9]
wire [31:0] auto_in_a_bits_address_0 = auto_in_a_bits_address; // @[Buffer.scala:40:9]
wire auto_in_a_bits_user_amba_prot_bufferable_0 = auto_in_a_bits_user_amba_prot_bufferable; // @[Buffer.scala:40:9]
wire auto_in_a_bits_user_amba_prot_modifiable_0 = auto_in_a_bits_user_amba_prot_modifiable; // @[Buffer.scala:40:9]
wire auto_in_a_bits_user_amba_prot_readalloc_0 = auto_in_a_bits_user_amba_prot_readalloc; // @[Buffer.scala:40:9]
wire auto_in_a_bits_user_amba_prot_writealloc_0 = auto_in_a_bits_user_amba_prot_writealloc; // @[Buffer.scala:40:9]
wire auto_in_a_bits_user_amba_prot_privileged_0 = auto_in_a_bits_user_amba_prot_privileged; // @[Buffer.scala:40:9]
wire auto_in_a_bits_user_amba_prot_secure_0 = auto_in_a_bits_user_amba_prot_secure; // @[Buffer.scala:40:9]
wire auto_in_a_bits_user_amba_prot_fetch_0 = auto_in_a_bits_user_amba_prot_fetch; // @[Buffer.scala:40:9]
wire [7:0] auto_in_a_bits_mask_0 = auto_in_a_bits_mask; // @[Buffer.scala:40:9]
wire [63:0] auto_in_a_bits_data_0 = auto_in_a_bits_data; // @[Buffer.scala:40:9]
wire auto_in_a_bits_corrupt_0 = auto_in_a_bits_corrupt; // @[Buffer.scala:40:9]
wire auto_in_d_ready_0 = auto_in_d_ready; // @[Buffer.scala:40:9]
wire auto_out_a_ready_0 = auto_out_a_ready; // @[Buffer.scala:40:9]
wire auto_out_d_valid_0 = auto_out_d_valid; // @[Buffer.scala:40:9]
wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[Buffer.scala:40:9]
wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[Buffer.scala:40:9]
wire [3:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[Buffer.scala:40:9]
wire [5:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[Buffer.scala:40:9]
wire auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[Buffer.scala:40:9]
wire auto_out_d_bits_denied_0 = auto_out_d_bits_denied; // @[Buffer.scala:40:9]
wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[Buffer.scala:40:9]
wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[Buffer.scala:40:9]
wire nodeIn_a_ready; // @[MixedNode.scala:551:17]
wire nodeIn_a_valid = auto_in_a_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_opcode = auto_in_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] nodeIn_a_bits_param = auto_in_a_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] nodeIn_a_bits_size = auto_in_a_bits_size_0; // @[Buffer.scala:40:9]
wire [5:0] nodeIn_a_bits_source = auto_in_a_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] nodeIn_a_bits_address = auto_in_a_bits_address_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_user_amba_prot_bufferable = auto_in_a_bits_user_amba_prot_bufferable_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_user_amba_prot_modifiable = auto_in_a_bits_user_amba_prot_modifiable_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_user_amba_prot_readalloc = auto_in_a_bits_user_amba_prot_readalloc_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_user_amba_prot_writealloc = auto_in_a_bits_user_amba_prot_writealloc_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_user_amba_prot_privileged = auto_in_a_bits_user_amba_prot_privileged_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_user_amba_prot_secure = auto_in_a_bits_user_amba_prot_secure_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_user_amba_prot_fetch = auto_in_a_bits_user_amba_prot_fetch_0; // @[Buffer.scala:40:9]
wire [7:0] nodeIn_a_bits_mask = auto_in_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] nodeIn_a_bits_data = auto_in_a_bits_data_0; // @[Buffer.scala:40:9]
wire nodeIn_a_bits_corrupt = auto_in_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire nodeIn_d_ready = auto_in_d_ready_0; // @[Buffer.scala:40:9]
wire nodeIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] nodeIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] nodeIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] nodeIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [5:0] nodeIn_d_bits_source; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] nodeIn_d_bits_data; // @[MixedNode.scala:551:17]
wire nodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire nodeOut_a_ready = auto_out_a_ready_0; // @[Buffer.scala:40:9]
wire nodeOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [5:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [31:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_user_amba_prot_bufferable; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_user_amba_prot_modifiable; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_user_amba_prot_readalloc; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_user_amba_prot_writealloc; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_user_amba_prot_privileged; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_user_amba_prot_secure; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_user_amba_prot_fetch; // @[MixedNode.scala:542:17]
wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17]
wire nodeOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire nodeOut_d_ready; // @[MixedNode.scala:542:17]
wire nodeOut_d_valid = auto_out_d_valid_0; // @[Buffer.scala:40:9]
wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[Buffer.scala:40:9]
wire [5:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[Buffer.scala:40:9]
wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_a_ready_0; // @[Buffer.scala:40:9]
wire [2:0] auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
wire [1:0] auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
wire [5:0] auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
wire [63:0] auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
wire auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_in_d_valid_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_user_amba_prot_bufferable_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_user_amba_prot_modifiable_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_user_amba_prot_readalloc_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_user_amba_prot_writealloc_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_user_amba_prot_privileged_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_user_amba_prot_secure_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_user_amba_prot_fetch_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
wire [2:0] auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
wire [3:0] auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
wire [5:0] auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
wire [31:0] auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
wire [7:0] auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
wire [63:0] auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
wire auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
wire auto_out_a_valid_0; // @[Buffer.scala:40:9]
wire auto_out_d_ready_0; // @[Buffer.scala:40:9]
assign auto_in_a_ready_0 = nodeIn_a_ready; // @[Buffer.scala:40:9]
assign auto_in_d_valid_0 = nodeIn_d_valid; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode_0 = nodeIn_d_bits_opcode; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param_0 = nodeIn_d_bits_param; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size_0 = nodeIn_d_bits_size; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source_0 = nodeIn_d_bits_source; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink_0 = nodeIn_d_bits_sink; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied_0 = nodeIn_d_bits_denied; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data_0 = nodeIn_d_bits_data; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt_0 = nodeIn_d_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_a_valid_0 = nodeOut_a_valid; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_bufferable_0 = nodeOut_a_bits_user_amba_prot_bufferable; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_modifiable_0 = nodeOut_a_bits_user_amba_prot_modifiable; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_readalloc_0 = nodeOut_a_bits_user_amba_prot_readalloc; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_writealloc_0 = nodeOut_a_bits_user_amba_prot_writealloc; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_privileged_0 = nodeOut_a_bits_user_amba_prot_privileged; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_secure_0 = nodeOut_a_bits_user_amba_prot_secure; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_fetch_0 = nodeOut_a_bits_user_amba_prot_fetch; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt_0 = nodeOut_a_bits_corrupt; // @[Buffer.scala:40:9]
assign auto_out_d_ready_0 = nodeOut_d_ready; // @[Buffer.scala:40:9]
TLMonitor_16 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (nodeIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_user_amba_prot_bufferable (nodeIn_a_bits_user_amba_prot_bufferable), // @[MixedNode.scala:551:17]
.io_in_a_bits_user_amba_prot_modifiable (nodeIn_a_bits_user_amba_prot_modifiable), // @[MixedNode.scala:551:17]
.io_in_a_bits_user_amba_prot_readalloc (nodeIn_a_bits_user_amba_prot_readalloc), // @[MixedNode.scala:551:17]
.io_in_a_bits_user_amba_prot_writealloc (nodeIn_a_bits_user_amba_prot_writealloc), // @[MixedNode.scala:551:17]
.io_in_a_bits_user_amba_prot_privileged (nodeIn_a_bits_user_amba_prot_privileged), // @[MixedNode.scala:551:17]
.io_in_a_bits_user_amba_prot_secure (nodeIn_a_bits_user_amba_prot_secure), // @[MixedNode.scala:551:17]
.io_in_a_bits_user_amba_prot_fetch (nodeIn_a_bits_user_amba_prot_fetch), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (nodeIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (nodeIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (nodeIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (nodeIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (nodeIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (nodeIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (nodeIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (nodeIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (nodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Queue2_TLBundleA_a32d64s6k1z4u nodeOut_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeIn_a_ready),
.io_enq_valid (nodeIn_a_valid), // @[MixedNode.scala:551:17]
.io_enq_bits_opcode (nodeIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_enq_bits_param (nodeIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_enq_bits_size (nodeIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_enq_bits_source (nodeIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_enq_bits_address (nodeIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_enq_bits_user_amba_prot_bufferable (nodeIn_a_bits_user_amba_prot_bufferable), // @[MixedNode.scala:551:17]
.io_enq_bits_user_amba_prot_modifiable (nodeIn_a_bits_user_amba_prot_modifiable), // @[MixedNode.scala:551:17]
.io_enq_bits_user_amba_prot_readalloc (nodeIn_a_bits_user_amba_prot_readalloc), // @[MixedNode.scala:551:17]
.io_enq_bits_user_amba_prot_writealloc (nodeIn_a_bits_user_amba_prot_writealloc), // @[MixedNode.scala:551:17]
.io_enq_bits_user_amba_prot_privileged (nodeIn_a_bits_user_amba_prot_privileged), // @[MixedNode.scala:551:17]
.io_enq_bits_user_amba_prot_secure (nodeIn_a_bits_user_amba_prot_secure), // @[MixedNode.scala:551:17]
.io_enq_bits_user_amba_prot_fetch (nodeIn_a_bits_user_amba_prot_fetch), // @[MixedNode.scala:551:17]
.io_enq_bits_mask (nodeIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_enq_bits_data (nodeIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_enq_bits_corrupt (nodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_deq_ready (nodeOut_a_ready), // @[MixedNode.scala:542:17]
.io_deq_valid (nodeOut_a_valid),
.io_deq_bits_opcode (nodeOut_a_bits_opcode),
.io_deq_bits_param (nodeOut_a_bits_param),
.io_deq_bits_size (nodeOut_a_bits_size),
.io_deq_bits_source (nodeOut_a_bits_source),
.io_deq_bits_address (nodeOut_a_bits_address),
.io_deq_bits_user_amba_prot_bufferable (nodeOut_a_bits_user_amba_prot_bufferable),
.io_deq_bits_user_amba_prot_modifiable (nodeOut_a_bits_user_amba_prot_modifiable),
.io_deq_bits_user_amba_prot_readalloc (nodeOut_a_bits_user_amba_prot_readalloc),
.io_deq_bits_user_amba_prot_writealloc (nodeOut_a_bits_user_amba_prot_writealloc),
.io_deq_bits_user_amba_prot_privileged (nodeOut_a_bits_user_amba_prot_privileged),
.io_deq_bits_user_amba_prot_secure (nodeOut_a_bits_user_amba_prot_secure),
.io_deq_bits_user_amba_prot_fetch (nodeOut_a_bits_user_amba_prot_fetch),
.io_deq_bits_mask (nodeOut_a_bits_mask),
.io_deq_bits_data (nodeOut_a_bits_data),
.io_deq_bits_corrupt (nodeOut_a_bits_corrupt)
); // @[Decoupled.scala:362:21]
Queue2_TLBundleD_a32d64s6k1z4u nodeIn_d_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (nodeOut_d_ready),
.io_enq_valid (nodeOut_d_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_opcode (nodeOut_d_bits_opcode), // @[MixedNode.scala:542:17]
.io_enq_bits_param (nodeOut_d_bits_param), // @[MixedNode.scala:542:17]
.io_enq_bits_size (nodeOut_d_bits_size), // @[MixedNode.scala:542:17]
.io_enq_bits_source (nodeOut_d_bits_source), // @[MixedNode.scala:542:17]
.io_enq_bits_sink (nodeOut_d_bits_sink), // @[MixedNode.scala:542:17]
.io_enq_bits_denied (nodeOut_d_bits_denied), // @[MixedNode.scala:542:17]
.io_enq_bits_data (nodeOut_d_bits_data), // @[MixedNode.scala:542:17]
.io_enq_bits_corrupt (nodeOut_d_bits_corrupt), // @[MixedNode.scala:542:17]
.io_deq_ready (nodeIn_d_ready), // @[MixedNode.scala:551:17]
.io_deq_valid (nodeIn_d_valid),
.io_deq_bits_opcode (nodeIn_d_bits_opcode),
.io_deq_bits_param (nodeIn_d_bits_param),
.io_deq_bits_size (nodeIn_d_bits_size),
.io_deq_bits_source (nodeIn_d_bits_source),
.io_deq_bits_sink (nodeIn_d_bits_sink),
.io_deq_bits_denied (nodeIn_d_bits_denied),
.io_deq_bits_data (nodeIn_d_bits_data),
.io_deq_bits_corrupt (nodeIn_d_bits_corrupt)
); // @[Decoupled.scala:362:21]
assign auto_in_a_ready = auto_in_a_ready_0; // @[Buffer.scala:40:9]
assign auto_in_d_valid = auto_in_d_valid_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_opcode = auto_in_d_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_param = auto_in_d_bits_param_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_size = auto_in_d_bits_size_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_source = auto_in_d_bits_source_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_sink = auto_in_d_bits_sink_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_denied = auto_in_d_bits_denied_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_data = auto_in_d_bits_data_0; // @[Buffer.scala:40:9]
assign auto_in_d_bits_corrupt = auto_in_d_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_a_valid = auto_out_a_valid_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_opcode = auto_out_a_bits_opcode_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_param = auto_out_a_bits_param_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_size = auto_out_a_bits_size_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_source = auto_out_a_bits_source_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_address = auto_out_a_bits_address_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_bufferable = auto_out_a_bits_user_amba_prot_bufferable_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_modifiable = auto_out_a_bits_user_amba_prot_modifiable_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_readalloc = auto_out_a_bits_user_amba_prot_readalloc_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_writealloc = auto_out_a_bits_user_amba_prot_writealloc_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_privileged = auto_out_a_bits_user_amba_prot_privileged_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_secure = auto_out_a_bits_user_amba_prot_secure_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_user_amba_prot_fetch = auto_out_a_bits_user_amba_prot_fetch_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_mask = auto_out_a_bits_mask_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_data = auto_out_a_bits_data_0; // @[Buffer.scala:40:9]
assign auto_out_a_bits_corrupt = auto_out_a_bits_corrupt_0; // @[Buffer.scala:40:9]
assign auto_out_d_ready = auto_out_d_ready_0; // @[Buffer.scala:40:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
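// Usage sketch (the names below are made up for illustration):
//
//   val maxCount = PlusArg("max_count", default = 16, docstring = "iteration limit")
//   when (counter >= maxCount) { done := true.B }
//
// Running the simulation with +max_count=42 overrides the default. PlusArg.timeout works
// the same way, but asserts when the supplied count exceeds a non-zero plusarg value.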
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File Nodes.scala:
package constellation.channel
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Parameters, Field}
import freechips.rocketchip.diplomacy._
case class EmptyParams()
case class ChannelEdgeParams(cp: ChannelParams, p: Parameters)
object ChannelImp extends SimpleNodeImp[EmptyParams, ChannelParams, ChannelEdgeParams, Channel] {
def edge(pd: EmptyParams, pu: ChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
ChannelEdgeParams(pu, p)
}
def bundle(e: ChannelEdgeParams) = new Channel(e.cp)(e.p)
def render(e: ChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#0000ff", label = e.cp.payloadBits.toString)
}
override def monitor(bundle: Channel, edge: ChannelEdgeParams): Unit = {
val monitor = Module(new NoCMonitor(edge.cp)(edge.p))
monitor.io.in := bundle
}
// TODO: Add nodepath stuff? override def mixO, override def mixI
}
case class ChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(ChannelImp)(Seq(EmptyParams()))
case class ChannelDestNode(val destParams: ChannelParams)(implicit valName: ValName) extends SinkNode(ChannelImp)(Seq(destParams))
case class ChannelAdapterNode(
slaveFn: ChannelParams => ChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(ChannelImp)((e: EmptyParams) => e, slaveFn)
case class ChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(ChannelImp)()
case class ChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(ChannelImp)()
case class IngressChannelEdgeParams(cp: IngressChannelParams, p: Parameters)
case class EgressChannelEdgeParams(cp: EgressChannelParams, p: Parameters)
object IngressChannelImp extends SimpleNodeImp[EmptyParams, IngressChannelParams, IngressChannelEdgeParams, IngressChannel] {
def edge(pd: EmptyParams, pu: IngressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
IngressChannelEdgeParams(pu, p)
}
def bundle(e: IngressChannelEdgeParams) = new IngressChannel(e.cp)(e.p)
def render(e: IngressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#00ff00", label = e.cp.payloadBits.toString)
}
}
object EgressChannelImp extends SimpleNodeImp[EmptyParams, EgressChannelParams, EgressChannelEdgeParams, EgressChannel] {
def edge(pd: EmptyParams, pu: EgressChannelParams, p: Parameters, sourceInfo: SourceInfo) = {
EgressChannelEdgeParams(pu, p)
}
def bundle(e: EgressChannelEdgeParams) = new EgressChannel(e.cp)(e.p)
def render(e: EgressChannelEdgeParams) = if (e.cp.possibleFlows.size == 0) {
RenderedEdge(colour = "ffffff", label = "X")
} else {
RenderedEdge(colour = "#ff0000", label = e.cp.payloadBits.toString)
}
}
case class IngressChannelSourceNode(val destId: Int)(implicit valName: ValName) extends SourceNode(IngressChannelImp)(Seq(EmptyParams()))
case class IngressChannelDestNode(val destParams: IngressChannelParams)(implicit valName: ValName) extends SinkNode(IngressChannelImp)(Seq(destParams))
case class EgressChannelSourceNode(val egressId: Int)(implicit valName: ValName) extends SourceNode(EgressChannelImp)(Seq(EmptyParams()))
case class EgressChannelDestNode(val destParams: EgressChannelParams)(implicit valName: ValName) extends SinkNode(EgressChannelImp)(Seq(destParams))
case class IngressChannelAdapterNode(
slaveFn: IngressChannelParams => IngressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(IngressChannelImp)(m => m, slaveFn)
case class EgressChannelAdapterNode(
slaveFn: EgressChannelParams => EgressChannelParams = { d => d })(
implicit valName: ValName) extends AdapterNode(EgressChannelImp)(m => m, slaveFn)
case class IngressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(IngressChannelImp)()
case class EgressChannelIdentityNode()(implicit valName: ValName) extends IdentityNode(EgressChannelImp)()
case class IngressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(IngressChannelImp)()
case class EgressChannelEphemeralNode()(implicit valName: ValName) extends EphemeralNode(EgressChannelImp)()
File Router.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{RoutingRelation}
import constellation.noc.{HasNoCParams}
case class UserRouterParams(
// Payload width. Must match payload width on all channels attached to this routing node
payloadBits: Int = 64,
// Combines SA and ST stages (removes pipeline register)
combineSAST: Boolean = false,
// Combines RC and VA stages (removes pipeline register)
combineRCVA: Boolean = false,
// Adds combinational path from SA to VA
coupleSAVA: Boolean = false,
vcAllocator: VCAllocatorParams => Parameters => VCAllocator = (vP) => (p) => new RotatingSingleVCAllocator(vP)(p)
)
case class RouterParams(
nodeId: Int,
nIngress: Int,
nEgress: Int,
user: UserRouterParams
)
trait HasRouterOutputParams {
def outParams: Seq[ChannelParams]
def egressParams: Seq[EgressChannelParams]
def allOutParams = outParams ++ egressParams
def nOutputs = outParams.size
def nEgress = egressParams.size
def nAllOutputs = allOutParams.size
}
trait HasRouterInputParams {
def inParams: Seq[ChannelParams]
def ingressParams: Seq[IngressChannelParams]
def allInParams = inParams ++ ingressParams
def nInputs = inParams.size
def nIngress = ingressParams.size
def nAllInputs = allInParams.size
}
trait HasRouterParams
{
def routerParams: RouterParams
def nodeId = routerParams.nodeId
def payloadBits = routerParams.user.payloadBits
}
class DebugBundle(val nIn: Int) extends Bundle {
val va_stall = Vec(nIn, UInt())
val sa_stall = Vec(nIn, UInt())
}
class Router(
val routerParams: RouterParams,
preDiplomaticInParams: Seq[ChannelParams],
preDiplomaticIngressParams: Seq[IngressChannelParams],
outDests: Seq[Int],
egressIds: Seq[Int]
)(implicit p: Parameters) extends LazyModule with HasNoCParams with HasRouterParams {
val allPreDiplomaticInParams = preDiplomaticInParams ++ preDiplomaticIngressParams
val destNodes = preDiplomaticInParams.map(u => ChannelDestNode(u))
val sourceNodes = outDests.map(u => ChannelSourceNode(u))
val ingressNodes = preDiplomaticIngressParams.map(u => IngressChannelDestNode(u))
val egressNodes = egressIds.map(u => EgressChannelSourceNode(u))
val debugNode = BundleBridgeSource(() => new DebugBundle(allPreDiplomaticInParams.size))
val ctrlNode = if (hasCtrl) Some(BundleBridgeSource(() => new RouterCtrlBundle)) else None
def inParams = module.inParams
def outParams = module.outParams
def ingressParams = module.ingressParams
def egressParams = module.egressParams
lazy val module = new LazyModuleImp(this) with HasRouterInputParams with HasRouterOutputParams {
val (io_in, edgesIn) = destNodes.map(_.in(0)).unzip
val (io_out, edgesOut) = sourceNodes.map(_.out(0)).unzip
val (io_ingress, edgesIngress) = ingressNodes.map(_.in(0)).unzip
val (io_egress, edgesEgress) = egressNodes.map(_.out(0)).unzip
val io_debug = debugNode.out(0)._1
val inParams = edgesIn.map(_.cp)
val outParams = edgesOut.map(_.cp)
val ingressParams = edgesIngress.map(_.cp)
val egressParams = edgesEgress.map(_.cp)
allOutParams.foreach(u => require(u.srcId == nodeId && u.payloadBits == routerParams.user.payloadBits))
allInParams.foreach(u => require(u.destId == nodeId && u.payloadBits == routerParams.user.payloadBits))
require(nIngress == routerParams.nIngress)
require(nEgress == routerParams.nEgress)
require(nAllInputs >= 1)
require(nAllOutputs >= 1)
require(nodeId < (1 << nodeIdBits))
val input_units = inParams.zipWithIndex.map { case (u,i) =>
Module(new InputUnit(u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"input_unit_${i}_from_${u.srcId}") }
val ingress_units = ingressParams.zipWithIndex.map { case (u,i) =>
Module(new IngressUnit(i, u, outParams, egressParams,
routerParams.user.combineRCVA, routerParams.user.combineSAST))
.suggestName(s"ingress_unit_${i+nInputs}_from_${u.ingressId}") }
val all_input_units = input_units ++ ingress_units
val output_units = outParams.zipWithIndex.map { case (u,i) =>
Module(new OutputUnit(inParams, ingressParams, u))
.suggestName(s"output_unit_${i}_to_${u.destId}")}
val egress_units = egressParams.zipWithIndex.map { case (u,i) =>
Module(new EgressUnit(routerParams.user.coupleSAVA && all_input_units.size == 1,
routerParams.user.combineSAST,
inParams, ingressParams, u))
.suggestName(s"egress_unit_${i+nOutputs}_to_${u.egressId}")}
val all_output_units = output_units ++ egress_units
val switch = Module(new Switch(routerParams, inParams, outParams, ingressParams, egressParams))
val switch_allocator = Module(new SwitchAllocator(routerParams, inParams, outParams, ingressParams, egressParams))
val vc_allocator = Module(routerParams.user.vcAllocator(
VCAllocatorParams(routerParams, inParams, outParams, ingressParams, egressParams)
)(p))
val route_computer = Module(new RouteComputer(routerParams, inParams, outParams, ingressParams, egressParams))
val fires_count = WireInit(PopCount(vc_allocator.io.req.map(_.fire)))
dontTouch(fires_count)
(io_in zip input_units ).foreach { case (i,u) => u.io.in <> i }
(io_ingress zip ingress_units).foreach { case (i,u) => u.io.in <> i.flit }
(output_units zip io_out ).foreach { case (u,o) => o <> u.io.out }
(egress_units zip io_egress).foreach { case (u,o) => o.flit <> u.io.out }
(route_computer.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.router_req }
(all_input_units zip route_computer.io.resp).foreach {
case (u,o) => u.io.router_resp <> o }
(vc_allocator.io.req zip all_input_units).foreach {
case (i,u) => i <> u.io.vcalloc_req }
(all_input_units zip vc_allocator.io.resp).foreach {
case (u,o) => u.io.vcalloc_resp <> o }
(all_output_units zip vc_allocator.io.out_allocs).foreach {
case (u,a) => u.io.allocs <> a }
(vc_allocator.io.channel_status zip all_output_units).foreach {
case (a,u) => a := u.io.channel_status }
all_input_units.foreach(in => all_output_units.zipWithIndex.foreach { case (out,outIdx) =>
in.io.out_credit_available(outIdx) := out.io.credit_available
})
(all_input_units zip switch_allocator.io.req).foreach {
case (u,r) => r <> u.io.salloc_req }
(all_output_units zip switch_allocator.io.credit_alloc).foreach {
case (u,a) => u.io.credit_alloc := a }
(switch.io.in zip all_input_units).foreach {
case (i,u) => i <> u.io.out }
(all_output_units zip switch.io.out).foreach {
case (u,o) => u.io.in <> o }
switch.io.sel := (if (routerParams.user.combineSAST) {
switch_allocator.io.switch_sel
} else {
RegNext(switch_allocator.io.switch_sel)
})
if (hasCtrl) {
val io_ctrl = ctrlNode.get.out(0)._1
val ctrl = Module(new RouterControlUnit(routerParams, inParams, outParams, ingressParams, egressParams))
io_ctrl <> ctrl.io.ctrl
(all_input_units zip ctrl.io.in_block ).foreach { case (l,r) => l.io.block := r }
(all_input_units zip ctrl.io.in_fire ).foreach { case (l,r) => r := l.io.out.map(_.valid) }
} else {
input_units.foreach(_.io.block := false.B)
ingress_units.foreach(_.io.block := false.B)
}
(io_debug.va_stall zip all_input_units.map(_.io.debug.va_stall)).map { case (l,r) => l := r }
(io_debug.sa_stall zip all_input_units.map(_.io.debug.sa_stall)).map { case (l,r) => l := r }
val debug_tsc = RegInit(0.U(64.W))
debug_tsc := debug_tsc + 1.U
val debug_sample = RegInit(0.U(64.W))
debug_sample := debug_sample + 1.U
val sample_rate = PlusArg("noc_util_sample_rate", width=20)
when (debug_sample === sample_rate - 1.U) { debug_sample := 0.U }
def sample(fire: Bool, s: String) = {
val util_ctr = RegInit(0.U(64.W))
val fired = RegInit(false.B)
util_ctr := util_ctr + fire
fired := fired || fire
when (sample_rate =/= 0.U && debug_sample === sample_rate - 1.U && fired) {
val fmtStr = s"nocsample %d $s %d\n"
printf(fmtStr, debug_tsc, util_ctr);
fired := fire
}
}
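// Informal example of the output: running with +noc_util_sample_rate=1024 makes each
// instrumented port print roughly every 1024 cycles, e.g. "nocsample 2048 3 5 917",
// meaning that by cycle 2048 the channel from node 3 into this node 5 had carried 917 flits.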
destNodes.map(_.in(0)).foreach { case (in, edge) => in.flit.map { f =>
sample(f.fire, s"${edge.cp.srcId} $nodeId")
} }
ingressNodes.map(_.in(0)).foreach { case (in, edge) =>
sample(in.flit.fire, s"i${edge.cp.asInstanceOf[IngressChannelParams].ingressId} $nodeId")
}
egressNodes.map(_.out(0)).foreach { case (out, edge) =>
sample(out.flit.fire, s"$nodeId e${edge.cp.asInstanceOf[EgressChannelParams].egressId}")
}
}
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and the unconnected [[Dangle]]s from this module
 * and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
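// Illustrative sketch (an assumption, not part of the file above): a LazyModule whose
// implementation is a LazyRawModuleImp that forwards an externally supplied clock/reset
// pair to its lazy children via childClock/childReset. The names `MyClockDomain`,
// `clk_in`, and `rst_in` are hypothetical.
class MyClockDomain(implicit p: Parameters) extends LazyModule {
  // diplomatic nodes and child LazyModules would be declared here
  lazy val module = new LazyRawModuleImp(this) {
    override def provideImplicitClockToLazyChildren = true
    val clk_in = IO(Input(Clock()))
    val rst_in = IO(Input(Reset()))
    childClock := clk_in // children are instantiated under withClockAndReset(childClock, childReset)
    childReset := rst_in
  }
}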
| module Router_57( // @[Router.scala:89:25]
input clock, // @[Router.scala:89:25]
input reset, // @[Router.scala:89:25]
output [3:0] auto_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_debug_out_va_stall_1, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_debug_out_sa_stall_1, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_2_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_2_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_2_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_source_nodes_out_2_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_2_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_2_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_2_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_2_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_2_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_2_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_source_nodes_out_2_credit_return, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_source_nodes_out_2_vc_free, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_1_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_source_nodes_out_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_source_nodes_out_1_credit_return, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_source_nodes_out_1_vc_free, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_0_flit_0_valid, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
output auto_source_nodes_out_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
output [72:0] auto_source_nodes_out_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_source_nodes_out_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_source_nodes_out_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_source_nodes_out_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_source_nodes_out_0_credit_return, // @[LazyModuleImp.scala:107:25]
input [9:0] auto_source_nodes_out_0_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_2_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_2_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_2_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_dest_nodes_in_2_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_2_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_2_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_2_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_2_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_2_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_2_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_dest_nodes_in_2_credit_return, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_dest_nodes_in_2_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_1_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_1_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_1_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_dest_nodes_in_1_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_1_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_1_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_1_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_1_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_1_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_dest_nodes_in_1_credit_return, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_dest_nodes_in_1_vc_free, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_0_flit_0_valid, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_0_flit_0_bits_head, // @[LazyModuleImp.scala:107:25]
input auto_dest_nodes_in_0_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25]
input [72:0] auto_dest_nodes_in_0_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_0_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_dest_nodes_in_0_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_0_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_dest_nodes_in_0_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_dest_nodes_in_0_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_dest_nodes_in_0_credit_return, // @[LazyModuleImp.scala:107:25]
output [9:0] auto_dest_nodes_in_0_vc_free // @[LazyModuleImp.scala:107:25]
);
wire [19:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire _route_computer_io_resp_2_vc_sel_0_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_3; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_4; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_5; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_6; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_7; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_8; // @[Router.scala:136:32]
wire _route_computer_io_resp_2_vc_sel_0_9; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_2; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_3; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_4; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_5; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_6; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_7; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_8; // @[Router.scala:136:32]
wire _route_computer_io_resp_1_vc_sel_0_9; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_2_9; // @[Router.scala:136:32]
wire _route_computer_io_resp_0_vc_sel_1_9; // @[Router.scala:136:32]
wire _vc_allocator_io_req_2_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_1_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_req_0_ready; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_2_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_2; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_3; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_4; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_5; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_6; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_7; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_8; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_1_vc_sel_0_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_2_9; // @[Router.scala:133:30]
wire _vc_allocator_io_resp_0_vc_sel_1_9; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_2_9_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_1_9_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_2_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_3_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_4_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_5_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_6_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_7_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_8_alloc; // @[Router.scala:133:30]
wire _vc_allocator_io_out_allocs_0_9_alloc; // @[Router.scala:133:30]
wire _switch_allocator_io_req_2_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_1_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_req_0_0_ready; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_2_9_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_1_9_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_2_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_3_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_4_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_5_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_6_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_7_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_8_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_credit_alloc_0_9_alloc; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_2_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_1_0_0_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_2_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_1_0; // @[Router.scala:132:34]
wire _switch_allocator_io_switch_sel_0_0_0_0; // @[Router.scala:132:34]
wire _switch_io_out_2_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_2_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_2_0_bits_payload; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_2_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_2_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_2_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_2_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_2_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_2_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _switch_io_out_1_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_1_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_1_0_bits_payload; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_1_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_1_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_1_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_1_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_1_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_1_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _switch_io_out_0_0_valid; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_head; // @[Router.scala:131:24]
wire _switch_io_out_0_0_bits_tail; // @[Router.scala:131:24]
wire [72:0] _switch_io_out_0_0_bits_payload; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_0_0_bits_flow_vnet_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_ingress_node; // @[Router.scala:131:24]
wire [1:0] _switch_io_out_0_0_bits_flow_ingress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_flow_egress_node; // @[Router.scala:131:24]
wire [2:0] _switch_io_out_0_0_bits_flow_egress_node_id; // @[Router.scala:131:24]
wire [3:0] _switch_io_out_0_0_bits_virt_channel_id; // @[Router.scala:131:24]
wire _output_unit_2_to_11_io_credit_available_9; // @[Router.scala:122:13]
wire _output_unit_2_to_11_io_channel_status_9_occupied; // @[Router.scala:122:13]
wire _output_unit_1_to_9_io_credit_available_9; // @[Router.scala:122:13]
wire _output_unit_1_to_9_io_channel_status_9_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_credit_available_2; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_credit_available_3; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_credit_available_4; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_credit_available_5; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_credit_available_6; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_credit_available_7; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_credit_available_8; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_credit_available_9; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_channel_status_2_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_channel_status_3_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_channel_status_4_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_channel_status_5_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_channel_status_6_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_channel_status_7_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_channel_status_8_occupied; // @[Router.scala:122:13]
wire _output_unit_0_to_2_io_channel_status_9_occupied; // @[Router.scala:122:13]
wire [3:0] _input_unit_2_from_11_io_router_req_bits_src_virt_id; // @[Router.scala:112:13]
wire [2:0] _input_unit_2_from_11_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_2_from_11_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_2_from_11_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_2_from_11_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_2_from_11_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_2_3; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_2_4; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_2_5; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_2_6; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_2_7; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_2_8; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_2_9; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_1_3; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_1_4; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_1_5; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_1_6; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_1_7; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_1_8; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_1_9; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_2_from_11_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [72:0] _input_unit_2_from_11_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [2:0] _input_unit_2_from_11_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_2_from_11_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_2_from_11_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_2_from_11_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_2_from_11_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_2_from_11_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_9_io_router_req_bits_src_virt_id; // @[Router.scala:112:13]
wire [2:0] _input_unit_1_from_9_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_9_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_9_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_9_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_1_from_9_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_vcalloc_req_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_vcalloc_req_bits_vc_sel_0_3; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_vcalloc_req_bits_vc_sel_0_4; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_vcalloc_req_bits_vc_sel_0_5; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_vcalloc_req_bits_vc_sel_0_6; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_vcalloc_req_bits_vc_sel_0_7; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_vcalloc_req_bits_vc_sel_0_8; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_vcalloc_req_bits_vc_sel_0_9; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_3; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_4; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_5; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_6; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_7; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_8; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_2_9; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_3; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_4; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_5; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_6; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_7; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_8; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_1_9; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_1_from_9_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [72:0] _input_unit_1_from_9_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [2:0] _input_unit_1_from_9_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_9_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_1_from_9_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_9_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_1_from_9_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_1_from_9_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_2_io_router_req_bits_src_virt_id; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_2_io_router_req_bits_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_2_io_router_req_bits_flow_ingress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_2_io_router_req_bits_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_2_io_router_req_bits_flow_egress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_2_io_router_req_bits_flow_egress_node_id; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_vcalloc_req_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_vcalloc_req_bits_vc_sel_2_9; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_vcalloc_req_bits_vc_sel_1_9; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_2_2; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_2_3; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_2_4; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_2_5; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_2_6; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_2_7; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_2_8; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_2_9; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_1_2; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_1_3; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_1_4; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_1_5; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_1_6; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_1_7; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_1_8; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_1_9; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_0_2; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_0_3; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_0_4; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_0_5; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_0_6; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_0_7; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_0_8; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_vc_sel_0_9; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_salloc_req_0_bits_tail; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_out_0_valid; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_out_0_bits_flit_head; // @[Router.scala:112:13]
wire _input_unit_0_from_2_io_out_0_bits_flit_tail; // @[Router.scala:112:13]
wire [72:0] _input_unit_0_from_2_io_out_0_bits_flit_payload; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_2_io_out_0_bits_flit_flow_vnet_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_2_io_out_0_bits_flit_flow_ingress_node; // @[Router.scala:112:13]
wire [1:0] _input_unit_0_from_2_io_out_0_bits_flit_flow_ingress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_2_io_out_0_bits_flit_flow_egress_node; // @[Router.scala:112:13]
wire [2:0] _input_unit_0_from_2_io_out_0_bits_flit_flow_egress_node_id; // @[Router.scala:112:13]
wire [3:0] _input_unit_0_from_2_io_out_0_bits_out_virt_channel; // @[Router.scala:112:13]
wire [1:0] fires_count = {1'h0, _vc_allocator_io_req_0_ready & _input_unit_0_from_2_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_1_ready & _input_unit_1_from_9_io_vcalloc_req_valid} + {1'h0, _vc_allocator_io_req_2_ready & _input_unit_2_from_11_io_vcalloc_req_valid}; // @[Decoupled.scala:51:35]
reg REG_2_0_2_0; // @[Router.scala:178:14]
reg REG_2_0_1_0; // @[Router.scala:178:14]
reg REG_2_0_0_0; // @[Router.scala:178:14]
reg REG_1_0_2_0; // @[Router.scala:178:14]
reg REG_1_0_1_0; // @[Router.scala:178:14]
reg REG_1_0_0_0; // @[Router.scala:178:14]
reg REG_0_0_2_0; // @[Router.scala:178:14]
reg REG_0_0_1_0; // @[Router.scala:178:14]
reg REG_0_0_0_0; // @[Router.scala:178:14]
reg [63:0] debug_tsc; // @[Router.scala:195:28]
reg [63:0] debug_sample; // @[Router.scala:197:31]
wire _GEN = debug_sample == {44'h0, _plusarg_reader_out - 20'h1}; // @[PlusArg.scala:80:11]
reg [63:0] util_ctr; // @[Router.scala:203:29]
reg fired; // @[Router.scala:204:26]
wire _GEN_0 = (|_plusarg_reader_out) & _GEN; // @[PlusArg.scala:80:11]
wire _GEN_1 = _GEN_0 & fired; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_1; // @[Router.scala:203:29]
reg fired_1; // @[Router.scala:204:26]
wire _GEN_2 = _GEN_0 & fired_1; // @[Router.scala:204:26, :207:{33,71}]
reg [63:0] util_ctr_2; // @[Router.scala:203:29]
reg fired_2; // @[Router.scala:204:26]
wire _GEN_3 = _GEN_0 & fired_2; // @[Router.scala:204:26, :207:{33,71}] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
package constellation.channel
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util._
import constellation.noc.{HasNoCParams}
class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams {
val io = IO(new Bundle {
val in = Input(new Channel(cParam))
})
val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B }))
for (i <- 0 until cParam.srcSpeedup) {
val flit = io.in.flit(i)
when (flit.valid) {
when (flit.bits.head) {
in_flight(flit.bits.virt_channel_id) := true.B
assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken")
}
when (flit.bits.tail) {
in_flight(flit.bits.virt_channel_id) := false.B
}
}
val possibleFlows = cParam.possibleFlows
when (flit.valid && flit.bits.head) {
cParam match {
case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) =>
assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
}
}
}
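// Hypothetical usage sketch (not part of the original file): a small wrapper that attaches
// a NoCMonitor to a channel input so the head/tail and legal-flow assertions above fire in
// simulation. The wrapper name `MonitoredChannelTap` is an assumption.
class MonitoredChannelTap(cParam: ChannelParams)(implicit p: Parameters) extends Module {
  val io = IO(new Bundle { val in = Input(new Channel(cParam)) })
  val monitor = Module(new NoCMonitor(cParam))
  monitor.io.in := io.in
}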
File Types.scala:
package constellation.routing
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import constellation.noc.{HasNoCParams}
import constellation.channel.{Flit}
/** A representation for one specific virtual channel in wormhole routing
*
* @param src the source node
* @param dst the destination node
* @param vc ID for the virtual channel
* @param n_vc the number of virtual channels
*/
// BEGIN: ChannelRoutingInfo
case class ChannelRoutingInfo(
src: Int,
dst: Int,
vc: Int,
n_vc: Int
) {
// END: ChannelRoutingInfo
require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this")
require (!(src == -1 && dst == -1), s"Illegal $this")
require (vc < n_vc, s"Illegal $this")
val isIngress = src == -1
val isEgress = dst == -1
}
/** Represents the properties of a packet that are relevant for routing
* ingressId and egressId uniquely identify a flow, but vnet and dst are used here
* to simplify the implementation of routing relations
*
* @param ingressId packet's source ingress point
* @param egressId packet's destination egress point
* @param vNet virtual subnetwork identifier
* @param dst packet's destination node ID
*/
// BEGIN: FlowRoutingInfo
case class FlowRoutingInfo(
ingressId: Int,
egressId: Int,
vNetId: Int,
ingressNode: Int,
ingressNodeId: Int,
egressNode: Int,
egressNodeId: Int,
fifo: Boolean
) {
// END: FlowRoutingInfo
def isFlow(f: FlowRoutingBundle): Bool = {
(f.ingress_node === ingressNode.U &&
f.egress_node === egressNode.U &&
f.ingress_node_id === ingressNodeId.U &&
f.egress_node_id === egressNodeId.U)
}
def asLiteral(b: FlowRoutingBundle): BigInt = {
Seq(
(vNetId , b.vnet_id),
(ingressNode , b.ingress_node),
(ingressNodeId , b.ingress_node_id),
(egressNode , b.egress_node),
(egressNodeId , b.egress_node_id)
).foldLeft(0)((l, t) => {
(l << t._2.getWidth) | t._1
})
}
}
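// Worked packing sketch (the widths and field values below are assumptions for illustration):
// asLiteral places each field into a slot of its own width, most-significant field first.
object FlowPackingExample {
  val widths = Seq(2, 4, 2, 4, 2)   // vnet_id, ingress_node, ingress_node_id, egress_node, egress_node_id
  val fields = Seq(1, 3, 0, 9, 1)   // vNetId, ingressNode, ingressNodeId, egressNode, egressNodeId
  val packed = (fields zip widths).foldLeft(BigInt(0)) { case (l, (v, w)) => (l << w) | v }
  // packed == 4901 == 0b01_0011_00_1001_01, mirroring FlowRoutingInfo.asLiteral above
}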
class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams {
// Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination
// This simplifies the routing tables
val vnet_id = UInt(log2Ceil(nVirtualNetworks).W)
val ingress_node = UInt(log2Ceil(nNodes).W)
val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W)
val egress_node = UInt(log2Ceil(nNodes).W)
val egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W)
}
| module NoCMonitor_15( // @[Monitor.scala:11:7]
input clock, // @[Monitor.scala:11:7]
input reset, // @[Monitor.scala:11:7]
input io_in_flit_0_valid, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
input [5:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
input [5:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
input [2:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
input [4:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);
reg in_flight_0; // @[Monitor.scala:16:26]
reg in_flight_1; // @[Monitor.scala:16:26]
reg in_flight_2; // @[Monitor.scala:16:26]
reg in_flight_3; // @[Monitor.scala:16:26]
reg in_flight_4; // @[Monitor.scala:16:26]
reg in_flight_5; // @[Monitor.scala:16:26]
reg in_flight_6; // @[Monitor.scala:16:26]
reg in_flight_7; // @[Monitor.scala:16:26]
reg in_flight_8; // @[Monitor.scala:16:26]
reg in_flight_9; // @[Monitor.scala:16:26]
reg in_flight_10; // @[Monitor.scala:16:26]
reg in_flight_11; // @[Monitor.scala:16:26]
reg in_flight_12; // @[Monitor.scala:16:26]
reg in_flight_13; // @[Monitor.scala:16:26]
reg in_flight_14; // @[Monitor.scala:16:26]
reg in_flight_15; // @[Monitor.scala:16:26]
reg in_flight_16; // @[Monitor.scala:16:26]
reg in_flight_17; // @[Monitor.scala:16:26]
reg in_flight_18; // @[Monitor.scala:16:26]
reg in_flight_19; // @[Monitor.scala:16:26]
reg in_flight_20; // @[Monitor.scala:16:26]
reg in_flight_21; // @[Monitor.scala:16:26]
wire _GEN = io_in_flit_0_bits_virt_channel_id == 5'h0; // @[Monitor.scala:21:46]
wire _GEN_0 = io_in_flit_0_bits_virt_channel_id == 5'h1; // @[Monitor.scala:21:46]
wire _GEN_1 = io_in_flit_0_bits_virt_channel_id == 5'h2; // @[Monitor.scala:21:46]
wire _GEN_2 = io_in_flit_0_bits_virt_channel_id == 5'h3; // @[Monitor.scala:21:46]
wire _GEN_3 = io_in_flit_0_bits_virt_channel_id == 5'h4; // @[Monitor.scala:21:46]
wire _GEN_4 = io_in_flit_0_bits_virt_channel_id == 5'h5; // @[Monitor.scala:21:46]
wire _GEN_5 = io_in_flit_0_bits_virt_channel_id == 5'h6; // @[Monitor.scala:21:46]
wire _GEN_6 = io_in_flit_0_bits_virt_channel_id == 5'h7; // @[Monitor.scala:21:46]
wire _GEN_7 = io_in_flit_0_bits_virt_channel_id == 5'hA; // @[Monitor.scala:21:46]
wire _GEN_8 = io_in_flit_0_bits_virt_channel_id == 5'hB; // @[Monitor.scala:21:46]
wire _GEN_9 = io_in_flit_0_bits_virt_channel_id == 5'hE; // @[Monitor.scala:21:46]
wire _GEN_10 = io_in_flit_0_bits_virt_channel_id == 5'hF; // @[Monitor.scala:21:46]
wire _GEN_11 = io_in_flit_0_bits_virt_channel_id == 5'h12; // @[Monitor.scala:21:46]
wire _GEN_12 = io_in_flit_0_bits_virt_channel_id == 5'h13; // @[Monitor.scala:21:46] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
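// Hypothetical usage sketch (not part of the original file): a 3-deep pipeline of a 4-bit
// signal built behaviorally with ShiftRegInit, and the same depth built from asynchronously
// reset registers via AsyncResetShiftReg. The module name `PipeExample` is an assumption.
class PipeExample extends Module {
  val io = IO(new Bundle {
    val d = Input(UInt(4.W))
    val q = Output(UInt(4.W))
  })
  // Behavioral 3-deep pipe with a named, reset-initialized register chain.
  val piped = ShiftRegInit(io.d, n = 3, init = 0.U(4.W), name = Some("pipe"))
  // Same depth, but built from AsyncResetRegVec instances for backend-replaceable timing pipes.
  val apiped = AsyncResetShiftReg(io.d, depth = 3, init = 0, name = Some("apipe"))
  io.q := piped ^ apiped
}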
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
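// Hypothetical usage sketch (not part of the original file): synchronizing a single-bit
// signal crossing into this clock domain, and capturing a multi-bit word with
// ClockCrossingReg. The module and port names here are assumptions.
class CdcExample extends Module {
  val io = IO(new Bundle {
    val async_bit = Input(Bool())
    val bus_word  = Input(UInt(8.W))
    val bus_en    = Input(Bool())
    val bit_s     = Output(Bool())
    val word_s    = Output(UInt(8.W))
  })
  // Three asynchronously reset metastability flops on the single-bit signal.
  io.bit_s := AsyncResetSynchronizerShiftReg(io.async_bit, sync = 3, init = 0, name = Some("bit_sync"))
  // Single-deep, enable-gated capture register for the already-quiescent word.
  io.word_s := ClockCrossingReg(io.bus_word, en = io.bus_en, doInit = false, name = Some("word_cdc"))
}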
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_413( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19]
end
end // always @(posedge, posedge)
Generate the Verilog code corresponding to the following Chisel files.
File rob.scala:
//******************************************************************************
// Copyright (c) 2013 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Re-order Buffer
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Bank the ROB, such that each "dispatch" group gets its own row of the ROB,
// and each instruction in the dispatch group goes to a different bank.
// We can compress out the PC by only saving the high-order bits!
//
// ASSUMPTIONS:
// - dispatch groups are aligned to the PC.
//
// NOTES:
// - Currently we do not compress out bubbles in the ROB.
// - Exceptions are only taken when at the head of the commit bundle --
// this helps deal with loads, stores, and refetch instructions.
package boom.v3.exu
import scala.math.ceil
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
import boom.v3.common._
import boom.v3.util._
/**
* IO bundle to interact with the ROB
*
* @param numWakeupPorts number of wakeup ports to the rob
* @param numFpuPorts number of fpu ports that will write back fflags
*/
class RobIo(
val numWakeupPorts: Int,
val numFpuPorts: Int
)(implicit p: Parameters) extends BoomBundle
{
// Decode Stage
// (Allocate, write instruction to ROB).
val enq_valids = Input(Vec(coreWidth, Bool()))
val enq_uops = Input(Vec(coreWidth, new MicroOp()))
val enq_partial_stall= Input(Bool()) // we're dispatching only a partial packet,
// and stalling on the rest of it (don't
// advance the tail ptr)
val xcpt_fetch_pc = Input(UInt(vaddrBitsExtended.W))
val rob_tail_idx = Output(UInt(robAddrSz.W))
val rob_pnr_idx = Output(UInt(robAddrSz.W))
val rob_head_idx = Output(UInt(robAddrSz.W))
// Handle Branch Misspeculations
val brupdate = Input(new BrUpdateInfo())
// Write-back Stage
// (Update of ROB)
// Instruction is no longer busy and can be committed
val wb_resps = Flipped(Vec(numWakeupPorts, Valid(new ExeUnitResp(xLen max fLen+1))))
// Unbusying ports for stores.
// +1 for fpstdata
val lsu_clr_bsy = Input(Vec(memWidth + 1, Valid(UInt(robAddrSz.W))))
// Port for unmarking loads/stores as speculation hazards.
val lsu_clr_unsafe = Input(Vec(memWidth, Valid(UInt(robAddrSz.W))))
// Track side-effects for debug purposes.
// Also need to know when loads write back, whereas we don't need loads to unbusy.
val debug_wb_valids = Input(Vec(numWakeupPorts, Bool()))
val debug_wb_wdata = Input(Vec(numWakeupPorts, Bits(xLen.W)))
val fflags = Flipped(Vec(numFpuPorts, new ValidIO(new FFlagsResp())))
val lxcpt = Input(Valid(new Exception())) // LSU
val csr_replay = Input(Valid(new Exception()))
// Commit stage (free resources; also used for rollback).
val commit = Output(new CommitSignals())
// tell the LSU that the head of the ROB is a load
// (some loads can only execute once they are at the head of the ROB).
val com_load_is_at_rob_head = Output(Bool())
// Communicate exceptions to the CSRFile
val com_xcpt = Valid(new CommitExceptionSignals())
// Let the CSRFile stall us (e.g., wfi).
val csr_stall = Input(Bool())
// Flush signals (including exceptions, pipeline replays, and memory ordering failures)
// to send to the frontend for redirection.
val flush = Valid(new CommitExceptionSignals)
// Stall Decode as appropriate
val empty = Output(Bool())
val ready = Output(Bool()) // ROB is busy unrolling rename state...
// Stall the frontend if we know we will redirect the PC
val flush_frontend = Output(Bool())
val debug_tsc = Input(UInt(xLen.W))
}
/**
* Bundle to send commit signals across processor
*/
class CommitSignals(implicit p: Parameters) extends BoomBundle
{
val valids = Vec(retireWidth, Bool()) // These instructions may not correspond to an architecturally executed insn
val arch_valids = Vec(retireWidth, Bool())
val uops = Vec(retireWidth, new MicroOp())
val fflags = Valid(UInt(5.W))
// These come a cycle later
val debug_insts = Vec(retireWidth, UInt(32.W))
// Perform rollback of rename state (in conjunction with commit.uops).
val rbk_valids = Vec(retireWidth, Bool())
val rollback = Bool()
val debug_wdata = Vec(retireWidth, UInt(xLen.W))
}
/**
* Bundle to communicate exceptions to CSRFile
*
* TODO combine FlushSignals and ExceptionSignals (currently timed to different cycles).
*/
class CommitExceptionSignals(implicit p: Parameters) extends BoomBundle
{
val ftq_idx = UInt(log2Ceil(ftqSz).W)
val edge_inst = Bool()
val is_rvc = Bool()
val pc_lob = UInt(log2Ceil(icBlockBytes).W)
val cause = UInt(xLen.W)
val badvaddr = UInt(xLen.W)
// The ROB needs to tell the FTQ if there's a pipeline flush (and what type)
// so the FTQ can drive the frontend with the correct redirected PC.
val flush_typ = FlushTypes()
}
/**
* Tell the frontend the type of flush so it can set up the next PC properly.
*/
object FlushTypes
{
def SZ = 3
def apply() = UInt(SZ.W)
def none = 0.U
def xcpt = 1.U // An exception occurred.
def eret = (2+1).U // Execute an environment return instruction.
def refetch = 2.U // Flush and refetch the head instruction.
def next = 4.U // Flush and fetch the next instruction.
def useCsrEvec(typ: UInt): Bool = typ(0) // typ === xcpt.U || typ === eret.U
def useSamePC(typ: UInt): Bool = typ === refetch
def usePCplus4(typ: UInt): Bool = typ === next
def getType(valid: Bool, i_xcpt: Bool, i_eret: Bool, i_refetch: Bool): UInt = {
val ret =
Mux(!valid, none,
Mux(i_eret, eret,
Mux(i_xcpt, xcpt,
Mux(i_refetch, refetch,
next))))
ret
}
}
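// Illustration only (not part of the original BOOM source): the encoding
// above is chosen so that a single bit test distinguishes the flush types
// that need the CSR evec (xcpt = 0b001, eret = 0b011) from those that do
// not (refetch = 0b010, next = 0b100). A minimal plain-Scala sketch of the
// same decode, assuming the constant values defined in FlushTypes:
object FlushTypesSketch
{
  def useCsrEvec(typ: Int): Boolean = (typ & 1) == 1 // xcpt (1) or eret (3)
  def useSamePC (typ: Int): Boolean = typ == 2       // refetch
  def usePCplus4(typ: Int): Boolean = typ == 4       // next
}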
/**
* Bundle of signals indicating that an exception occurred
*/
class Exception(implicit p: Parameters) extends BoomBundle
{
val uop = new MicroOp()
val cause = Bits(log2Ceil(freechips.rocketchip.rocket.Causes.all.max+2).W)
val badvaddr = UInt(coreMaxAddrBits.W)
}
/**
* Bundle for debug ROB signals
* These should not be synthesized!
*/
class DebugRobSignals(implicit p: Parameters) extends BoomBundle
{
val state = UInt()
val rob_head = UInt(robAddrSz.W)
val rob_pnr = UInt(robAddrSz.W)
val xcpt_val = Bool()
val xcpt_uop = new MicroOp()
val xcpt_badvaddr = UInt(xLen.W)
}
/**
* Reorder Buffer to keep track of dependencies and inflight instructions
*
* @param numWakeupPorts number of wakeup ports to the ROB
* @param numFpuPorts number of FPU units that will write back fflags
*/
class Rob(
val numWakeupPorts: Int,
val numFpuPorts: Int
)(implicit p: Parameters) extends BoomModule
{
val io = IO(new RobIo(numWakeupPorts, numFpuPorts))
// ROB Finite State Machine
val s_reset :: s_normal :: s_rollback :: s_wait_till_empty :: Nil = Enum(4)
val rob_state = RegInit(s_reset)
  // Commit entries at the head, and unwind exceptions from the tail
val rob_head = RegInit(0.U(log2Ceil(numRobRows).W))
val rob_head_lsb = RegInit(0.U((1 max log2Ceil(coreWidth)).W)) // TODO: Accurately track head LSB (currently always 0)
val rob_head_idx = if (coreWidth == 1) rob_head else Cat(rob_head, rob_head_lsb)
val rob_tail = RegInit(0.U(log2Ceil(numRobRows).W))
val rob_tail_lsb = RegInit(0.U((1 max log2Ceil(coreWidth)).W))
val rob_tail_idx = if (coreWidth == 1) rob_tail else Cat(rob_tail, rob_tail_lsb)
val rob_pnr = RegInit(0.U(log2Ceil(numRobRows).W))
val rob_pnr_lsb = RegInit(0.U((1 max log2Ceil(coreWidth)).W))
val rob_pnr_idx = if (coreWidth == 1) rob_pnr else Cat(rob_pnr , rob_pnr_lsb)
val com_idx = Mux(rob_state === s_rollback, rob_tail, rob_head)
val maybe_full = RegInit(false.B)
val full = Wire(Bool())
val empty = Wire(Bool())
val will_commit = Wire(Vec(coreWidth, Bool()))
val can_commit = Wire(Vec(coreWidth, Bool()))
val can_throw_exception = Wire(Vec(coreWidth, Bool()))
val rob_pnr_unsafe = Wire(Vec(coreWidth, Bool())) // are the instructions at the pnr unsafe?
val rob_head_vals = Wire(Vec(coreWidth, Bool())) // are the instructions at the head valid?
val rob_tail_vals = Wire(Vec(coreWidth, Bool())) // are the instructions at the tail valid? (to track partial row dispatches)
val rob_head_uses_stq = Wire(Vec(coreWidth, Bool()))
val rob_head_uses_ldq = Wire(Vec(coreWidth, Bool()))
val rob_head_fflags = Wire(Vec(coreWidth, UInt(freechips.rocketchip.tile.FPConstants.FLAGS_SZ.W)))
val exception_thrown = Wire(Bool())
// exception info
// TODO compress xcpt cause size. Most bits in the middle are zero.
val r_xcpt_val = RegInit(false.B)
val r_xcpt_uop = Reg(new MicroOp())
val r_xcpt_badvaddr = Reg(UInt(coreMaxAddrBits.W))
io.flush_frontend := r_xcpt_val
//--------------------------------------------------
// Utility
def GetRowIdx(rob_idx: UInt): UInt = {
if (coreWidth == 1) return rob_idx
else return rob_idx >> log2Ceil(coreWidth).U
}
def GetBankIdx(rob_idx: UInt): UInt = {
if(coreWidth == 1) { return 0.U }
else { return rob_idx(log2Ceil(coreWidth)-1, 0).asUInt }
}
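  // Illustration only (not in the original source): a ROB index is the row
  // index concatenated with the bank index, so for a hypothetical
  // coreWidth = 2, rob_idx = 5 = 0b101 splits into row 0b10 = 2 and bank
  // 0b1 = 1; GetRowIdx and GetBankIdx above simply undo that packing.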
// **************************************************************************
// Debug
class DebugRobBundle extends BoomBundle
{
val valid = Bool()
val busy = Bool()
val unsafe = Bool()
val uop = new MicroOp()
val exception = Bool()
}
val debug_entry = Wire(Vec(numRobEntries, new DebugRobBundle))
debug_entry := DontCare // override in statements below
// **************************************************************************
// --------------------------------------------------------------------------
// **************************************************************************
// Contains all information the PNR needs to find the oldest instruction which can't be safely speculated past.
val rob_unsafe_masked = WireInit(VecInit(Seq.fill(numRobRows << log2Ceil(coreWidth)){false.B}))
// Used for trace port, for debug purposes only
val rob_debug_inst_mem = SyncReadMem(numRobRows, Vec(coreWidth, UInt(32.W)))
val rob_debug_inst_wmask = WireInit(VecInit(0.U(coreWidth.W).asBools))
val rob_debug_inst_wdata = Wire(Vec(coreWidth, UInt(32.W)))
rob_debug_inst_mem.write(rob_tail, rob_debug_inst_wdata, rob_debug_inst_wmask)
val rob_debug_inst_rdata = rob_debug_inst_mem.read(rob_head, will_commit.reduce(_||_))
val rob_fflags = Seq.fill(coreWidth)(Reg(Vec(numRobRows, UInt(freechips.rocketchip.tile.FPConstants.FLAGS_SZ.W))))
for (w <- 0 until coreWidth) {
def MatchBank(bank_idx: UInt): Bool = (bank_idx === w.U)
// one bank
val rob_val = RegInit(VecInit(Seq.fill(numRobRows){false.B}))
val rob_bsy = Reg(Vec(numRobRows, Bool()))
val rob_unsafe = Reg(Vec(numRobRows, Bool()))
val rob_uop = Reg(Vec(numRobRows, new MicroOp()))
val rob_exception = Reg(Vec(numRobRows, Bool()))
val rob_predicated = Reg(Vec(numRobRows, Bool())) // Was this instruction predicated out?
val rob_debug_wdata = Mem(numRobRows, UInt(xLen.W))
//-----------------------------------------------
// Dispatch: Add Entry to ROB
rob_debug_inst_wmask(w) := io.enq_valids(w)
rob_debug_inst_wdata(w) := io.enq_uops(w).debug_inst
when (io.enq_valids(w)) {
rob_val(rob_tail) := true.B
rob_bsy(rob_tail) := !(io.enq_uops(w).is_fence ||
io.enq_uops(w).is_fencei)
rob_unsafe(rob_tail) := io.enq_uops(w).unsafe
rob_uop(rob_tail) := io.enq_uops(w)
rob_exception(rob_tail) := io.enq_uops(w).exception
rob_predicated(rob_tail) := false.B
rob_fflags(w)(rob_tail) := 0.U
assert (rob_val(rob_tail) === false.B, "[rob] overwriting a valid entry.")
assert ((io.enq_uops(w).rob_idx >> log2Ceil(coreWidth)) === rob_tail)
} .elsewhen (io.enq_valids.reduce(_|_) && !rob_val(rob_tail)) {
rob_uop(rob_tail).debug_inst := BUBBLE // just for debug purposes
}
//-----------------------------------------------
// Writeback
for (i <- 0 until numWakeupPorts) {
val wb_resp = io.wb_resps(i)
val wb_uop = wb_resp.bits.uop
val row_idx = GetRowIdx(wb_uop.rob_idx)
when (wb_resp.valid && MatchBank(GetBankIdx(wb_uop.rob_idx))) {
rob_bsy(row_idx) := false.B
rob_unsafe(row_idx) := false.B
rob_predicated(row_idx) := wb_resp.bits.predicated
}
// TODO check that fflags aren't overwritten
// TODO check that the wb is to a valid ROB entry, give it a time stamp
// assert (!(wb_resp.valid && MatchBank(GetBankIdx(wb_uop.rob_idx)) &&
// wb_uop.fp_val && !(wb_uop.is_load || wb_uop.is_store) &&
// rob_exc_cause(row_idx) =/= 0.U),
// "FP instruction writing back exc bits is overriding an existing exception.")
}
// Stores have a separate method to clear busy bits
for (clr_rob_idx <- io.lsu_clr_bsy) {
when (clr_rob_idx.valid && MatchBank(GetBankIdx(clr_rob_idx.bits))) {
val cidx = GetRowIdx(clr_rob_idx.bits)
rob_bsy(cidx) := false.B
rob_unsafe(cidx) := false.B
assert (rob_val(cidx) === true.B, "[rob] store writing back to invalid entry.")
assert (rob_bsy(cidx) === true.B, "[rob] store writing back to a not-busy entry.")
}
}
for (clr <- io.lsu_clr_unsafe) {
when (clr.valid && MatchBank(GetBankIdx(clr.bits))) {
val cidx = GetRowIdx(clr.bits)
rob_unsafe(cidx) := false.B
}
}
//-----------------------------------------------
// Accruing fflags
for (i <- 0 until numFpuPorts) {
val fflag_uop = io.fflags(i).bits.uop
when (io.fflags(i).valid && MatchBank(GetBankIdx(fflag_uop.rob_idx))) {
rob_fflags(w)(GetRowIdx(fflag_uop.rob_idx)) := io.fflags(i).bits.flags
}
}
//-----------------------------------------------------
// Exceptions
// (the cause bits are compressed and stored elsewhere)
when (io.lxcpt.valid && MatchBank(GetBankIdx(io.lxcpt.bits.uop.rob_idx))) {
rob_exception(GetRowIdx(io.lxcpt.bits.uop.rob_idx)) := true.B
when (io.lxcpt.bits.cause =/= MINI_EXCEPTION_MEM_ORDERING) {
// In the case of a mem-ordering failure, the failing load will have been marked safe already.
assert(rob_unsafe(GetRowIdx(io.lxcpt.bits.uop.rob_idx)),
"An instruction marked as safe is causing an exception")
}
}
when (io.csr_replay.valid && MatchBank(GetBankIdx(io.csr_replay.bits.uop.rob_idx))) {
rob_exception(GetRowIdx(io.csr_replay.bits.uop.rob_idx)) := true.B
}
can_throw_exception(w) := rob_val(rob_head) && rob_exception(rob_head)
//-----------------------------------------------
// Commit or Rollback
// Can this instruction commit? (the check for exceptions/rob_state happens later).
can_commit(w) := rob_val(rob_head) && !(rob_bsy(rob_head)) && !io.csr_stall
// use the same "com_uop" for both rollback AND commit
// Perform Commit
io.commit.valids(w) := will_commit(w)
io.commit.arch_valids(w) := will_commit(w) && !rob_predicated(com_idx)
io.commit.uops(w) := rob_uop(com_idx)
io.commit.debug_insts(w) := rob_debug_inst_rdata(w)
    // We unbusy branches in b1, but it's easier to mark the taken/provider src in b2,
    // when the branch might be committing.
when (io.brupdate.b2.mispredict &&
MatchBank(GetBankIdx(io.brupdate.b2.uop.rob_idx)) &&
GetRowIdx(io.brupdate.b2.uop.rob_idx) === com_idx) {
io.commit.uops(w).debug_fsrc := BSRC_C
io.commit.uops(w).taken := io.brupdate.b2.taken
}
// Don't attempt to rollback the tail's row when the rob is full.
val rbk_row = rob_state === s_rollback && !full
io.commit.rbk_valids(w) := rbk_row && rob_val(com_idx) && !(enableCommitMapTable.B)
io.commit.rollback := (rob_state === s_rollback)
assert (!(io.commit.valids.reduce(_||_) && io.commit.rbk_valids.reduce(_||_)),
"com_valids and rbk_valids are mutually exclusive")
when (rbk_row) {
rob_val(com_idx) := false.B
rob_exception(com_idx) := false.B
}
if (enableCommitMapTable) {
when (RegNext(exception_thrown)) {
for (i <- 0 until numRobRows) {
rob_val(i) := false.B
rob_bsy(i) := false.B
rob_uop(i).debug_inst := BUBBLE
}
}
}
// -----------------------------------------------
// Kill speculated entries on branch mispredict
for (i <- 0 until numRobRows) {
val br_mask = rob_uop(i).br_mask
      // Kill instruction if mispredict & br mask match
when (IsKilledByBranch(io.brupdate, br_mask))
{
rob_val(i) := false.B
rob_uop(i.U).debug_inst := BUBBLE
} .elsewhen (rob_val(i)) {
// clear speculation bit even on correct speculation
rob_uop(i).br_mask := GetNewBrMask(io.brupdate, br_mask)
}
}
// Debug signal to figure out which prediction structure
// or core resolved a branch correctly
when (io.brupdate.b2.mispredict &&
MatchBank(GetBankIdx(io.brupdate.b2.uop.rob_idx))) {
rob_uop(GetRowIdx(io.brupdate.b2.uop.rob_idx)).debug_fsrc := BSRC_C
rob_uop(GetRowIdx(io.brupdate.b2.uop.rob_idx)).taken := io.brupdate.b2.taken
}
// -----------------------------------------------
// Commit
when (will_commit(w)) {
rob_val(rob_head) := false.B
}
// -----------------------------------------------
// Outputs
rob_head_vals(w) := rob_val(rob_head)
rob_tail_vals(w) := rob_val(rob_tail)
rob_head_fflags(w) := rob_fflags(w)(rob_head)
rob_head_uses_stq(w) := rob_uop(rob_head).uses_stq
rob_head_uses_ldq(w) := rob_uop(rob_head).uses_ldq
//------------------------------------------------
// Invalid entries are safe; thrown exceptions are unsafe.
for (i <- 0 until numRobRows) {
rob_unsafe_masked((i << log2Ceil(coreWidth)) + w) := rob_val(i) && (rob_unsafe(i) || rob_exception(i))
}
// Read unsafe status of PNR row.
rob_pnr_unsafe(w) := rob_val(rob_pnr) && (rob_unsafe(rob_pnr) || rob_exception(rob_pnr))
// -----------------------------------------------
// debugging write ports that should not be synthesized
when (will_commit(w)) {
rob_uop(rob_head).debug_inst := BUBBLE
} .elsewhen (rbk_row)
{
rob_uop(rob_tail).debug_inst := BUBBLE
}
//--------------------------------------------------
// Debug: for debug purposes, track side-effects to all register destinations
for (i <- 0 until numWakeupPorts) {
val rob_idx = io.wb_resps(i).bits.uop.rob_idx
when (io.debug_wb_valids(i) && MatchBank(GetBankIdx(rob_idx))) {
rob_debug_wdata(GetRowIdx(rob_idx)) := io.debug_wb_wdata(i)
}
val temp_uop = rob_uop(GetRowIdx(rob_idx))
assert (!(io.wb_resps(i).valid && MatchBank(GetBankIdx(rob_idx)) &&
!rob_val(GetRowIdx(rob_idx))),
"[rob] writeback (" + i + ") occurred to an invalid ROB entry.")
assert (!(io.wb_resps(i).valid && MatchBank(GetBankIdx(rob_idx)) &&
!rob_bsy(GetRowIdx(rob_idx))),
"[rob] writeback (" + i + ") occurred to a not-busy ROB entry.")
assert (!(io.wb_resps(i).valid && MatchBank(GetBankIdx(rob_idx)) &&
temp_uop.ldst_val && temp_uop.pdst =/= io.wb_resps(i).bits.uop.pdst),
"[rob] writeback (" + i + ") occurred to the wrong pdst.")
}
io.commit.debug_wdata(w) := rob_debug_wdata(rob_head)
} //for (w <- 0 until coreWidth)
// **************************************************************************
// --------------------------------------------------------------------------
// **************************************************************************
// -----------------------------------------------
// Commit Logic
  // We need to take the "can_commit" vector and let the first contiguous run of
  // can_commit instructions commit. Older instructions may block the commit of
  // younger instructions in the commit bundle, e.g., because of an exception or
  // a (valid && busy) entry.
  // Finally, don't throw an exception if there are instructions in front of
  // it that want to commit (only throw the exception when it is at the head of the bundle).
var block_commit = (rob_state =/= s_normal) && (rob_state =/= s_wait_till_empty) || RegNext(exception_thrown) || RegNext(RegNext(exception_thrown))
var will_throw_exception = false.B
var block_xcpt = false.B
for (w <- 0 until coreWidth) {
will_throw_exception = (can_throw_exception(w) && !block_commit && !block_xcpt) || will_throw_exception
will_commit(w) := can_commit(w) && !can_throw_exception(w) && !block_commit
block_commit = (rob_head_vals(w) &&
(!can_commit(w) || can_throw_exception(w))) || block_commit
block_xcpt = will_commit(w)
}
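  // Illustration only (not in the original source): for a hypothetical
  // coreWidth = 2 the loop above unrolls to (writing block_commit_init for
  // the initial value of block_commit computed before the loop)
  //   will_commit(0) := can_commit(0) && !can_throw_exception(0) && !block_commit_init
  //   will_commit(1) := can_commit(1) && !can_throw_exception(1) &&
  //                     !(block_commit_init ||
  //                       (rob_head_vals(0) && (!can_commit(0) || can_throw_exception(0))))
  // i.e., a younger instruction commits only if every older valid
  // instruction ahead of it in the bundle is also committing this cycle.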
// Note: exception must be in the commit bundle.
// Note: exception must be the first valid instruction in the commit bundle.
exception_thrown := will_throw_exception
val is_mini_exception = io.com_xcpt.bits.cause.isOneOf(MINI_EXCEPTION_MEM_ORDERING, MINI_EXCEPTION_CSR_REPLAY)
io.com_xcpt.valid := exception_thrown && !is_mini_exception
io.com_xcpt.bits := DontCare
io.com_xcpt.bits.cause := r_xcpt_uop.exc_cause
io.com_xcpt.bits.badvaddr := Sext(r_xcpt_badvaddr, xLen)
val insn_sys_pc2epc =
rob_head_vals.reduce(_|_) && PriorityMux(rob_head_vals, io.commit.uops.map{u => u.is_sys_pc2epc})
val refetch_inst = exception_thrown || insn_sys_pc2epc
val com_xcpt_uop = PriorityMux(rob_head_vals, io.commit.uops)
io.com_xcpt.bits.ftq_idx := com_xcpt_uop.ftq_idx
io.com_xcpt.bits.edge_inst := com_xcpt_uop.edge_inst
io.com_xcpt.bits.is_rvc := com_xcpt_uop.is_rvc
io.com_xcpt.bits.pc_lob := com_xcpt_uop.pc_lob
val flush_commit_mask = Range(0,coreWidth).map{i => io.commit.valids(i) && io.commit.uops(i).flush_on_commit}
val flush_commit = flush_commit_mask.reduce(_|_)
val flush_val = exception_thrown || flush_commit
assert(!(PopCount(flush_commit_mask) > 1.U),
"[rob] Can't commit multiple flush_on_commit instructions on one cycle")
val flush_uop = Mux(exception_thrown, com_xcpt_uop, Mux1H(flush_commit_mask, io.commit.uops))
// delay a cycle for critical path considerations
io.flush.valid := flush_val
io.flush.bits := DontCare
io.flush.bits.ftq_idx := flush_uop.ftq_idx
io.flush.bits.pc_lob := flush_uop.pc_lob
io.flush.bits.edge_inst := flush_uop.edge_inst
io.flush.bits.is_rvc := flush_uop.is_rvc
io.flush.bits.flush_typ := FlushTypes.getType(flush_val,
exception_thrown && !is_mini_exception,
flush_commit && flush_uop.uopc === uopERET,
refetch_inst)
// -----------------------------------------------
// FP Exceptions
// send fflags bits to the CSRFile to accrue
val fflags_val = Wire(Vec(coreWidth, Bool()))
val fflags = Wire(Vec(coreWidth, UInt(freechips.rocketchip.tile.FPConstants.FLAGS_SZ.W)))
for (w <- 0 until coreWidth) {
fflags_val(w) :=
io.commit.valids(w) &&
io.commit.uops(w).fp_val &&
!io.commit.uops(w).uses_stq
fflags(w) := Mux(fflags_val(w), rob_head_fflags(w), 0.U)
assert (!(io.commit.valids(w) &&
!io.commit.uops(w).fp_val &&
rob_head_fflags(w) =/= 0.U),
"Committed non-FP instruction has non-zero fflag bits.")
assert (!(io.commit.valids(w) &&
io.commit.uops(w).fp_val &&
(io.commit.uops(w).uses_ldq || io.commit.uops(w).uses_stq) &&
rob_head_fflags(w) =/= 0.U),
"Committed FP load or store has non-zero fflag bits.")
}
io.commit.fflags.valid := fflags_val.reduce(_|_)
io.commit.fflags.bits := fflags.reduce(_|_)
// -----------------------------------------------
// Exception Tracking Logic
// only store the oldest exception, since only one can happen!
val next_xcpt_uop = Wire(new MicroOp())
next_xcpt_uop := r_xcpt_uop
val enq_xcpts = Wire(Vec(coreWidth, Bool()))
for (i <- 0 until coreWidth) {
enq_xcpts(i) := io.enq_valids(i) && io.enq_uops(i).exception
}
when (!(io.flush.valid || exception_thrown) && rob_state =/= s_rollback) {
val new_xcpt_valid = io.lxcpt.valid || io.csr_replay.valid
val lxcpt_older = !io.csr_replay.valid || (IsOlder(io.lxcpt.bits.uop.rob_idx, io.csr_replay.bits.uop.rob_idx, rob_head_idx) && io.lxcpt.valid)
val new_xcpt = Mux(lxcpt_older, io.lxcpt.bits, io.csr_replay.bits)
when (new_xcpt_valid) {
when (!r_xcpt_val || IsOlder(new_xcpt.uop.rob_idx, r_xcpt_uop.rob_idx, rob_head_idx)) {
r_xcpt_val := true.B
next_xcpt_uop := new_xcpt.uop
next_xcpt_uop.exc_cause := new_xcpt.cause
r_xcpt_badvaddr := new_xcpt.badvaddr
}
} .elsewhen (!r_xcpt_val && enq_xcpts.reduce(_|_)) {
val idx = enq_xcpts.indexWhere{i: Bool => i}
// if no exception yet, dispatch exception wins
r_xcpt_val := true.B
next_xcpt_uop := io.enq_uops(idx)
r_xcpt_badvaddr := AlignPCToBoundary(io.xcpt_fetch_pc, icBlockBytes) | io.enq_uops(idx).pc_lob
}
}
r_xcpt_uop := next_xcpt_uop
r_xcpt_uop.br_mask := GetNewBrMask(io.brupdate, next_xcpt_uop)
when (io.flush.valid || IsKilledByBranch(io.brupdate, next_xcpt_uop)) {
r_xcpt_val := false.B
}
assert (!(exception_thrown && !r_xcpt_val),
"ROB trying to throw an exception, but it doesn't have a valid xcpt_cause")
assert (!(empty && r_xcpt_val),
"ROB is empty, but believes it has an outstanding exception.")
assert (!(will_throw_exception && (GetRowIdx(r_xcpt_uop.rob_idx) =/= rob_head)),
"ROB is throwing an exception, but the stored exception information's " +
"rob_idx does not match the rob_head")
// -----------------------------------------------
// ROB Head Logic
// remember if we're still waiting on the rest of the dispatch packet, and prevent
  // the rob_head from advancing if it commits a partial packet before we
// dispatch the rest of it.
// update when committed ALL valid instructions in commit_bundle
val rob_deq = WireInit(false.B)
val r_partial_row = RegInit(false.B)
when (io.enq_valids.reduce(_|_)) {
r_partial_row := io.enq_partial_stall
}
val finished_committing_row =
(io.commit.valids.asUInt =/= 0.U) &&
((will_commit.asUInt ^ rob_head_vals.asUInt) === 0.U) &&
!(r_partial_row && rob_head === rob_tail && !maybe_full)
when (finished_committing_row) {
rob_head := WrapInc(rob_head, numRobRows)
rob_head_lsb := 0.U
rob_deq := true.B
} .otherwise {
rob_head_lsb := OHToUInt(PriorityEncoderOH(rob_head_vals.asUInt))
}
// -----------------------------------------------
// ROB Point-of-No-Return (PNR) Logic
// Acts as a second head, but only waits on busy instructions which might cause misspeculation.
// TODO is it worth it to add an extra 'parity' bit to all rob pointer logic?
// Makes 'older than' comparisons ~3x cheaper, in case we're going to use the PNR to do a large number of those.
// Also doesn't require the rob tail (or head) to be exported to whatever we want to compare with the PNR.
if (enableFastPNR) {
val unsafe_entry_in_rob = rob_unsafe_masked.reduce(_||_)
val next_rob_pnr_idx = Mux(unsafe_entry_in_rob,
AgePriorityEncoder(rob_unsafe_masked, rob_head_idx),
rob_tail << log2Ceil(coreWidth) | PriorityEncoder(~rob_tail_vals.asUInt))
rob_pnr := next_rob_pnr_idx >> log2Ceil(coreWidth)
if (coreWidth > 1)
rob_pnr_lsb := next_rob_pnr_idx(log2Ceil(coreWidth)-1, 0)
} else {
// Distinguish between PNR being at head/tail when ROB is full.
// Works the same as maybe_full tracking for the ROB tail.
val pnr_maybe_at_tail = RegInit(false.B)
val safe_to_inc = rob_state === s_normal || rob_state === s_wait_till_empty
val do_inc_row = !rob_pnr_unsafe.reduce(_||_) && (rob_pnr =/= rob_tail || (full && !pnr_maybe_at_tail))
when (empty && io.enq_valids.asUInt =/= 0.U) {
      // Unfortunately for us, the ROB does not use its entries in monotonically
// increasing order, even in the case of no exceptions. The edge case
// arises when partial rows are enqueued and committed, leaving an empty
// ROB.
rob_pnr := rob_head
rob_pnr_lsb := PriorityEncoder(io.enq_valids)
} .elsewhen (safe_to_inc && do_inc_row) {
rob_pnr := WrapInc(rob_pnr, numRobRows)
rob_pnr_lsb := 0.U
} .elsewhen (safe_to_inc && (rob_pnr =/= rob_tail || (full && !pnr_maybe_at_tail))) {
rob_pnr_lsb := PriorityEncoder(rob_pnr_unsafe)
} .elsewhen (safe_to_inc && !full && !empty) {
rob_pnr_lsb := PriorityEncoder(rob_pnr_unsafe.asUInt | ~MaskLower(rob_tail_vals.asUInt))
} .elsewhen (full && pnr_maybe_at_tail) {
rob_pnr_lsb := 0.U
}
pnr_maybe_at_tail := !rob_deq && (do_inc_row || pnr_maybe_at_tail)
}
// Head overrunning PNR likely means an entry hasn't been marked as safe when it should have been.
assert(!IsOlder(rob_pnr_idx, rob_head_idx, rob_tail_idx) || rob_pnr_idx === rob_tail_idx)
// PNR overrunning tail likely means an entry has been marked as safe when it shouldn't have been.
assert(!IsOlder(rob_tail_idx, rob_pnr_idx, rob_head_idx) || full)
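  // Descriptive note (not in the original source): the enableFastPNR path
  // recomputes the PNR every cycle with an age-based priority encoder over
  // rob_unsafe_masked, jumping straight to the oldest unsafe entry, while
  // the default path advances the PNR by at most one row per cycle (and
  // snaps it back to the head when the ROB drains empty), trading precision
  // for a much cheaper update.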
// -----------------------------------------------
// ROB Tail Logic
val rob_enq = WireInit(false.B)
when (rob_state === s_rollback && (rob_tail =/= rob_head || maybe_full)) {
// Rollback a row
rob_tail := WrapDec(rob_tail, numRobRows)
rob_tail_lsb := (coreWidth-1).U
rob_deq := true.B
} .elsewhen (rob_state === s_rollback && (rob_tail === rob_head) && !maybe_full) {
// Rollback an entry
rob_tail_lsb := rob_head_lsb
} .elsewhen (io.brupdate.b2.mispredict) {
rob_tail := WrapInc(GetRowIdx(io.brupdate.b2.uop.rob_idx), numRobRows)
rob_tail_lsb := 0.U
} .elsewhen (io.enq_valids.asUInt =/= 0.U && !io.enq_partial_stall) {
rob_tail := WrapInc(rob_tail, numRobRows)
rob_tail_lsb := 0.U
rob_enq := true.B
} .elsewhen (io.enq_valids.asUInt =/= 0.U && io.enq_partial_stall) {
rob_tail_lsb := PriorityEncoder(~MaskLower(io.enq_valids.asUInt))
}
if (enableCommitMapTable) {
when (RegNext(exception_thrown)) {
rob_tail := 0.U
rob_tail_lsb := 0.U
rob_head := 0.U
rob_pnr := 0.U
rob_pnr_lsb := 0.U
}
}
// -----------------------------------------------
// Full/Empty Logic
// The ROB can be completely full, but only if it did not dispatch a row in the prior cycle.
  // i.e., at least one entry will be empty when in a steady state of dispatching and committing a row each cycle.
// TODO should we add an extra 'parity bit' onto the ROB pointers to simplify this logic?
maybe_full := !rob_deq && (rob_enq || maybe_full) || io.brupdate.b1.mispredict_mask =/= 0.U
full := rob_tail === rob_head && maybe_full
empty := (rob_head === rob_tail) && (rob_head_vals.asUInt === 0.U)
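  // Descriptive note (not in the original source): maybe_full disambiguates
  // the two meanings of rob_head === rob_tail. It is set when a row was
  // enqueued without one being dequeued (or while a misprediction is being
  // resolved), so head == tail with maybe_full set reads as "full", while
  // head == tail with no valid head entries reads as "empty".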
io.rob_head_idx := rob_head_idx
io.rob_tail_idx := rob_tail_idx
io.rob_pnr_idx := rob_pnr_idx
io.empty := empty
io.ready := (rob_state === s_normal) && !full && !r_xcpt_val
//-----------------------------------------------
//-----------------------------------------------
//-----------------------------------------------
// ROB FSM
if (!enableCommitMapTable) {
switch (rob_state) {
is (s_reset) {
rob_state := s_normal
}
is (s_normal) {
// Delay rollback 2 cycles so branch mispredictions can drain
when (RegNext(RegNext(exception_thrown))) {
rob_state := s_rollback
} .otherwise {
for (w <- 0 until coreWidth) {
when (io.enq_valids(w) && io.enq_uops(w).is_unique) {
rob_state := s_wait_till_empty
}
}
}
}
is (s_rollback) {
when (empty) {
rob_state := s_normal
}
}
is (s_wait_till_empty) {
when (RegNext(exception_thrown)) {
rob_state := s_rollback
} .elsewhen (empty) {
rob_state := s_normal
}
}
}
} else {
switch (rob_state) {
is (s_reset) {
rob_state := s_normal
}
is (s_normal) {
when (exception_thrown) {
; //rob_state := s_rollback
} .otherwise {
for (w <- 0 until coreWidth) {
when (io.enq_valids(w) && io.enq_uops(w).is_unique) {
rob_state := s_wait_till_empty
}
}
}
}
is (s_rollback) {
when (rob_tail_idx === rob_head_idx) {
rob_state := s_normal
}
}
is (s_wait_till_empty) {
when (exception_thrown) {
; //rob_state := s_rollback
} .elsewhen (rob_tail === rob_head) {
rob_state := s_normal
}
}
}
}
// -----------------------------------------------
// Outputs
io.com_load_is_at_rob_head := RegNext(rob_head_uses_ldq(PriorityEncoder(rob_head_vals.asUInt)) &&
!will_commit.reduce(_||_))
override def toString: String = BoomCoreStringPrefix(
"==ROB==",
"Machine Width : " + coreWidth,
"Rob Entries : " + numRobEntries,
"Rob Rows : " + numRobRows,
"Rob Row size : " + log2Ceil(numRobRows),
"log2Ceil(coreWidth): " + log2Ceil(coreWidth),
"FPU FFlag Ports : " + numFpuPorts)
}
| module rob_debug_inst_mem_0( // @[rob.scala:296:41]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [95:0] R0_data,
input [4:0] W0_addr,
input W0_clk,
input [95:0] W0_data,
input [2:0] W0_mask
);
rob_debug_inst_mem_ext rob_debug_inst_mem_ext ( // @[rob.scala:296:41]
.R0_addr (R0_addr),
.R0_en (R0_en),
.R0_clk (R0_clk),
.R0_data (R0_data),
.W0_addr (W0_addr),
.W0_en (1'h1), // @[rob.scala:296:41]
.W0_clk (W0_clk),
.W0_data (W0_data),
.W0_mask (W0_mask)
); // @[rob.scala:296:41]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File MulAddRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
val mainAlignedSigC =
(Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
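    // Descriptive note (not in the original source): mulAddA and mulAddB are
    // the sigWidth-bit significands of a and b and mulAddC is the addend
    // significand already aligned by the preMul stage, so mulAddResult is
    // the raw (sigWidth * 2 + 1)-bit integer fused multiply-add that the
    // postMul stage normalizes and the rounder converts back to recoded form.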
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
| module MulAddRecFN_e8_s24_36( // @[MulAddRecFN.scala:300:7]
input [32:0] io_a, // @[MulAddRecFN.scala:303:16]
input [32:0] io_c, // @[MulAddRecFN.scala:303:16]
output [32:0] io_out // @[MulAddRecFN.scala:303:16]
);
wire _mulAddRecFNToRaw_postMul_io_invalidExc; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isNaN; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isInf; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_isZero; // @[MulAddRecFN.scala:319:15]
wire _mulAddRecFNToRaw_postMul_io_rawOut_sign; // @[MulAddRecFN.scala:319:15]
wire [9:0] _mulAddRecFNToRaw_postMul_io_rawOut_sExp; // @[MulAddRecFN.scala:319:15]
wire [26:0] _mulAddRecFNToRaw_postMul_io_rawOut_sig; // @[MulAddRecFN.scala:319:15]
wire [23:0] _mulAddRecFNToRaw_preMul_io_mulAddA; // @[MulAddRecFN.scala:317:15]
wire [47:0] _mulAddRecFNToRaw_preMul_io_mulAddC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfA; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_signProd; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isInfC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC; // @[MulAddRecFN.scala:317:15]
wire [9:0] _mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant; // @[MulAddRecFN.scala:317:15]
wire [4:0] _mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist; // @[MulAddRecFN.scala:317:15]
wire [25:0] _mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC; // @[MulAddRecFN.scala:317:15]
wire _mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC; // @[MulAddRecFN.scala:317:15]
wire [32:0] io_a_0 = io_a; // @[MulAddRecFN.scala:300:7]
wire [32:0] io_c_0 = io_c; // @[MulAddRecFN.scala:300:7]
wire io_detectTininess = 1'h1; // @[MulAddRecFN.scala:300:7, :303:16, :339:15]
wire [2:0] io_roundingMode = 3'h0; // @[MulAddRecFN.scala:300:7, :303:16, :319:15, :339:15]
wire [32:0] io_b = 33'h80000000; // @[MulAddRecFN.scala:300:7, :303:16, :317:15]
wire [1:0] io_op = 2'h0; // @[MulAddRecFN.scala:300:7, :303:16, :317:15]
wire [32:0] io_out_0; // @[MulAddRecFN.scala:300:7]
wire [4:0] io_exceptionFlags; // @[MulAddRecFN.scala:300:7]
wire [47:0] _mulAddResult_T = {1'h0, _mulAddRecFNToRaw_preMul_io_mulAddA, 23'h0}; // @[MulAddRecFN.scala:317:15, :327:45]
wire [48:0] mulAddResult = {1'h0, _mulAddResult_T} + {1'h0, _mulAddRecFNToRaw_preMul_io_mulAddC}; // @[MulAddRecFN.scala:317:15, :327:45, :328:50]
MulAddRecFNToRaw_preMul_e8_s24_36 mulAddRecFNToRaw_preMul ( // @[MulAddRecFN.scala:317:15]
.io_a (io_a_0), // @[MulAddRecFN.scala:300:7]
.io_c (io_c_0), // @[MulAddRecFN.scala:300:7]
.io_mulAddA (_mulAddRecFNToRaw_preMul_io_mulAddA),
.io_mulAddC (_mulAddRecFNToRaw_preMul_io_mulAddC),
.io_toPostMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny),
.io_toPostMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB),
.io_toPostMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA),
.io_toPostMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA),
.io_toPostMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd),
.io_toPostMul_isNaNC (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC),
.io_toPostMul_isInfC (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfC),
.io_toPostMul_isZeroC (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC),
.io_toPostMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum),
.io_toPostMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags),
.io_toPostMul_CIsDominant (_mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant),
.io_toPostMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist),
.io_toPostMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC),
.io_toPostMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC)
); // @[MulAddRecFN.scala:317:15]
MulAddRecFNToRaw_postMul_e8_s24_36 mulAddRecFNToRaw_postMul ( // @[MulAddRecFN.scala:319:15]
.io_fromPreMul_isSigNaNAny (_mulAddRecFNToRaw_preMul_io_toPostMul_isSigNaNAny), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isNaNAOrB (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNAOrB), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isInfA (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfA), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isZeroA (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroA), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_signProd (_mulAddRecFNToRaw_preMul_io_toPostMul_signProd), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isNaNC (_mulAddRecFNToRaw_preMul_io_toPostMul_isNaNC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isInfC (_mulAddRecFNToRaw_preMul_io_toPostMul_isInfC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_isZeroC (_mulAddRecFNToRaw_preMul_io_toPostMul_isZeroC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_sExpSum (_mulAddRecFNToRaw_preMul_io_toPostMul_sExpSum), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_doSubMags (_mulAddRecFNToRaw_preMul_io_toPostMul_doSubMags), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_CIsDominant (_mulAddRecFNToRaw_preMul_io_toPostMul_CIsDominant), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_CDom_CAlignDist (_mulAddRecFNToRaw_preMul_io_toPostMul_CDom_CAlignDist), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_highAlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_highAlignedSigC), // @[MulAddRecFN.scala:317:15]
.io_fromPreMul_bit0AlignedSigC (_mulAddRecFNToRaw_preMul_io_toPostMul_bit0AlignedSigC), // @[MulAddRecFN.scala:317:15]
.io_mulAddResult (mulAddResult), // @[MulAddRecFN.scala:328:50]
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc),
.io_rawOut_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN),
.io_rawOut_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf),
.io_rawOut_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero),
.io_rawOut_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign),
.io_rawOut_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp),
.io_rawOut_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig)
); // @[MulAddRecFN.scala:319:15]
RoundRawFNToRecFN_e8_s24_52 roundRawFNToRecFN ( // @[MulAddRecFN.scala:339:15]
.io_invalidExc (_mulAddRecFNToRaw_postMul_io_invalidExc), // @[MulAddRecFN.scala:319:15]
.io_in_isNaN (_mulAddRecFNToRaw_postMul_io_rawOut_isNaN), // @[MulAddRecFN.scala:319:15]
.io_in_isInf (_mulAddRecFNToRaw_postMul_io_rawOut_isInf), // @[MulAddRecFN.scala:319:15]
.io_in_isZero (_mulAddRecFNToRaw_postMul_io_rawOut_isZero), // @[MulAddRecFN.scala:319:15]
.io_in_sign (_mulAddRecFNToRaw_postMul_io_rawOut_sign), // @[MulAddRecFN.scala:319:15]
.io_in_sExp (_mulAddRecFNToRaw_postMul_io_rawOut_sExp), // @[MulAddRecFN.scala:319:15]
.io_in_sig (_mulAddRecFNToRaw_postMul_io_rawOut_sig), // @[MulAddRecFN.scala:319:15]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags)
); // @[MulAddRecFN.scala:339:15]
assign io_out = io_out_0; // @[MulAddRecFN.scala:300:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
            // extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
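// Descriptive note (not in the original source): for wide inputs the
// function above splits on the most-significant input bit and recurses on
// the remaining low bits, so elaboration produces a tree of narrow shifters
// instead of one (1 << inWidth)-bit shifter; the narrow base case builds
// the mask directly with a signed arithmetic shift followed by a bit
// reverse, and the topBound < bottomBound case simply mirrors the input and
// the bounds.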
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
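// Descriptive note (not in the original source): orReduceBy2 and orReduceBy4
// OR together adjacent groups of 2 or 4 input bits; e.g. a hypothetical
// 10-bit input to orReduceBy4 yields 3 bits, with bit 0 = in(3,0).orR,
// bit 1 = in(7,4).orR, and bit 2 = in(9,8).orR (the last group takes
// whatever bits remain). The MulAdd alignment code above uses these to form
// "sticky" summaries of shifted-out bits without a full-width OR.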
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
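// roundMask has a 1 in every significand position that will be discarded by rounding
// (always including the two extra low-order bits); for results that may be subnormal,
// lowMask widens the mask according to how far sAdjustedExp falls below outMinNormExp.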
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
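// Illustrative usage sketch (an assumption, not part of the original file): a helper
// that instantiates RoundRawFNToRecFN for IEEE single precision and rounds a
// caller-supplied RawFloat. The object and method names are hypothetical; only the
// port names come from the class above, and it must be called during elaboration of
// an enclosing module (e.g. from inside a Module body).
object RoundRawFNToRecFNExample {
  def round(in: RawFloat, roundingMode: UInt, detectTininess: UInt): (UInt, UInt) = {
    val rounder = Module(new RoundRawFNToRecFN(expWidth = 8, sigWidth = 24, options = 0))
    rounder.io.invalidExc := false.B
    rounder.io.infiniteExc := false.B
    rounder.io.in := in // must be a RawFloat(8, 26), matching io.in above
    rounder.io.roundingMode := roundingMode
    rounder.io.detectTininess := detectTininess
    // 33-bit recoded single-precision result plus the 5 exception flags
    (rounder.io.out.asUInt, rounder.io.exceptionFlags.asUInt)
  }
}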
| module RoundAnyRawFNToRecFN_ie11_is53_oe8_os24_3( // @[RoundAnyRawFNToRecFN.scala:48:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [12:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [53:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16]
input [2:0] io_roundingMode, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16]
);
wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [12:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [53:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [2:0] io_roundingMode_0 = io_roundingMode; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [15:0] _roundMask_T_5 = 16'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_4 = 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_10 = 16'hFF00; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_13 = 12'hFF; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_14 = 16'hFF0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_15 = 16'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_20 = 16'hF0F0; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_23 = 14'hF0F; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_24 = 16'h3C3C; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_25 = 16'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_30 = 16'hCCCC; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_33 = 15'h3333; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_34 = 16'h6666; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_35 = 16'h5555; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_40 = 16'hAAAA; // @[primitives.scala:77:20]
wire _common_underflow_T_16 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:222:27]
wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire io_detectTininess = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire _unboundedRange_anyRound_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:205:30]
wire _common_underflow_T_7 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:222:49]
wire _common_underflow_T_12 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:222:77]
wire _common_underflow_T_13 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:226:38]
wire _common_underflow_T_14 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:227:45]
wire _common_underflow_T_15 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:227:60]
wire notNaN_isSpecialInfOut = io_in_isInf_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :236:49]
wire [32:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33]
wire [4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66]
wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
wire roundingMode_near_even = io_roundingMode_0 == 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5, :90:53]
wire roundingMode_minMag = io_roundingMode_0 == 3'h1; // @[RoundAnyRawFNToRecFN.scala:48:5, :91:53]
wire roundingMode_min = io_roundingMode_0 == 3'h2; // @[RoundAnyRawFNToRecFN.scala:48:5, :92:53]
wire roundingMode_max = io_roundingMode_0 == 3'h3; // @[RoundAnyRawFNToRecFN.scala:48:5, :93:53]
wire roundingMode_near_maxMag = io_roundingMode_0 == 3'h4; // @[RoundAnyRawFNToRecFN.scala:48:5, :94:53]
wire roundingMode_odd = io_roundingMode_0 == 3'h6; // @[RoundAnyRawFNToRecFN.scala:48:5, :95:53]
wire _roundMagUp_T = roundingMode_min & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :92:53, :98:27]
wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66]
wire _roundMagUp_T_2 = roundingMode_max & _roundMagUp_T_1; // @[RoundAnyRawFNToRecFN.scala:93:53, :98:{63,66}]
wire roundMagUp = _roundMagUp_T | _roundMagUp_T_2; // @[RoundAnyRawFNToRecFN.scala:98:{27,42,63}]
wire [13:0] sAdjustedExp = {io_in_sExp_0[12], io_in_sExp_0} - 14'h700; // @[RoundAnyRawFNToRecFN.scala:48:5, :110:24]
wire [25:0] _adjustedSig_T = io_in_sig_0[53:28]; // @[RoundAnyRawFNToRecFN.scala:48:5, :116:23]
wire [27:0] _adjustedSig_T_1 = io_in_sig_0[27:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :117:26]
wire _adjustedSig_T_2 = |_adjustedSig_T_1; // @[RoundAnyRawFNToRecFN.scala:117:{26,60}]
wire [26:0] adjustedSig = {_adjustedSig_T, _adjustedSig_T_2}; // @[RoundAnyRawFNToRecFN.scala:116:{23,66}, :117:60]
wire [8:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37]
wire [8:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31]
wire [22:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16]
wire [22:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31]
wire _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:196:50]
wire common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37]
wire _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:200:31]
wire common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37]
wire _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:217:40]
wire common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37]
wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49]
wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37]
wire [8:0] _roundMask_T = sAdjustedExp[8:0]; // @[RoundAnyRawFNToRecFN.scala:110:24, :156:37]
wire [8:0] _roundMask_T_1 = ~_roundMask_T; // @[primitives.scala:52:21]
wire roundMask_msb = _roundMask_T_1[8]; // @[primitives.scala:52:21, :58:25]
wire [7:0] roundMask_lsbs = _roundMask_T_1[7:0]; // @[primitives.scala:52:21, :59:26]
wire roundMask_msb_1 = roundMask_lsbs[7]; // @[primitives.scala:58:25, :59:26]
wire [6:0] roundMask_lsbs_1 = roundMask_lsbs[6:0]; // @[primitives.scala:59:26]
wire roundMask_msb_2 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire roundMask_msb_3 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26]
wire [5:0] roundMask_lsbs_2 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [5:0] roundMask_lsbs_3 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26]
wire [64:0] roundMask_shift = $signed(65'sh10000000000000000 >>> roundMask_lsbs_2); // @[primitives.scala:59:26, :76:56]
wire [21:0] _roundMask_T_2 = roundMask_shift[63:42]; // @[primitives.scala:76:56, :78:22]
wire [15:0] _roundMask_T_3 = _roundMask_T_2[15:0]; // @[primitives.scala:77:20, :78:22]
wire [7:0] _roundMask_T_6 = _roundMask_T_3[15:8]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_7 = {8'h0, _roundMask_T_6}; // @[primitives.scala:77:20]
wire [7:0] _roundMask_T_8 = _roundMask_T_3[7:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_9 = {_roundMask_T_8, 8'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_11 = _roundMask_T_9 & 16'hFF00; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_12 = _roundMask_T_7 | _roundMask_T_11; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_16 = _roundMask_T_12[15:4]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_17 = {4'h0, _roundMask_T_16 & 12'hF0F}; // @[primitives.scala:77:20]
wire [11:0] _roundMask_T_18 = _roundMask_T_12[11:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_19 = {_roundMask_T_18, 4'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_21 = _roundMask_T_19 & 16'hF0F0; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_22 = _roundMask_T_17 | _roundMask_T_21; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_26 = _roundMask_T_22[15:2]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_27 = {2'h0, _roundMask_T_26 & 14'h3333}; // @[primitives.scala:77:20]
wire [13:0] _roundMask_T_28 = _roundMask_T_22[13:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_29 = {_roundMask_T_28, 2'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_31 = _roundMask_T_29 & 16'hCCCC; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_32 = _roundMask_T_27 | _roundMask_T_31; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_36 = _roundMask_T_32[15:1]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_37 = {1'h0, _roundMask_T_36 & 15'h5555}; // @[primitives.scala:77:20]
wire [14:0] _roundMask_T_38 = _roundMask_T_32[14:0]; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_39 = {_roundMask_T_38, 1'h0}; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_41 = _roundMask_T_39 & 16'hAAAA; // @[primitives.scala:77:20]
wire [15:0] _roundMask_T_42 = _roundMask_T_37 | _roundMask_T_41; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_43 = _roundMask_T_2[21:16]; // @[primitives.scala:77:20, :78:22]
wire [3:0] _roundMask_T_44 = _roundMask_T_43[3:0]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_45 = _roundMask_T_44[1:0]; // @[primitives.scala:77:20]
wire _roundMask_T_46 = _roundMask_T_45[0]; // @[primitives.scala:77:20]
wire _roundMask_T_47 = _roundMask_T_45[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_48 = {_roundMask_T_46, _roundMask_T_47}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_49 = _roundMask_T_44[3:2]; // @[primitives.scala:77:20]
wire _roundMask_T_50 = _roundMask_T_49[0]; // @[primitives.scala:77:20]
wire _roundMask_T_51 = _roundMask_T_49[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_52 = {_roundMask_T_50, _roundMask_T_51}; // @[primitives.scala:77:20]
wire [3:0] _roundMask_T_53 = {_roundMask_T_48, _roundMask_T_52}; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_54 = _roundMask_T_43[5:4]; // @[primitives.scala:77:20]
wire _roundMask_T_55 = _roundMask_T_54[0]; // @[primitives.scala:77:20]
wire _roundMask_T_56 = _roundMask_T_54[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_57 = {_roundMask_T_55, _roundMask_T_56}; // @[primitives.scala:77:20]
wire [5:0] _roundMask_T_58 = {_roundMask_T_53, _roundMask_T_57}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_59 = {_roundMask_T_42, _roundMask_T_58}; // @[primitives.scala:77:20]
wire [21:0] _roundMask_T_60 = ~_roundMask_T_59; // @[primitives.scala:73:32, :77:20]
wire [21:0] _roundMask_T_61 = roundMask_msb_2 ? 22'h0 : _roundMask_T_60; // @[primitives.scala:58:25, :73:{21,32}]
wire [21:0] _roundMask_T_62 = ~_roundMask_T_61; // @[primitives.scala:73:{17,21}]
wire [24:0] _roundMask_T_63 = {_roundMask_T_62, 3'h7}; // @[primitives.scala:68:58, :73:17]
wire [64:0] roundMask_shift_1 = $signed(65'sh10000000000000000 >>> roundMask_lsbs_3); // @[primitives.scala:59:26, :76:56]
wire [2:0] _roundMask_T_64 = roundMask_shift_1[2:0]; // @[primitives.scala:76:56, :78:22]
wire [1:0] _roundMask_T_65 = _roundMask_T_64[1:0]; // @[primitives.scala:77:20, :78:22]
wire _roundMask_T_66 = _roundMask_T_65[0]; // @[primitives.scala:77:20]
wire _roundMask_T_67 = _roundMask_T_65[1]; // @[primitives.scala:77:20]
wire [1:0] _roundMask_T_68 = {_roundMask_T_66, _roundMask_T_67}; // @[primitives.scala:77:20]
wire _roundMask_T_69 = _roundMask_T_64[2]; // @[primitives.scala:77:20, :78:22]
wire [2:0] _roundMask_T_70 = {_roundMask_T_68, _roundMask_T_69}; // @[primitives.scala:77:20]
wire [2:0] _roundMask_T_71 = roundMask_msb_3 ? _roundMask_T_70 : 3'h0; // @[primitives.scala:58:25, :62:24, :77:20]
wire [24:0] _roundMask_T_72 = roundMask_msb_1 ? _roundMask_T_63 : {22'h0, _roundMask_T_71}; // @[primitives.scala:58:25, :62:24, :67:24, :68:58]
wire [24:0] _roundMask_T_73 = roundMask_msb ? _roundMask_T_72 : 25'h0; // @[primitives.scala:58:25, :62:24, :67:24]
wire [24:0] _roundMask_T_74 = _roundMask_T_73; // @[primitives.scala:62:24]
wire [26:0] roundMask = {_roundMask_T_74, 2'h3}; // @[RoundAnyRawFNToRecFN.scala:159:{23,42}]
wire [27:0] _shiftedRoundMask_T = {1'h0, roundMask}; // @[RoundAnyRawFNToRecFN.scala:159:42, :162:41]
wire [26:0] shiftedRoundMask = _shiftedRoundMask_T[27:1]; // @[RoundAnyRawFNToRecFN.scala:162:{41,53}]
wire [26:0] _roundPosMask_T = ~shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:162:53, :163:28]
wire [26:0] roundPosMask = _roundPosMask_T & roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :163:{28,46}]
wire [26:0] _roundPosBit_T = adjustedSig & roundPosMask; // @[RoundAnyRawFNToRecFN.scala:116:66, :163:46, :164:40]
wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}]
wire [26:0] _anyRoundExtra_T = adjustedSig & shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:116:66, :162:53, :165:42]
wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}]
wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36]
wire _GEN = roundingMode_near_even | roundingMode_near_maxMag; // @[RoundAnyRawFNToRecFN.scala:90:53, :94:53, :169:38]
wire _roundIncr_T; // @[RoundAnyRawFNToRecFN.scala:169:38]
assign _roundIncr_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38]
wire _unboundedRange_roundIncr_T; // @[RoundAnyRawFNToRecFN.scala:207:38]
assign _unboundedRange_roundIncr_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38, :207:38]
wire _overflow_roundMagUp_T; // @[RoundAnyRawFNToRecFN.scala:243:32]
assign _overflow_roundMagUp_T = _GEN; // @[RoundAnyRawFNToRecFN.scala:169:38, :243:32]
wire _roundIncr_T_1 = _roundIncr_T & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:{38,67}]
wire _roundIncr_T_2 = roundMagUp & anyRound; // @[RoundAnyRawFNToRecFN.scala:98:42, :166:36, :171:29]
wire roundIncr = _roundIncr_T_1 | _roundIncr_T_2; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31, :171:29]
wire [26:0] _roundedSig_T = adjustedSig | roundMask; // @[RoundAnyRawFNToRecFN.scala:116:66, :159:42, :174:32]
wire [24:0] _roundedSig_T_1 = _roundedSig_T[26:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}]
wire [25:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 26'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}]
wire _roundedSig_T_3 = roundingMode_near_even & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:90:53, :164:56, :175:49]
wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30]
wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30]
wire [25:0] _roundedSig_T_6 = roundMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:159:42, :177:35]
wire [25:0] _roundedSig_T_7 = _roundedSig_T_5 ? _roundedSig_T_6 : 26'h0; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}, :177:35]
wire [25:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}]
wire [25:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21]
wire [26:0] _roundedSig_T_10 = ~roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :180:32]
wire [26:0] _roundedSig_T_11 = adjustedSig & _roundedSig_T_10; // @[RoundAnyRawFNToRecFN.scala:116:66, :180:{30,32}]
wire [24:0] _roundedSig_T_12 = _roundedSig_T_11[26:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}]
wire _roundedSig_T_13 = roundingMode_odd & anyRound; // @[RoundAnyRawFNToRecFN.scala:95:53, :166:36, :181:42]
wire [25:0] _roundedSig_T_14 = roundPosMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:163:46, :181:67]
wire [25:0] _roundedSig_T_15 = _roundedSig_T_13 ? _roundedSig_T_14 : 26'h0; // @[RoundAnyRawFNToRecFN.scala:181:{24,42,67}]
wire [25:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12} | _roundedSig_T_15; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}, :181:24]
wire [25:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47]
wire [1:0] _sRoundedExp_T = roundedSig[25:24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54]
wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}]
wire [14:0] sRoundedExp = {sAdjustedExp[13], sAdjustedExp} + {{12{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:110:24, :185:{40,76}]
assign _common_expOut_T = sRoundedExp[8:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37]
assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37]
wire [22:0] _common_fractOut_T = roundedSig[23:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27]
wire [22:0] _common_fractOut_T_1 = roundedSig[22:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27]
assign _common_fractOut_T_2 = _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:189:16, :191:27]
assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16]
wire [7:0] _common_overflow_T = sRoundedExp[14:7]; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:30]
assign _common_overflow_T_1 = $signed(_common_overflow_T) > 8'sh2; // @[RoundAnyRawFNToRecFN.scala:196:{30,50}]
assign common_overflow = _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:124:37, :196:50]
assign _common_totalUnderflow_T = $signed(sRoundedExp) < 15'sh6B; // @[RoundAnyRawFNToRecFN.scala:185:40, :200:31]
assign common_totalUnderflow = _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:125:37, :200:31]
wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:45]
wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:45, :205:44]
wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:116:66, :203:61]
wire unboundedRange_roundPosBit = _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:203:{16,61}]
wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // @[RoundAnyRawFNToRecFN.scala:116:66, :205:63]
wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}]
wire unboundedRange_anyRound = _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{49,70}]
wire _unboundedRange_roundIncr_T_1 = _unboundedRange_roundIncr_T & unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:{38,67}]
wire _unboundedRange_roundIncr_T_2 = roundMagUp & unboundedRange_anyRound; // @[RoundAnyRawFNToRecFN.scala:98:42, :205:49, :209:29]
wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1 | _unboundedRange_roundIncr_T_2; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46, :209:29]
wire _roundCarry_T = roundedSig[25]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27]
wire _roundCarry_T_1 = roundedSig[24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27]
wire roundCarry = _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:211:16, :213:27]
wire [5:0] _common_underflow_T = sAdjustedExp[13:8]; // @[RoundAnyRawFNToRecFN.scala:110:24, :220:49]
wire _common_underflow_T_1 = $signed(_common_underflow_T) < 6'sh1; // @[RoundAnyRawFNToRecFN.scala:220:{49,64}]
wire _common_underflow_T_2 = anyRound & _common_underflow_T_1; // @[RoundAnyRawFNToRecFN.scala:166:36, :220:{32,64}]
wire _common_underflow_T_3 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57]
wire _common_underflow_T_9 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57, :225:49]
wire _common_underflow_T_4 = roundMask[2]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:71]
wire _common_underflow_T_5 = _common_underflow_T_4; // @[RoundAnyRawFNToRecFN.scala:221:{30,71}]
wire _common_underflow_T_6 = _common_underflow_T_2 & _common_underflow_T_5; // @[RoundAnyRawFNToRecFN.scala:220:{32,72}, :221:30]
wire _common_underflow_T_17 = _common_underflow_T_6; // @[RoundAnyRawFNToRecFN.scala:220:72, :221:76]
wire _common_underflow_T_8 = roundMask[4]; // @[RoundAnyRawFNToRecFN.scala:159:42, :224:49]
wire _common_underflow_T_10 = _common_underflow_T_9; // @[RoundAnyRawFNToRecFN.scala:223:39, :225:49]
wire _common_underflow_T_11 = ~_common_underflow_T_10; // @[RoundAnyRawFNToRecFN.scala:223:{34,39}]
assign _common_underflow_T_18 = common_totalUnderflow | _common_underflow_T_17; // @[RoundAnyRawFNToRecFN.scala:125:37, :217:40, :221:76]
assign common_underflow = _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:126:37, :217:40]
assign _common_inexact_T = common_totalUnderflow | anyRound; // @[RoundAnyRawFNToRecFN.scala:125:37, :166:36, :230:49]
assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49]
wire isNaNOut = io_invalidExc_0 | io_in_isNaN_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34]
wire _commonCase_T = ~isNaNOut; // @[RoundAnyRawFNToRecFN.scala:235:34, :237:22]
wire _commonCase_T_1 = ~notNaN_isSpecialInfOut; // @[RoundAnyRawFNToRecFN.scala:236:49, :237:36]
wire _commonCase_T_2 = _commonCase_T & _commonCase_T_1; // @[RoundAnyRawFNToRecFN.scala:237:{22,33,36}]
wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64]
wire commonCase = _commonCase_T_2 & _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{33,61,64}]
wire overflow = commonCase & common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37, :237:61, :238:32]
wire underflow = commonCase & common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37, :237:61, :239:32]
wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43]
wire inexact = overflow | _inexact_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :240:{28,43}]
wire overflow_roundMagUp = _overflow_roundMagUp_T | roundMagUp; // @[RoundAnyRawFNToRecFN.scala:98:42, :243:{32,60}]
wire _pegMinNonzeroMagOut_T = commonCase & common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :237:61, :245:20]
wire _pegMinNonzeroMagOut_T_1 = roundMagUp | roundingMode_odd; // @[RoundAnyRawFNToRecFN.scala:95:53, :98:42, :245:60]
wire pegMinNonzeroMagOut = _pegMinNonzeroMagOut_T & _pegMinNonzeroMagOut_T_1; // @[RoundAnyRawFNToRecFN.scala:245:{20,45,60}]
wire _pegMaxFiniteMagOut_T = ~overflow_roundMagUp; // @[RoundAnyRawFNToRecFN.scala:243:60, :246:42]
wire pegMaxFiniteMagOut = overflow & _pegMaxFiniteMagOut_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :246:{39,42}]
wire _notNaN_isInfOut_T = overflow & overflow_roundMagUp; // @[RoundAnyRawFNToRecFN.scala:238:32, :243:60, :248:45]
wire notNaN_isInfOut = notNaN_isSpecialInfOut | _notNaN_isInfOut_T; // @[RoundAnyRawFNToRecFN.scala:236:49, :248:{32,45}]
wire signOut = ~isNaNOut & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :250:22]
wire _expOut_T = io_in_isZero_0 | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:48:5, :125:37, :253:32]
wire [8:0] _expOut_T_1 = _expOut_T ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}]
wire [8:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}]
wire [8:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14]
wire [8:0] _expOut_T_5 = pegMinNonzeroMagOut ? 9'h194 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:245:45, :257:18]
wire [8:0] _expOut_T_6 = ~_expOut_T_5; // @[RoundAnyRawFNToRecFN.scala:257:{14,18}]
wire [8:0] _expOut_T_7 = _expOut_T_3 & _expOut_T_6; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17, :257:14]
wire [8:0] _expOut_T_8 = {1'h0, pegMaxFiniteMagOut, 7'h0}; // @[RoundAnyRawFNToRecFN.scala:246:39, :261:18]
wire [8:0] _expOut_T_9 = ~_expOut_T_8; // @[RoundAnyRawFNToRecFN.scala:261:{14,18}]
wire [8:0] _expOut_T_10 = _expOut_T_7 & _expOut_T_9; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17, :261:14]
wire [8:0] _expOut_T_11 = {2'h0, notNaN_isInfOut, 6'h0}; // @[RoundAnyRawFNToRecFN.scala:248:32, :265:18]
wire [8:0] _expOut_T_12 = ~_expOut_T_11; // @[RoundAnyRawFNToRecFN.scala:265:{14,18}]
wire [8:0] _expOut_T_13 = _expOut_T_10 & _expOut_T_12; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17, :265:14]
wire [8:0] _expOut_T_14 = pegMinNonzeroMagOut ? 9'h6B : 9'h0; // @[RoundAnyRawFNToRecFN.scala:245:45, :269:16]
wire [8:0] _expOut_T_15 = _expOut_T_13 | _expOut_T_14; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18, :269:16]
wire [8:0] _expOut_T_16 = pegMaxFiniteMagOut ? 9'h17F : 9'h0; // @[RoundAnyRawFNToRecFN.scala:246:39, :273:16]
wire [8:0] _expOut_T_17 = _expOut_T_15 | _expOut_T_16; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15, :273:16]
wire [8:0] _expOut_T_18 = notNaN_isInfOut ? 9'h180 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:248:32, :277:16]
wire [8:0] _expOut_T_19 = _expOut_T_17 | _expOut_T_18; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15, :277:16]
wire [8:0] _expOut_T_20 = isNaNOut ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:235:34, :278:16]
wire [8:0] expOut = _expOut_T_19 | _expOut_T_20; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73, :278:16]
wire _fractOut_T = isNaNOut | io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :280:22]
wire _fractOut_T_1 = _fractOut_T | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :280:{22,38}]
wire [22:0] _fractOut_T_2 = {isNaNOut, 22'h0}; // @[RoundAnyRawFNToRecFN.scala:235:34, :281:16]
wire [22:0] _fractOut_T_3 = _fractOut_T_1 ? _fractOut_T_2 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}, :281:16]
wire [22:0] _fractOut_T_4 = {23{pegMaxFiniteMagOut}}; // @[RoundAnyRawFNToRecFN.scala:246:39, :284:13]
wire [22:0] fractOut = _fractOut_T_3 | _fractOut_T_4; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11, :284:13]
wire [9:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23]
assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}]
assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33]
wire [1:0] _io_exceptionFlags_T = {io_invalidExc_0, 1'h0}; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:23]
wire [2:0] _io_exceptionFlags_T_1 = {_io_exceptionFlags_T, overflow}; // @[RoundAnyRawFNToRecFN.scala:238:32, :288:{23,41}]
wire [3:0] _io_exceptionFlags_T_2 = {_io_exceptionFlags_T_1, underflow}; // @[RoundAnyRawFNToRecFN.scala:239:32, :288:{41,53}]
assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}]
assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File IdIndexer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.amba.axi4
import chisel3._
import chisel3.util.{log2Ceil, Cat}
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.diplomacy.IdRange
import freechips.rocketchip.util.{ControlKey, SimpleBundleField}
case object AXI4ExtraId extends ControlKey[UInt]("extra_id")
case class AXI4ExtraIdField(width: Int) extends SimpleBundleField(AXI4ExtraId)(Output(UInt(width.W)), 0.U)
/** This adapter limits the set of FIFO domain ids used by outbound transactions.
*
* Extra AWID and ARID bits from upstream transactions are stored in a User Bits field called AXI4ExtraId,
* whose values are expected to be echoed back to this adapter alongside any downstream response messages,
* and are then prepended to the RID and BID fields to restore the original identifier.
*
* @param idBits is the desired number of A[W|R]ID bits to be used
*/
class AXI4IdIndexer(idBits: Int)(implicit p: Parameters) extends LazyModule
{
require (idBits >= 0, s"AXI4IdIndexer: idBits must be >= 0, not $idBits")
val node = AXI4AdapterNode(
masterFn = { mp =>
// Create one new "master" per ID
val masters = Array.tabulate(1 << idBits) { i => AXI4MasterParameters(
name = "",
id = IdRange(i, i+1),
aligned = true,
maxFlight = Some(0))
}
// Accumulate the names of masters we squish
val names = Array.fill(1 << idBits) { new scala.collection.mutable.HashSet[String]() }
// Squash the information from original masters into new ID masters
mp.masters.foreach { m =>
for (i <- m.id.start until m.id.end) {
val j = i % (1 << idBits)
val accumulated = masters(j)
names(j) += m.name
masters(j) = accumulated.copy(
aligned = accumulated.aligned && m.aligned,
maxFlight = accumulated.maxFlight.flatMap { o => m.maxFlight.map { n => o+n } })
}
}
val finalNameStrings = names.map { n => if (n.isEmpty) "(unused)" else n.toList.mkString(", ") }
val bits = log2Ceil(mp.endId) - idBits
val field = if (bits > 0) Seq(AXI4ExtraIdField(bits)) else Nil
mp.copy(
echoFields = field ++ mp.echoFields,
masters = masters.zip(finalNameStrings).map { case (m, n) => m.copy(name = n) })
},
slaveFn = { sp => sp
})
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// Leave everything mostly untouched
Connectable.waiveUnmatched(out.ar, in.ar) match {
case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll
}
Connectable.waiveUnmatched(out.aw, in.aw) match {
case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll
}
Connectable.waiveUnmatched(out.w, in.w) match {
case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll
}
Connectable.waiveUnmatched(in.b, out.b) match {
case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll
}
Connectable.waiveUnmatched(in.r, out.r) match {
case (lhs, rhs) => lhs.squeezeAll :<>= rhs.squeezeAll
}
val bits = log2Ceil(edgeIn.master.endId) - idBits
if (bits > 0) {
// (in.aX.bits.id >> idBits).width = bits > 0
out.ar.bits.echo(AXI4ExtraId) := in.ar.bits.id >> idBits
out.aw.bits.echo(AXI4ExtraId) := in.aw.bits.id >> idBits
// Special care is needed in case of 0 idBits, b/c .id has width 1 still
if (idBits == 0) {
out.ar.bits.id := 0.U
out.aw.bits.id := 0.U
in.r.bits.id := out.r.bits.echo(AXI4ExtraId)
in.b.bits.id := out.b.bits.echo(AXI4ExtraId)
} else {
in.r.bits.id := Cat(out.r.bits.echo(AXI4ExtraId), out.r.bits.id)
in.b.bits.id := Cat(out.b.bits.echo(AXI4ExtraId), out.b.bits.id)
}
}
}
}
}
object AXI4IdIndexer
{
def apply(idBits: Int)(implicit p: Parameters): AXI4Node =
{
val axi4index = LazyModule(new AXI4IdIndexer(idBits))
axi4index.node
}
}
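// Worked example (illustrative numbers, not from the original file): with idBits = 2
// and an upstream endId of 16, outbound AWID/ARID carry only the low two bits of the
// original ID, while the remaining log2Ceil(16) - 2 = 2 bits travel in the AXI4ExtraId
// echo field; on the response path the original ID is rebuilt as
// Cat(out.r.bits.echo(AXI4ExtraId), out.r.bits.id) (and likewise for B).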
File ToAXI4.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.amba.{AMBACorrupt, AMBACorruptField, AMBAProt, AMBAProtField}
import freechips.rocketchip.amba.axi4.{AXI4BundleARW, AXI4MasterParameters, AXI4MasterPortParameters, AXI4Parameters, AXI4Imp}
import freechips.rocketchip.diplomacy.{IdMap, IdMapEntry, IdRange}
import freechips.rocketchip.util.{BundleField, ControlKey, ElaborationArtefacts, UIntToOH1}
import freechips.rocketchip.util.DataToAugmentedData
class AXI4TLStateBundle(val sourceBits: Int) extends Bundle {
val size = UInt(4.W)
val source = UInt((sourceBits max 1).W)
}
case object AXI4TLState extends ControlKey[AXI4TLStateBundle]("tl_state")
case class AXI4TLStateField(sourceBits: Int) extends BundleField[AXI4TLStateBundle](AXI4TLState, Output(new AXI4TLStateBundle(sourceBits)), x => {
x.size := 0.U
x.source := 0.U
})
/** TLtoAXI4IdMap serves as a record for the translation performed between id spaces.
*
* Its member [axi4Masters] is used as the new AXI4MasterParameters in diplomacy.
* Its member [mapping] is used as the template for the circuit generated in TLToAXI4Node.module.
*/
class TLtoAXI4IdMap(tlPort: TLMasterPortParameters) extends IdMap[TLToAXI4IdMapEntry]
{
val tlMasters = tlPort.masters.sortBy(_.sourceId).sortWith(TLToAXI4.sortByType)
private val axi4IdSize = tlMasters.map { tl => if (tl.requestFifo) 1 else tl.sourceId.size }
private val axi4IdStart = axi4IdSize.scanLeft(0)(_+_).init
val axi4Masters = axi4IdStart.zip(axi4IdSize).zip(tlMasters).map { case ((start, size), tl) =>
AXI4MasterParameters(
name = tl.name,
id = IdRange(start, start+size),
aligned = true,
maxFlight = Some(if (tl.requestFifo) tl.sourceId.size else 1),
nodePath = tl.nodePath)
}
private val axi4IdEnd = axi4Masters.map(_.id.end).max
private val axiDigits = String.valueOf(axi4IdEnd-1).length()
private val tlDigits = String.valueOf(tlPort.endSourceId-1).length()
protected val fmt = s"\t[%${axiDigits}d, %${axiDigits}d) <= [%${tlDigits}d, %${tlDigits}d) %s%s%s"
val mapping: Seq[TLToAXI4IdMapEntry] = tlMasters.zip(axi4Masters).map { case (tl, axi) =>
TLToAXI4IdMapEntry(axi.id, tl.sourceId, tl.name, tl.supports.probe, tl.requestFifo)
}
}
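// Worked example (illustrative, not from the original file): given a TL port with
// master "core" (sourceId [0,4), requestFifo = false) and master "dma"
// (sourceId [4,8), requestFifo = true), the mapping above allocates
//   AXI IDs [0,4) <= TL sources [0,4)  core  (one AXI ID per source, maxFlight = 1)
//   AXI ID  [4,5) <= TL sources [4,8)  dma   (a single FIFO ID with maxFlight = 4)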
case class TLToAXI4IdMapEntry(axi4Id: IdRange, tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean)
extends IdMapEntry
{
val from = tlId
val to = axi4Id
val maxTransactionsInFlight = Some(tlId.size)
}
case class TLToAXI4Node(wcorrupt: Boolean = true)(implicit valName: ValName) extends MixedAdapterNode(TLImp, AXI4Imp)(
dFn = { p =>
AXI4MasterPortParameters(
masters = (new TLtoAXI4IdMap(p)).axi4Masters,
requestFields = (if (wcorrupt) Seq(AMBACorruptField()) else Seq()) ++ p.requestFields.filter(!_.isInstanceOf[AMBAProtField]),
echoFields = AXI4TLStateField(log2Ceil(p.endSourceId)) +: p.echoFields,
responseKeys = p.responseKeys)
},
uFn = { p => TLSlavePortParameters.v1(
managers = p.slaves.map { case s =>
TLSlaveParameters.v1(
address = s.address,
resources = s.resources,
regionType = s.regionType,
executable = s.executable,
nodePath = s.nodePath,
supportsGet = s.supportsRead,
supportsPutFull = s.supportsWrite,
supportsPutPartial = s.supportsWrite,
fifoId = Some(0),
mayDenyPut = true,
mayDenyGet = true)},
beatBytes = p.beatBytes,
minLatency = p.minLatency,
responseFields = p.responseFields,
requestKeys = AMBAProt +: p.requestKeys)
})
// wcorrupt alone is not enough; a slave must include AMBACorrupt in the slave port's requestKeys
class TLToAXI4(val combinational: Boolean = true, val adapterName: Option[String] = None, val stripBits: Int = 0, val wcorrupt: Boolean = true)(implicit p: Parameters) extends LazyModule
{
require(stripBits == 0, "stripBits > 0 is no longer supported on TLToAXI4")
val node = TLToAXI4Node(wcorrupt)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
val slaves = edgeOut.slave.slaves
// All pairs of slaves must promise that they will never interleave data
require (slaves(0).interleavedId.isDefined)
slaves.foreach { s => require (s.interleavedId == slaves(0).interleavedId) }
// Construct the source=>ID mapping table
val map = new TLtoAXI4IdMap(edgeIn.client)
val sourceStall = WireDefault(VecInit.fill(edgeIn.client.endSourceId)(false.B))
val sourceTable = WireDefault(VecInit.fill(edgeIn.client.endSourceId)(0.U.asTypeOf(out.aw.bits.id)))
val idStall = WireDefault(VecInit.fill(edgeOut.master.endId)(false.B))
val idCount = Array.fill(edgeOut.master.endId) { None:Option[Int] }
map.mapping.foreach { case TLToAXI4IdMapEntry(axi4Id, tlId, _, _, fifo) =>
for (i <- 0 until tlId.size) {
val id = axi4Id.start + (if (fifo) 0 else i)
sourceStall(tlId.start + i) := idStall(id)
sourceTable(tlId.start + i) := id.U
}
if (fifo) { idCount(axi4Id.start) = Some(tlId.size) }
}
adapterName.foreach { n =>
println(s"$n AXI4-ID <= TL-Source mapping:\n${map.pretty}\n")
ElaborationArtefacts.add(s"$n.axi4.json", s"""{"mapping":[${map.mapping.mkString(",")}]}""")
}
// We need to keep the following state from A => D: (size, source)
// All of those fields could potentially require 0 bits (argh. Chisel.)
// We will pack all of that extra information into the echo bits.
require (log2Ceil(edgeIn.maxLgSize+1) <= 4)
val a_address = edgeIn.address(in.a.bits)
val a_source = in.a.bits.source
val a_size = edgeIn.size(in.a.bits)
val a_isPut = edgeIn.hasData(in.a.bits)
val (a_first, a_last, _) = edgeIn.firstlast(in.a)
val r_state = out.r.bits.echo(AXI4TLState)
val r_source = r_state.source
val r_size = r_state.size
val b_state = out.b.bits.echo(AXI4TLState)
val b_source = b_state.source
val b_size = b_state.size
// We need these Queues because AXI4 queues are irrevocable
val depth = if (combinational) 1 else 2
val out_arw = Wire(Decoupled(new AXI4BundleARW(out.params)))
val out_w = Wire(chiselTypeOf(out.w))
out.w :<>= Queue.irrevocable(out_w, entries=depth, flow=combinational)
val queue_arw = Queue.irrevocable(out_arw, entries=depth, flow=combinational)
// Fan out the ARW channel to AR and AW
out.ar.bits := queue_arw.bits
out.aw.bits := queue_arw.bits
out.ar.valid := queue_arw.valid && !queue_arw.bits.wen
out.aw.valid := queue_arw.valid && queue_arw.bits.wen
queue_arw.ready := Mux(queue_arw.bits.wen, out.aw.ready, out.ar.ready)
val beatBytes = edgeIn.manager.beatBytes
val maxSize = log2Ceil(beatBytes).U
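// doneAW records that the AW beat of the current multi-beat Put has already been
// issued, so the remaining W beats neither wait on nor re-enqueue the ARW request.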
val doneAW = RegInit(false.B)
when (in.a.fire) { doneAW := !a_last }
val arw = out_arw.bits
arw.wen := a_isPut
arw.id := sourceTable(a_source)
arw.addr := a_address
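// AXI LEN encodes the burst length as beats-1, computed here as (2^a_size - 1) >> log2(beatBytes)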
arw.len := UIntToOH1(a_size, AXI4Parameters.lenBits + log2Ceil(beatBytes)) >> log2Ceil(beatBytes)
arw.size := Mux(a_size >= maxSize, maxSize, a_size)
arw.burst := AXI4Parameters.BURST_INCR
arw.lock := 0.U // not exclusive (LR/SC unsupported b/c no forward progress guarantee)
arw.cache := 0.U // do not allow AXI to modify our transactions
arw.prot := AXI4Parameters.PROT_PRIVILEGED
arw.qos := 0.U // no QoS
Connectable.waiveUnmatched(arw.user, in.a.bits.user) match {
case (lhs, rhs) => lhs :<= rhs
}
Connectable.waiveUnmatched(arw.echo, in.a.bits.echo) match {
case (lhs, rhs) => lhs :<= rhs
}
val a_extra = arw.echo(AXI4TLState)
a_extra.source := a_source
a_extra.size := a_size
in.a.bits.user.lift(AMBAProt).foreach { x =>
val prot = Wire(Vec(3, Bool()))
val cache = Wire(Vec(4, Bool()))
prot(0) := x.privileged
prot(1) := !x.secure
prot(2) := x.fetch
cache(0) := x.bufferable
cache(1) := x.modifiable
cache(2) := x.readalloc
cache(3) := x.writealloc
arw.prot := Cat(prot.reverse)
arw.cache := Cat(cache.reverse)
}
val stall = sourceStall(in.a.bits.source) && a_first
in.a.ready := !stall && Mux(a_isPut, (doneAW || out_arw.ready) && out_w.ready, out_arw.ready)
out_arw.valid := !stall && in.a.valid && Mux(a_isPut, !doneAW && out_w.ready, true.B)
out_w.valid := !stall && in.a.valid && a_isPut && (doneAW || out_arw.ready)
out_w.bits.data := in.a.bits.data
out_w.bits.strb := in.a.bits.mask
out_w.bits.last := a_last
out_w.bits.user.lift(AMBACorrupt).foreach { _ := in.a.bits.corrupt }
// R and B => D arbitration
val r_holds_d = RegInit(false.B)
when (out.r.fire) { r_holds_d := !out.r.bits.last }
// Give R higher priority than B, unless B has been delayed for 8 cycles
val b_delay = Reg(UInt(3.W))
when (out.b.valid && !out.b.ready) {
b_delay := b_delay + 1.U
} .otherwise {
b_delay := 0.U
}
val r_wins = (out.r.valid && b_delay =/= 7.U) || r_holds_d
out.r.ready := in.d.ready && r_wins
out.b.ready := in.d.ready && !r_wins
in.d.valid := Mux(r_wins, out.r.valid, out.b.valid)
// If the first beat of the AXI RRESP is RESP_DECERR, treat this as a denied
// request. We must pulse extend this value as AXI is allowed to change the
// value of RRESP on every beat, and ChipLink may not.
val r_first = RegInit(true.B)
when (out.r.fire) { r_first := out.r.bits.last }
val r_denied = out.r.bits.resp === AXI4Parameters.RESP_DECERR holdUnless r_first
val r_corrupt = out.r.bits.resp =/= AXI4Parameters.RESP_OKAY
val b_denied = out.b.bits.resp =/= AXI4Parameters.RESP_OKAY
val r_d = edgeIn.AccessAck(r_source, r_size, 0.U, denied = r_denied, corrupt = r_corrupt || r_denied)
val b_d = edgeIn.AccessAck(b_source, b_size, denied = b_denied)
Connectable.waiveUnmatched(r_d.user, out.r.bits.user) match {
case (lhs, rhs) => lhs.squeezeAll :<= rhs.squeezeAll
}
Connectable.waiveUnmatched(r_d.echo, out.r.bits.echo) match {
case (lhs, rhs) => lhs.squeezeAll :<= rhs.squeezeAll
}
Connectable.waiveUnmatched(b_d.user, out.b.bits.user) match {
case (lhs, rhs) => lhs.squeezeAll :<= rhs.squeezeAll
}
Connectable.waiveUnmatched(b_d.echo, out.b.bits.echo) match {
case (lhs, rhs) => lhs.squeezeAll :<= rhs.squeezeAll
}
in.d.bits := Mux(r_wins, r_d, b_d)
in.d.bits.data := out.r.bits.data // avoid a costly Mux
// We need to track if any reads or writes are inflight for a given ID.
// If the opposite type arrives, we must stall until it completes.
val a_sel = UIntToOH(arw.id, edgeOut.master.endId).asBools
val d_sel = UIntToOH(Mux(r_wins, out.r.bits.id, out.b.bits.id), edgeOut.master.endId).asBools
val d_last = Mux(r_wins, out.r.bits.last, true.B)
// If FIFO was requested, ensure that R+W ordering is preserved
(a_sel zip d_sel zip idStall zip idCount) foreach { case (((as, ds), s), n) =>
// AXI does not guarantee read vs. write ordering. In particular, if we
// are in the middle of receiving a read burst and then issue a write,
// the write might affect the read burst. This violates FIFO behaviour.
// To solve this, we must wait until the last beat of a burst, but this
// means that a TileLink master which performs early source reuse can
// have one more transaction inflight than we promised AXI; stall it too.
val maxCount = n.getOrElse(1)
val count = RegInit(0.U(log2Ceil(maxCount + 1).W))
val write = Reg(Bool())
val idle = count === 0.U
val inc = as && out_arw.fire
val dec = ds && d_last && in.d.fire
count := count + inc.asUInt - dec.asUInt
assert (!dec || count =/= 0.U) // underflow
assert (!inc || count =/= maxCount.U) // overflow
when (inc) { write := arw.wen }
// If only one transaction can be inflight, it can't mismatch
val mismatch = if (maxCount > 1) { write =/= arw.wen } else { false.B }
s := (!idle && mismatch) || (count === maxCount.U)
}
// Tie off unused channels
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
}
}
}
object TLToAXI4
{
def apply(combinational: Boolean = true, adapterName: Option[String] = None, stripBits: Int = 0, wcorrupt: Boolean = true)(implicit p: Parameters) =
{
val tl2axi4 = LazyModule(new TLToAXI4(combinational, adapterName, stripBits, wcorrupt))
tl2axi4.node
}
def sortByType(a: TLMasterParameters, b: TLMasterParameters): Boolean = {
if ( a.supports.probe && !b.supports.probe) return false
if (!a.supports.probe && b.supports.probe) return true
if ( a.requestFifo && !b.requestFifo ) return false
if (!a.requestFifo && b.requestFifo ) return true
return false
}
}
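// Illustrative wiring sketch (an assumption, not from the original file): TLToAXI4 is
// normally the head of an adapter chain feeding an AXI4 memory port, e.g.
//
//   axi4MemNode :=
//     AXI4UserYanker() :=
//     AXI4IdIndexer(idBits = 4) :=
//     TLToAXI4(adapterName = Some("mem")) :=
//     tlOutputNode
//
// where axi4MemNode and tlOutputNode are hypothetical endpoint nodes; the UserYanker
// strips the AXI4TLState/AXI4ExtraId echo fields that TLToAXI4 and AXI4IdIndexer add.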
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
/** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and any unconnected [[Dangle]]s from this module
* and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
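// Minimal illustrative sketch (an assumption, not part of the original file): a
// LazyModule whose hardware lives in a LazyModuleImp, mirroring the
// "lazy val module = new Impl" pattern used by the adapters elsewhere in this
// document. The class name and the adder logic are hypothetical; it would be
// instantiated during Diplomacy elaboration via LazyModule(new ExampleLazyAdder).
class ExampleLazyAdder(implicit p: Parameters) extends LazyModule {
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    val io = IO(new Bundle {
      val a   = Input(UInt(8.W))
      val b   = Input(UInt(8.W))
      val sum = Output(UInt(8.W))
    })
    io.sum := io.a + io.b
  }
}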
File UserYanker.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.amba.axi4
import chisel3._
import chisel3.util.{Queue, QueueIO, UIntToOH}
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.util.BundleMap
/** This adapter prunes all user bit fields of the echo type from request messages,
* storing them in queues and echoing them back when matching response messages are received.
*
* It also optionally rate limits the number of transactions that can be in flight simultaneously
* per FIFO domain / A[W|R]ID.
*
* @param capMaxFlight is an optional maximum number of transactions that can be in flight per A[W|R]ID.
*/
class AXI4UserYanker(capMaxFlight: Option[Int] = None)(implicit p: Parameters) extends LazyModule
{
val node = AXI4AdapterNode(
masterFn = { mp => mp.copy(
masters = mp.masters.map { m => m.copy(
maxFlight = (m.maxFlight, capMaxFlight) match {
case (Some(x), Some(y)) => Some(x min y)
case (Some(x), None) => Some(x)
case (None, Some(y)) => Some(y)
case (None, None) => None })},
echoFields = Nil)},
slaveFn = { sp => sp })
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// Which fields are we stripping?
val echoFields = edgeIn.master.echoFields
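// A flow-through (combinational) queue is needed when the slave may return a
// response in the same cycle its request is accepted.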
val need_bypass = edgeOut.slave.minLatency < 1
edgeOut.master.masters.foreach { m =>
require (m.maxFlight.isDefined, "UserYanker needs a flight cap on each ID")
}
def queue(id: Int) = {
val depth = edgeOut.master.masters.find(_.id.contains(id)).flatMap(_.maxFlight).getOrElse(0)
if (depth == 0) {
Wire(new QueueIO(BundleMap(echoFields), 1)) // unused ID => undefined value
} else {
Module(new Queue(BundleMap(echoFields), depth, flow=need_bypass)).io
}
}
val rqueues = Seq.tabulate(edgeIn.master.endId) { i => queue(i) }
val wqueues = Seq.tabulate(edgeIn.master.endId) { i => queue(i) }
val arid = in.ar.bits.id
val ar_ready = VecInit(rqueues.map(_.enq.ready))(arid)
in .ar.ready := out.ar.ready && ar_ready
out.ar.valid := in .ar.valid && ar_ready
Connectable.waiveUnmatched(out.ar.bits, in.ar.bits) match {
case (lhs, rhs) => lhs :<= rhs
}
val rid = out.r.bits.id
val r_valid = VecInit(rqueues.map(_.deq.valid))(rid)
val r_bits = VecInit(rqueues.map(_.deq.bits))(rid)
assert (!out.r.valid || r_valid) // Q must be ready faster than the response
Connectable.waiveUnmatched(in.r, out.r) match {
case (lhs, rhs) => lhs :<>= rhs
}
in.r.bits.echo :<= r_bits
val arsel = UIntToOH(arid, edgeIn.master.endId).asBools
val rsel = UIntToOH(rid, edgeIn.master.endId).asBools
(rqueues zip (arsel zip rsel)) foreach { case (q, (ar, r)) =>
q.deq.ready := out.r .valid && in .r .ready && r && out.r.bits.last
q.deq.valid := DontCare
q.deq.bits := DontCare
q.enq.valid := in .ar.valid && out.ar.ready && ar
q.enq.ready := DontCare
q.enq.bits :<>= in.ar.bits.echo
q.count := DontCare
}
val awid = in.aw.bits.id
val aw_ready = VecInit(wqueues.map(_.enq.ready))(awid)
in .aw.ready := out.aw.ready && aw_ready
out.aw.valid := in .aw.valid && aw_ready
Connectable.waiveUnmatched(out.aw.bits, in.aw.bits) match {
case (lhs, rhs) => lhs :<>= rhs
}
val bid = out.b.bits.id
val b_valid = VecInit(wqueues.map(_.deq.valid))(bid)
val b_bits = VecInit(wqueues.map(_.deq.bits))(bid)
assert (!out.b.valid || b_valid) // Q must be ready faster than the response
Connectable.waiveUnmatched(in.b, out.b) match {
case (lhs, rhs) => lhs :<>= rhs
}
in.b.bits.echo :<>= b_bits
val awsel = UIntToOH(awid, edgeIn.master.endId).asBools
val bsel = UIntToOH(bid, edgeIn.master.endId).asBools
(wqueues zip (awsel zip bsel)) foreach { case (q, (aw, b)) =>
q.deq.ready := out.b .valid && in .b .ready && b
q.deq.valid := DontCare
q.deq.bits := DontCare
q.enq.valid := in .aw.valid && out.aw.ready && aw
q.enq.ready := DontCare
q.enq.bits :<>= in.aw.bits.echo
q.count := DontCare
}
out.w :<>= in.w
}
}
}
object AXI4UserYanker
{
def apply(capMaxFlight: Option[Int] = None)(implicit p: Parameters): AXI4Node =
{
val axi4yank = LazyModule(new AXI4UserYanker(capMaxFlight))
axi4yank.node
}
}
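A typical placement of this adapter, sketched below with assumed node names, is between TLToAXI4 (whose tl_state echo fields appear as the echo_tl_state_* signals in the generated module below) and an AXI4 slave, with an optional AXI4IdIndexer stage in between.
// Hypothetical wiring sketch inside some LazyModule (node names are assumptions).
axi4MemNode :=
  AXI4UserYanker(capMaxFlight = Some(8)) :=
  AXI4IdIndexer(idBits = 4) :=
  TLToAXI4() :=
  tlOutputNode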
| module TLInterconnectCoupler_mbus_to_memory_controller_port_named_axi4( // @[LazyModuleImp.scala:138:7]
input clock, // @[LazyModuleImp.scala:138:7]
input reset, // @[LazyModuleImp.scala:138:7]
output auto_widget_anon_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_widget_anon_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_widget_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_widget_anon_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_widget_anon_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_widget_anon_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_widget_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_widget_anon_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_widget_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_widget_anon_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_widget_anon_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_widget_anon_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_widget_anon_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_widget_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_widget_anon_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_widget_anon_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_widget_anon_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_widget_anon_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_axi4yank_out_aw_ready, // @[LazyModuleImp.scala:107:25]
output auto_axi4yank_out_aw_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_axi4yank_out_aw_bits_id, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_axi4yank_out_aw_bits_addr, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_axi4yank_out_aw_bits_len, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_axi4yank_out_aw_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_axi4yank_out_aw_bits_burst, // @[LazyModuleImp.scala:107:25]
output auto_axi4yank_out_aw_bits_lock, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_axi4yank_out_aw_bits_cache, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_axi4yank_out_aw_bits_prot, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_axi4yank_out_aw_bits_qos, // @[LazyModuleImp.scala:107:25]
input auto_axi4yank_out_w_ready, // @[LazyModuleImp.scala:107:25]
output auto_axi4yank_out_w_valid, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_axi4yank_out_w_bits_data, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_axi4yank_out_w_bits_strb, // @[LazyModuleImp.scala:107:25]
output auto_axi4yank_out_w_bits_last, // @[LazyModuleImp.scala:107:25]
output auto_axi4yank_out_b_ready, // @[LazyModuleImp.scala:107:25]
input auto_axi4yank_out_b_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_axi4yank_out_b_bits_id, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_axi4yank_out_b_bits_resp, // @[LazyModuleImp.scala:107:25]
input auto_axi4yank_out_ar_ready, // @[LazyModuleImp.scala:107:25]
output auto_axi4yank_out_ar_valid, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_axi4yank_out_ar_bits_id, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_axi4yank_out_ar_bits_addr, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_axi4yank_out_ar_bits_len, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_axi4yank_out_ar_bits_size, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_axi4yank_out_ar_bits_burst, // @[LazyModuleImp.scala:107:25]
output auto_axi4yank_out_ar_bits_lock, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_axi4yank_out_ar_bits_cache, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_axi4yank_out_ar_bits_prot, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_axi4yank_out_ar_bits_qos, // @[LazyModuleImp.scala:107:25]
output auto_axi4yank_out_r_ready, // @[LazyModuleImp.scala:107:25]
input auto_axi4yank_out_r_valid, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_axi4yank_out_r_bits_id, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_axi4yank_out_r_bits_data, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_axi4yank_out_r_bits_resp, // @[LazyModuleImp.scala:107:25]
input auto_axi4yank_out_r_bits_last, // @[LazyModuleImp.scala:107:25]
output auto_tl_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_tl_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_tl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_tl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_tl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_tl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_tl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_tl_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_tl_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_tl_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_tl_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_tl_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_tl_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_tl_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_tl_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_tl_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_tl_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_tl_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_tl_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_tl_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_tl_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_tl_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_tl_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_tl_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_tl_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_tl_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire _tl2axi4_auto_out_aw_valid; // @[ToAXI4.scala:301:29]
wire [3:0] _tl2axi4_auto_out_aw_bits_id; // @[ToAXI4.scala:301:29]
wire [31:0] _tl2axi4_auto_out_aw_bits_addr; // @[ToAXI4.scala:301:29]
wire [7:0] _tl2axi4_auto_out_aw_bits_len; // @[ToAXI4.scala:301:29]
wire [2:0] _tl2axi4_auto_out_aw_bits_size; // @[ToAXI4.scala:301:29]
wire [1:0] _tl2axi4_auto_out_aw_bits_burst; // @[ToAXI4.scala:301:29]
wire _tl2axi4_auto_out_aw_bits_lock; // @[ToAXI4.scala:301:29]
wire [3:0] _tl2axi4_auto_out_aw_bits_cache; // @[ToAXI4.scala:301:29]
wire [2:0] _tl2axi4_auto_out_aw_bits_prot; // @[ToAXI4.scala:301:29]
wire [3:0] _tl2axi4_auto_out_aw_bits_qos; // @[ToAXI4.scala:301:29]
wire [3:0] _tl2axi4_auto_out_aw_bits_echo_tl_state_size; // @[ToAXI4.scala:301:29]
wire [3:0] _tl2axi4_auto_out_aw_bits_echo_tl_state_source; // @[ToAXI4.scala:301:29]
wire _tl2axi4_auto_out_w_valid; // @[ToAXI4.scala:301:29]
wire [63:0] _tl2axi4_auto_out_w_bits_data; // @[ToAXI4.scala:301:29]
wire [7:0] _tl2axi4_auto_out_w_bits_strb; // @[ToAXI4.scala:301:29]
wire _tl2axi4_auto_out_w_bits_last; // @[ToAXI4.scala:301:29]
wire _tl2axi4_auto_out_b_ready; // @[ToAXI4.scala:301:29]
wire _tl2axi4_auto_out_ar_valid; // @[ToAXI4.scala:301:29]
wire [3:0] _tl2axi4_auto_out_ar_bits_id; // @[ToAXI4.scala:301:29]
wire [31:0] _tl2axi4_auto_out_ar_bits_addr; // @[ToAXI4.scala:301:29]
wire [7:0] _tl2axi4_auto_out_ar_bits_len; // @[ToAXI4.scala:301:29]
wire [2:0] _tl2axi4_auto_out_ar_bits_size; // @[ToAXI4.scala:301:29]
wire [1:0] _tl2axi4_auto_out_ar_bits_burst; // @[ToAXI4.scala:301:29]
wire _tl2axi4_auto_out_ar_bits_lock; // @[ToAXI4.scala:301:29]
wire [3:0] _tl2axi4_auto_out_ar_bits_cache; // @[ToAXI4.scala:301:29]
wire [2:0] _tl2axi4_auto_out_ar_bits_prot; // @[ToAXI4.scala:301:29]
wire [3:0] _tl2axi4_auto_out_ar_bits_qos; // @[ToAXI4.scala:301:29]
wire [3:0] _tl2axi4_auto_out_ar_bits_echo_tl_state_size; // @[ToAXI4.scala:301:29]
wire [3:0] _tl2axi4_auto_out_ar_bits_echo_tl_state_source; // @[ToAXI4.scala:301:29]
wire _tl2axi4_auto_out_r_ready; // @[ToAXI4.scala:301:29]
wire _axi4index_auto_in_aw_ready; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_in_w_ready; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_in_b_valid; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_in_b_bits_id; // @[IdIndexer.scala:108:31]
wire [1:0] _axi4index_auto_in_b_bits_resp; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_in_b_bits_echo_tl_state_size; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_in_b_bits_echo_tl_state_source; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_in_ar_ready; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_in_r_valid; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_in_r_bits_id; // @[IdIndexer.scala:108:31]
wire [63:0] _axi4index_auto_in_r_bits_data; // @[IdIndexer.scala:108:31]
wire [1:0] _axi4index_auto_in_r_bits_resp; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_in_r_bits_echo_tl_state_size; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_in_r_bits_echo_tl_state_source; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_in_r_bits_last; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_aw_valid; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_aw_bits_id; // @[IdIndexer.scala:108:31]
wire [31:0] _axi4index_auto_out_aw_bits_addr; // @[IdIndexer.scala:108:31]
wire [7:0] _axi4index_auto_out_aw_bits_len; // @[IdIndexer.scala:108:31]
wire [2:0] _axi4index_auto_out_aw_bits_size; // @[IdIndexer.scala:108:31]
wire [1:0] _axi4index_auto_out_aw_bits_burst; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_aw_bits_lock; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_aw_bits_cache; // @[IdIndexer.scala:108:31]
wire [2:0] _axi4index_auto_out_aw_bits_prot; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_aw_bits_qos; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_aw_bits_echo_tl_state_size; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_aw_bits_echo_tl_state_source; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_w_valid; // @[IdIndexer.scala:108:31]
wire [63:0] _axi4index_auto_out_w_bits_data; // @[IdIndexer.scala:108:31]
wire [7:0] _axi4index_auto_out_w_bits_strb; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_w_bits_last; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_b_ready; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_ar_valid; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_ar_bits_id; // @[IdIndexer.scala:108:31]
wire [31:0] _axi4index_auto_out_ar_bits_addr; // @[IdIndexer.scala:108:31]
wire [7:0] _axi4index_auto_out_ar_bits_len; // @[IdIndexer.scala:108:31]
wire [2:0] _axi4index_auto_out_ar_bits_size; // @[IdIndexer.scala:108:31]
wire [1:0] _axi4index_auto_out_ar_bits_burst; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_ar_bits_lock; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_ar_bits_cache; // @[IdIndexer.scala:108:31]
wire [2:0] _axi4index_auto_out_ar_bits_prot; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_ar_bits_qos; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_ar_bits_echo_tl_state_size; // @[IdIndexer.scala:108:31]
wire [3:0] _axi4index_auto_out_ar_bits_echo_tl_state_source; // @[IdIndexer.scala:108:31]
wire _axi4index_auto_out_r_ready; // @[IdIndexer.scala:108:31]
wire _axi4yank_auto_in_aw_ready; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_w_ready; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_b_valid; // @[UserYanker.scala:125:30]
wire [3:0] _axi4yank_auto_in_b_bits_id; // @[UserYanker.scala:125:30]
wire [1:0] _axi4yank_auto_in_b_bits_resp; // @[UserYanker.scala:125:30]
wire [3:0] _axi4yank_auto_in_b_bits_echo_tl_state_size; // @[UserYanker.scala:125:30]
wire [3:0] _axi4yank_auto_in_b_bits_echo_tl_state_source; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_ar_ready; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_r_valid; // @[UserYanker.scala:125:30]
wire [3:0] _axi4yank_auto_in_r_bits_id; // @[UserYanker.scala:125:30]
wire [63:0] _axi4yank_auto_in_r_bits_data; // @[UserYanker.scala:125:30]
wire [1:0] _axi4yank_auto_in_r_bits_resp; // @[UserYanker.scala:125:30]
wire [3:0] _axi4yank_auto_in_r_bits_echo_tl_state_size; // @[UserYanker.scala:125:30]
wire [3:0] _axi4yank_auto_in_r_bits_echo_tl_state_source; // @[UserYanker.scala:125:30]
wire _axi4yank_auto_in_r_bits_last; // @[UserYanker.scala:125:30]
AXI4UserYanker axi4yank ( // @[UserYanker.scala:125:30]
.clock (clock),
.reset (reset),
.auto_in_aw_ready (_axi4yank_auto_in_aw_ready),
.auto_in_aw_valid (_axi4index_auto_out_aw_valid), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_id (_axi4index_auto_out_aw_bits_id), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_addr (_axi4index_auto_out_aw_bits_addr), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_len (_axi4index_auto_out_aw_bits_len), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_size (_axi4index_auto_out_aw_bits_size), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_burst (_axi4index_auto_out_aw_bits_burst), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_lock (_axi4index_auto_out_aw_bits_lock), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_cache (_axi4index_auto_out_aw_bits_cache), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_prot (_axi4index_auto_out_aw_bits_prot), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_qos (_axi4index_auto_out_aw_bits_qos), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_echo_tl_state_size (_axi4index_auto_out_aw_bits_echo_tl_state_size), // @[IdIndexer.scala:108:31]
.auto_in_aw_bits_echo_tl_state_source (_axi4index_auto_out_aw_bits_echo_tl_state_source), // @[IdIndexer.scala:108:31]
.auto_in_w_ready (_axi4yank_auto_in_w_ready),
.auto_in_w_valid (_axi4index_auto_out_w_valid), // @[IdIndexer.scala:108:31]
.auto_in_w_bits_data (_axi4index_auto_out_w_bits_data), // @[IdIndexer.scala:108:31]
.auto_in_w_bits_strb (_axi4index_auto_out_w_bits_strb), // @[IdIndexer.scala:108:31]
.auto_in_w_bits_last (_axi4index_auto_out_w_bits_last), // @[IdIndexer.scala:108:31]
.auto_in_b_ready (_axi4index_auto_out_b_ready), // @[IdIndexer.scala:108:31]
.auto_in_b_valid (_axi4yank_auto_in_b_valid),
.auto_in_b_bits_id (_axi4yank_auto_in_b_bits_id),
.auto_in_b_bits_resp (_axi4yank_auto_in_b_bits_resp),
.auto_in_b_bits_echo_tl_state_size (_axi4yank_auto_in_b_bits_echo_tl_state_size),
.auto_in_b_bits_echo_tl_state_source (_axi4yank_auto_in_b_bits_echo_tl_state_source),
.auto_in_ar_ready (_axi4yank_auto_in_ar_ready),
.auto_in_ar_valid (_axi4index_auto_out_ar_valid), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_id (_axi4index_auto_out_ar_bits_id), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_addr (_axi4index_auto_out_ar_bits_addr), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_len (_axi4index_auto_out_ar_bits_len), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_size (_axi4index_auto_out_ar_bits_size), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_burst (_axi4index_auto_out_ar_bits_burst), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_lock (_axi4index_auto_out_ar_bits_lock), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_cache (_axi4index_auto_out_ar_bits_cache), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_prot (_axi4index_auto_out_ar_bits_prot), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_qos (_axi4index_auto_out_ar_bits_qos), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_echo_tl_state_size (_axi4index_auto_out_ar_bits_echo_tl_state_size), // @[IdIndexer.scala:108:31]
.auto_in_ar_bits_echo_tl_state_source (_axi4index_auto_out_ar_bits_echo_tl_state_source), // @[IdIndexer.scala:108:31]
.auto_in_r_ready (_axi4index_auto_out_r_ready), // @[IdIndexer.scala:108:31]
.auto_in_r_valid (_axi4yank_auto_in_r_valid),
.auto_in_r_bits_id (_axi4yank_auto_in_r_bits_id),
.auto_in_r_bits_data (_axi4yank_auto_in_r_bits_data),
.auto_in_r_bits_resp (_axi4yank_auto_in_r_bits_resp),
.auto_in_r_bits_echo_tl_state_size (_axi4yank_auto_in_r_bits_echo_tl_state_size),
.auto_in_r_bits_echo_tl_state_source (_axi4yank_auto_in_r_bits_echo_tl_state_source),
.auto_in_r_bits_last (_axi4yank_auto_in_r_bits_last),
.auto_out_aw_ready (auto_axi4yank_out_aw_ready),
.auto_out_aw_valid (auto_axi4yank_out_aw_valid),
.auto_out_aw_bits_id (auto_axi4yank_out_aw_bits_id),
.auto_out_aw_bits_addr (auto_axi4yank_out_aw_bits_addr),
.auto_out_aw_bits_len (auto_axi4yank_out_aw_bits_len),
.auto_out_aw_bits_size (auto_axi4yank_out_aw_bits_size),
.auto_out_aw_bits_burst (auto_axi4yank_out_aw_bits_burst),
.auto_out_aw_bits_lock (auto_axi4yank_out_aw_bits_lock),
.auto_out_aw_bits_cache (auto_axi4yank_out_aw_bits_cache),
.auto_out_aw_bits_prot (auto_axi4yank_out_aw_bits_prot),
.auto_out_aw_bits_qos (auto_axi4yank_out_aw_bits_qos),
.auto_out_w_ready (auto_axi4yank_out_w_ready),
.auto_out_w_valid (auto_axi4yank_out_w_valid),
.auto_out_w_bits_data (auto_axi4yank_out_w_bits_data),
.auto_out_w_bits_strb (auto_axi4yank_out_w_bits_strb),
.auto_out_w_bits_last (auto_axi4yank_out_w_bits_last),
.auto_out_b_ready (auto_axi4yank_out_b_ready),
.auto_out_b_valid (auto_axi4yank_out_b_valid),
.auto_out_b_bits_id (auto_axi4yank_out_b_bits_id),
.auto_out_b_bits_resp (auto_axi4yank_out_b_bits_resp),
.auto_out_ar_ready (auto_axi4yank_out_ar_ready),
.auto_out_ar_valid (auto_axi4yank_out_ar_valid),
.auto_out_ar_bits_id (auto_axi4yank_out_ar_bits_id),
.auto_out_ar_bits_addr (auto_axi4yank_out_ar_bits_addr),
.auto_out_ar_bits_len (auto_axi4yank_out_ar_bits_len),
.auto_out_ar_bits_size (auto_axi4yank_out_ar_bits_size),
.auto_out_ar_bits_burst (auto_axi4yank_out_ar_bits_burst),
.auto_out_ar_bits_lock (auto_axi4yank_out_ar_bits_lock),
.auto_out_ar_bits_cache (auto_axi4yank_out_ar_bits_cache),
.auto_out_ar_bits_prot (auto_axi4yank_out_ar_bits_prot),
.auto_out_ar_bits_qos (auto_axi4yank_out_ar_bits_qos),
.auto_out_r_ready (auto_axi4yank_out_r_ready),
.auto_out_r_valid (auto_axi4yank_out_r_valid),
.auto_out_r_bits_id (auto_axi4yank_out_r_bits_id),
.auto_out_r_bits_data (auto_axi4yank_out_r_bits_data),
.auto_out_r_bits_resp (auto_axi4yank_out_r_bits_resp),
.auto_out_r_bits_last (auto_axi4yank_out_r_bits_last)
); // @[UserYanker.scala:125:30]
AXI4IdIndexer axi4index ( // @[IdIndexer.scala:108:31]
.auto_in_aw_ready (_axi4index_auto_in_aw_ready),
.auto_in_aw_valid (_tl2axi4_auto_out_aw_valid), // @[ToAXI4.scala:301:29]
.auto_in_aw_bits_id (_tl2axi4_auto_out_aw_bits_id), // @[ToAXI4.scala:301:29]
.auto_in_aw_bits_addr (_tl2axi4_auto_out_aw_bits_addr), // @[ToAXI4.scala:301:29]
.auto_in_aw_bits_len (_tl2axi4_auto_out_aw_bits_len), // @[ToAXI4.scala:301:29]
.auto_in_aw_bits_size (_tl2axi4_auto_out_aw_bits_size), // @[ToAXI4.scala:301:29]
.auto_in_aw_bits_burst (_tl2axi4_auto_out_aw_bits_burst), // @[ToAXI4.scala:301:29]
.auto_in_aw_bits_lock (_tl2axi4_auto_out_aw_bits_lock), // @[ToAXI4.scala:301:29]
.auto_in_aw_bits_cache (_tl2axi4_auto_out_aw_bits_cache), // @[ToAXI4.scala:301:29]
.auto_in_aw_bits_prot (_tl2axi4_auto_out_aw_bits_prot), // @[ToAXI4.scala:301:29]
.auto_in_aw_bits_qos (_tl2axi4_auto_out_aw_bits_qos), // @[ToAXI4.scala:301:29]
.auto_in_aw_bits_echo_tl_state_size (_tl2axi4_auto_out_aw_bits_echo_tl_state_size), // @[ToAXI4.scala:301:29]
.auto_in_aw_bits_echo_tl_state_source (_tl2axi4_auto_out_aw_bits_echo_tl_state_source), // @[ToAXI4.scala:301:29]
.auto_in_w_ready (_axi4index_auto_in_w_ready),
.auto_in_w_valid (_tl2axi4_auto_out_w_valid), // @[ToAXI4.scala:301:29]
.auto_in_w_bits_data (_tl2axi4_auto_out_w_bits_data), // @[ToAXI4.scala:301:29]
.auto_in_w_bits_strb (_tl2axi4_auto_out_w_bits_strb), // @[ToAXI4.scala:301:29]
.auto_in_w_bits_last (_tl2axi4_auto_out_w_bits_last), // @[ToAXI4.scala:301:29]
.auto_in_b_ready (_tl2axi4_auto_out_b_ready), // @[ToAXI4.scala:301:29]
.auto_in_b_valid (_axi4index_auto_in_b_valid),
.auto_in_b_bits_id (_axi4index_auto_in_b_bits_id),
.auto_in_b_bits_resp (_axi4index_auto_in_b_bits_resp),
.auto_in_b_bits_echo_tl_state_size (_axi4index_auto_in_b_bits_echo_tl_state_size),
.auto_in_b_bits_echo_tl_state_source (_axi4index_auto_in_b_bits_echo_tl_state_source),
.auto_in_ar_ready (_axi4index_auto_in_ar_ready),
.auto_in_ar_valid (_tl2axi4_auto_out_ar_valid), // @[ToAXI4.scala:301:29]
.auto_in_ar_bits_id (_tl2axi4_auto_out_ar_bits_id), // @[ToAXI4.scala:301:29]
.auto_in_ar_bits_addr (_tl2axi4_auto_out_ar_bits_addr), // @[ToAXI4.scala:301:29]
.auto_in_ar_bits_len (_tl2axi4_auto_out_ar_bits_len), // @[ToAXI4.scala:301:29]
.auto_in_ar_bits_size (_tl2axi4_auto_out_ar_bits_size), // @[ToAXI4.scala:301:29]
.auto_in_ar_bits_burst (_tl2axi4_auto_out_ar_bits_burst), // @[ToAXI4.scala:301:29]
.auto_in_ar_bits_lock (_tl2axi4_auto_out_ar_bits_lock), // @[ToAXI4.scala:301:29]
.auto_in_ar_bits_cache (_tl2axi4_auto_out_ar_bits_cache), // @[ToAXI4.scala:301:29]
.auto_in_ar_bits_prot (_tl2axi4_auto_out_ar_bits_prot), // @[ToAXI4.scala:301:29]
.auto_in_ar_bits_qos (_tl2axi4_auto_out_ar_bits_qos), // @[ToAXI4.scala:301:29]
.auto_in_ar_bits_echo_tl_state_size (_tl2axi4_auto_out_ar_bits_echo_tl_state_size), // @[ToAXI4.scala:301:29]
.auto_in_ar_bits_echo_tl_state_source (_tl2axi4_auto_out_ar_bits_echo_tl_state_source), // @[ToAXI4.scala:301:29]
.auto_in_r_ready (_tl2axi4_auto_out_r_ready), // @[ToAXI4.scala:301:29]
.auto_in_r_valid (_axi4index_auto_in_r_valid),
.auto_in_r_bits_id (_axi4index_auto_in_r_bits_id),
.auto_in_r_bits_data (_axi4index_auto_in_r_bits_data),
.auto_in_r_bits_resp (_axi4index_auto_in_r_bits_resp),
.auto_in_r_bits_echo_tl_state_size (_axi4index_auto_in_r_bits_echo_tl_state_size),
.auto_in_r_bits_echo_tl_state_source (_axi4index_auto_in_r_bits_echo_tl_state_source),
.auto_in_r_bits_last (_axi4index_auto_in_r_bits_last),
.auto_out_aw_ready (_axi4yank_auto_in_aw_ready), // @[UserYanker.scala:125:30]
.auto_out_aw_valid (_axi4index_auto_out_aw_valid),
.auto_out_aw_bits_id (_axi4index_auto_out_aw_bits_id),
.auto_out_aw_bits_addr (_axi4index_auto_out_aw_bits_addr),
.auto_out_aw_bits_len (_axi4index_auto_out_aw_bits_len),
.auto_out_aw_bits_size (_axi4index_auto_out_aw_bits_size),
.auto_out_aw_bits_burst (_axi4index_auto_out_aw_bits_burst),
.auto_out_aw_bits_lock (_axi4index_auto_out_aw_bits_lock),
.auto_out_aw_bits_cache (_axi4index_auto_out_aw_bits_cache),
.auto_out_aw_bits_prot (_axi4index_auto_out_aw_bits_prot),
.auto_out_aw_bits_qos (_axi4index_auto_out_aw_bits_qos),
.auto_out_aw_bits_echo_tl_state_size (_axi4index_auto_out_aw_bits_echo_tl_state_size),
.auto_out_aw_bits_echo_tl_state_source (_axi4index_auto_out_aw_bits_echo_tl_state_source),
.auto_out_w_ready (_axi4yank_auto_in_w_ready), // @[UserYanker.scala:125:30]
.auto_out_w_valid (_axi4index_auto_out_w_valid),
.auto_out_w_bits_data (_axi4index_auto_out_w_bits_data),
.auto_out_w_bits_strb (_axi4index_auto_out_w_bits_strb),
.auto_out_w_bits_last (_axi4index_auto_out_w_bits_last),
.auto_out_b_ready (_axi4index_auto_out_b_ready),
.auto_out_b_valid (_axi4yank_auto_in_b_valid), // @[UserYanker.scala:125:30]
.auto_out_b_bits_id (_axi4yank_auto_in_b_bits_id), // @[UserYanker.scala:125:30]
.auto_out_b_bits_resp (_axi4yank_auto_in_b_bits_resp), // @[UserYanker.scala:125:30]
.auto_out_b_bits_echo_tl_state_size (_axi4yank_auto_in_b_bits_echo_tl_state_size), // @[UserYanker.scala:125:30]
.auto_out_b_bits_echo_tl_state_source (_axi4yank_auto_in_b_bits_echo_tl_state_source), // @[UserYanker.scala:125:30]
.auto_out_ar_ready (_axi4yank_auto_in_ar_ready), // @[UserYanker.scala:125:30]
.auto_out_ar_valid (_axi4index_auto_out_ar_valid),
.auto_out_ar_bits_id (_axi4index_auto_out_ar_bits_id),
.auto_out_ar_bits_addr (_axi4index_auto_out_ar_bits_addr),
.auto_out_ar_bits_len (_axi4index_auto_out_ar_bits_len),
.auto_out_ar_bits_size (_axi4index_auto_out_ar_bits_size),
.auto_out_ar_bits_burst (_axi4index_auto_out_ar_bits_burst),
.auto_out_ar_bits_lock (_axi4index_auto_out_ar_bits_lock),
.auto_out_ar_bits_cache (_axi4index_auto_out_ar_bits_cache),
.auto_out_ar_bits_prot (_axi4index_auto_out_ar_bits_prot),
.auto_out_ar_bits_qos (_axi4index_auto_out_ar_bits_qos),
.auto_out_ar_bits_echo_tl_state_size (_axi4index_auto_out_ar_bits_echo_tl_state_size),
.auto_out_ar_bits_echo_tl_state_source (_axi4index_auto_out_ar_bits_echo_tl_state_source),
.auto_out_r_ready (_axi4index_auto_out_r_ready),
.auto_out_r_valid (_axi4yank_auto_in_r_valid), // @[UserYanker.scala:125:30]
.auto_out_r_bits_id (_axi4yank_auto_in_r_bits_id), // @[UserYanker.scala:125:30]
.auto_out_r_bits_data (_axi4yank_auto_in_r_bits_data), // @[UserYanker.scala:125:30]
.auto_out_r_bits_resp (_axi4yank_auto_in_r_bits_resp), // @[UserYanker.scala:125:30]
.auto_out_r_bits_echo_tl_state_size (_axi4yank_auto_in_r_bits_echo_tl_state_size), // @[UserYanker.scala:125:30]
.auto_out_r_bits_echo_tl_state_source (_axi4yank_auto_in_r_bits_echo_tl_state_source), // @[UserYanker.scala:125:30]
.auto_out_r_bits_last (_axi4yank_auto_in_r_bits_last) // @[UserYanker.scala:125:30]
); // @[IdIndexer.scala:108:31]
TLToAXI4_1 tl2axi4 ( // @[ToAXI4.scala:301:29]
.clock (clock),
.reset (reset),
.auto_in_a_ready (auto_widget_anon_in_a_ready),
.auto_in_a_valid (auto_widget_anon_in_a_valid),
.auto_in_a_bits_opcode (auto_widget_anon_in_a_bits_opcode),
.auto_in_a_bits_param (auto_widget_anon_in_a_bits_param),
.auto_in_a_bits_size (auto_widget_anon_in_a_bits_size),
.auto_in_a_bits_source (auto_widget_anon_in_a_bits_source),
.auto_in_a_bits_address (auto_widget_anon_in_a_bits_address),
.auto_in_a_bits_mask (auto_widget_anon_in_a_bits_mask),
.auto_in_a_bits_data (auto_widget_anon_in_a_bits_data),
.auto_in_a_bits_corrupt (auto_widget_anon_in_a_bits_corrupt),
.auto_in_d_ready (auto_widget_anon_in_d_ready),
.auto_in_d_valid (auto_widget_anon_in_d_valid),
.auto_in_d_bits_opcode (auto_widget_anon_in_d_bits_opcode),
.auto_in_d_bits_size (auto_widget_anon_in_d_bits_size),
.auto_in_d_bits_source (auto_widget_anon_in_d_bits_source),
.auto_in_d_bits_denied (auto_widget_anon_in_d_bits_denied),
.auto_in_d_bits_data (auto_widget_anon_in_d_bits_data),
.auto_in_d_bits_corrupt (auto_widget_anon_in_d_bits_corrupt),
.auto_out_aw_ready (_axi4index_auto_in_aw_ready), // @[IdIndexer.scala:108:31]
.auto_out_aw_valid (_tl2axi4_auto_out_aw_valid),
.auto_out_aw_bits_id (_tl2axi4_auto_out_aw_bits_id),
.auto_out_aw_bits_addr (_tl2axi4_auto_out_aw_bits_addr),
.auto_out_aw_bits_len (_tl2axi4_auto_out_aw_bits_len),
.auto_out_aw_bits_size (_tl2axi4_auto_out_aw_bits_size),
.auto_out_aw_bits_burst (_tl2axi4_auto_out_aw_bits_burst),
.auto_out_aw_bits_lock (_tl2axi4_auto_out_aw_bits_lock),
.auto_out_aw_bits_cache (_tl2axi4_auto_out_aw_bits_cache),
.auto_out_aw_bits_prot (_tl2axi4_auto_out_aw_bits_prot),
.auto_out_aw_bits_qos (_tl2axi4_auto_out_aw_bits_qos),
.auto_out_aw_bits_echo_tl_state_size (_tl2axi4_auto_out_aw_bits_echo_tl_state_size),
.auto_out_aw_bits_echo_tl_state_source (_tl2axi4_auto_out_aw_bits_echo_tl_state_source),
.auto_out_w_ready (_axi4index_auto_in_w_ready), // @[IdIndexer.scala:108:31]
.auto_out_w_valid (_tl2axi4_auto_out_w_valid),
.auto_out_w_bits_data (_tl2axi4_auto_out_w_bits_data),
.auto_out_w_bits_strb (_tl2axi4_auto_out_w_bits_strb),
.auto_out_w_bits_last (_tl2axi4_auto_out_w_bits_last),
.auto_out_b_ready (_tl2axi4_auto_out_b_ready),
.auto_out_b_valid (_axi4index_auto_in_b_valid), // @[IdIndexer.scala:108:31]
.auto_out_b_bits_id (_axi4index_auto_in_b_bits_id), // @[IdIndexer.scala:108:31]
.auto_out_b_bits_resp (_axi4index_auto_in_b_bits_resp), // @[IdIndexer.scala:108:31]
.auto_out_b_bits_echo_tl_state_size (_axi4index_auto_in_b_bits_echo_tl_state_size), // @[IdIndexer.scala:108:31]
.auto_out_b_bits_echo_tl_state_source (_axi4index_auto_in_b_bits_echo_tl_state_source), // @[IdIndexer.scala:108:31]
.auto_out_ar_ready (_axi4index_auto_in_ar_ready), // @[IdIndexer.scala:108:31]
.auto_out_ar_valid (_tl2axi4_auto_out_ar_valid),
.auto_out_ar_bits_id (_tl2axi4_auto_out_ar_bits_id),
.auto_out_ar_bits_addr (_tl2axi4_auto_out_ar_bits_addr),
.auto_out_ar_bits_len (_tl2axi4_auto_out_ar_bits_len),
.auto_out_ar_bits_size (_tl2axi4_auto_out_ar_bits_size),
.auto_out_ar_bits_burst (_tl2axi4_auto_out_ar_bits_burst),
.auto_out_ar_bits_lock (_tl2axi4_auto_out_ar_bits_lock),
.auto_out_ar_bits_cache (_tl2axi4_auto_out_ar_bits_cache),
.auto_out_ar_bits_prot (_tl2axi4_auto_out_ar_bits_prot),
.auto_out_ar_bits_qos (_tl2axi4_auto_out_ar_bits_qos),
.auto_out_ar_bits_echo_tl_state_size (_tl2axi4_auto_out_ar_bits_echo_tl_state_size),
.auto_out_ar_bits_echo_tl_state_source (_tl2axi4_auto_out_ar_bits_echo_tl_state_source),
.auto_out_r_ready (_tl2axi4_auto_out_r_ready),
.auto_out_r_valid (_axi4index_auto_in_r_valid), // @[IdIndexer.scala:108:31]
.auto_out_r_bits_id (_axi4index_auto_in_r_bits_id), // @[IdIndexer.scala:108:31]
.auto_out_r_bits_data (_axi4index_auto_in_r_bits_data), // @[IdIndexer.scala:108:31]
.auto_out_r_bits_resp (_axi4index_auto_in_r_bits_resp), // @[IdIndexer.scala:108:31]
.auto_out_r_bits_echo_tl_state_size (_axi4index_auto_in_r_bits_echo_tl_state_size), // @[IdIndexer.scala:108:31]
.auto_out_r_bits_echo_tl_state_source (_axi4index_auto_in_r_bits_echo_tl_state_source), // @[IdIndexer.scala:108:31]
.auto_out_r_bits_last (_axi4index_auto_in_r_bits_last) // @[IdIndexer.scala:108:31]
); // @[ToAXI4.scala:301:29]
assign auto_tl_in_a_ready = auto_tl_out_a_ready; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_valid = auto_tl_out_d_valid; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_opcode = auto_tl_out_d_bits_opcode; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_size = auto_tl_out_d_bits_size; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_source = auto_tl_out_d_bits_source; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_denied = auto_tl_out_d_bits_denied; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_data = auto_tl_out_d_bits_data; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_corrupt = auto_tl_out_d_bits_corrupt; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_valid = auto_tl_in_a_valid; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_opcode = auto_tl_in_a_bits_opcode; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_param = auto_tl_in_a_bits_param; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_size = auto_tl_in_a_bits_size; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_source = auto_tl_in_a_bits_source; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_address = auto_tl_in_a_bits_address; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_mask = auto_tl_in_a_bits_mask; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_data = auto_tl_in_a_bits_data; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_a_bits_corrupt = auto_tl_in_a_bits_corrupt; // @[LazyModuleImp.scala:138:7]
assign auto_tl_out_d_ready = auto_tl_in_d_ready; // @[LazyModuleImp.scala:138:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File primitives.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object lowMask
{
def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt =
{
require(topBound != bottomBound)
val numInVals = BigInt(1)<<in.getWidth
if (topBound < bottomBound) {
lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound)
} else if (numInVals > 64 /* Empirical */) {
// For simulation performance, we should avoid generating
// extremely wide shifters, so we divide and conquer.
// Empirically, this does not impact synthesis QoR.
val mid = numInVals / 2
val msb = in(in.getWidth - 1)
val lsbs = in(in.getWidth - 2, 0)
if (mid < topBound) {
if (mid <= bottomBound) {
Mux(msb,
lowMask(lsbs, topBound - mid, bottomBound - mid),
0.U
)
} else {
Mux(msb,
lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U,
lowMask(lsbs, mid, bottomBound)
)
}
} else {
~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound))
}
} else {
val shift = (BigInt(-1)<<numInVals.toInt).S>>in
Reverse(
shift(
(numInVals - 1 - bottomBound).toInt,
(numInVals - topBound).toInt
)
)
}
}
}
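// Added note (not in the original source): in the non-recursive base case, with
// topBound > bottomBound, lowMask(in, topBound, bottomBound) yields a
// (topBound - bottomBound)-bit thermometer mask whose bit j is set when in > bottomBound + j.
// For a 3-bit `in`, lowMask(in, 5, 2) gives "b000" for in = 1, "b011" for in = 4, "b111" for in = 7.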
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object countLeadingZeros
{
def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy2
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 1)>>1
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 2).orR
reducedVec.asUInt
}
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
object orReduceBy4
{
def apply(in: UInt): UInt =
{
val reducedWidth = (in.getWidth + 3)>>2
val reducedVec = Wire(Vec(reducedWidth, Bool()))
for (ix <- 0 until reducedWidth - 1) {
reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR
}
reducedVec(reducedWidth - 1) :=
in(in.getWidth - 1, (reducedWidth - 1) * 4).orR
reducedVec.asUInt
}
}
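// Added note (not in the original source): orReduceBy4 ORs each group of four input bits
// into one output bit, least-significant group first; orReduceBy2 does the same over pairs.
// Worked example: for a 10-bit in = "b10_0000_0001", the groups are in(3,0) = 0001,
// in(7,4) = 0000 and in(9,8) = 10, so the result is "b101".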
File MulAddRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle
{
//*** ENCODE SOME OF THESE CASES IN FEWER BITS?:
val isSigNaNAny = Bool()
val isNaNAOrB = Bool()
val isInfA = Bool()
val isZeroA = Bool()
val isInfB = Bool()
val isZeroB = Bool()
val signProd = Bool()
val isNaNC = Bool()
val isInfC = Bool()
val isZeroC = Bool()
val sExpSum = SInt((expWidth + 2).W)
val doSubMags = Bool()
val CIsDominant = Bool()
val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W)
val highAlignedSigC = UInt((sigWidth + 2).W)
val bit0AlignedSigC = UInt(1.W)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val mulAddA = Output(UInt(sigWidth.W))
val mulAddB = Output(UInt(sigWidth.W))
val mulAddC = Output(UInt((sigWidth * 2).W))
val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
//*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN
//*** UNSHIFTED C AND PRODUCT):
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a)
val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b)
val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c)
val signProd = rawA.sign ^ rawB.sign ^ io.op(1)
//*** REVIEW THE BIAS FOR 'sExpAlignedProd':
val sExpAlignedProd =
rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S
val doSubMags = signProd ^ rawC.sign ^ io.op(0)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sNatCAlignDist = sExpAlignedProd - rawC.sExp
val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0)
val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S)
val CIsDominant =
! rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U))
val CAlignDist =
Mux(isMinCAlign,
0.U,
Mux(posNatCAlignDist < (sigSumWidth - 1).U,
posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0),
(sigSumWidth - 1).U
)
)
val mainAlignedSigC =
(Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist
val reduced4CExtra =
(orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) &
lowMask(
CAlignDist>>2,
//*** NOT NEEDED?:
// (sigSumWidth + 2)>>2,
(sigSumWidth - 1)>>2,
(sigSumWidth - sigWidth - 1)>>2
)
).orR
val alignedSigC =
Cat(mainAlignedSigC>>3,
Mux(doSubMags,
mainAlignedSigC(2, 0).andR && ! reduced4CExtra,
mainAlignedSigC(2, 0).orR || reduced4CExtra
)
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
io.mulAddA := rawA.sig
io.mulAddB := rawB.sig
io.mulAddC := alignedSigC(sigWidth * 2, 1)
io.toPostMul.isSigNaNAny :=
isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) ||
isSigNaNRawFloat(rawC)
io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN
io.toPostMul.isInfA := rawA.isInf
io.toPostMul.isZeroA := rawA.isZero
io.toPostMul.isInfB := rawB.isInf
io.toPostMul.isZeroB := rawB.isZero
io.toPostMul.signProd := signProd
io.toPostMul.isNaNC := rawC.isNaN
io.toPostMul.isInfC := rawC.isInf
io.toPostMul.isZeroC := rawC.isZero
io.toPostMul.sExpSum :=
Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S)
io.toPostMul.doSubMags := doSubMags
io.toPostMul.CIsDominant := CIsDominant
io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0)
io.toPostMul.highAlignedSigC :=
alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1)
io.toPostMul.bit0AlignedSigC := alignedSigC(0)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth))
val mulAddResult = Input(UInt((sigWidth * 2 + 1).W))
val roundingMode = Input(UInt(3.W))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigSumWidth = sigWidth * 3 + 3
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_min = (io.roundingMode === round_min)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags
val sigSum =
Cat(Mux(io.mulAddResult(sigWidth * 2),
io.fromPreMul.highAlignedSigC + 1.U,
io.fromPreMul.highAlignedSigC
),
io.mulAddResult(sigWidth * 2 - 1, 0),
io.fromPreMul.bit0AlignedSigC
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
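// Addend-dominant case (io.fromPreMul.CIsDominant): the exponent follows C, the sum is
// re-aligned to C's position by a left shift of CDom_CAlignDist, and the bits shifted
// below the kept significand are collected into the sticky bit.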
val CDom_sign = opSignC
val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext
val CDom_absSigSum =
Mux(io.fromPreMul.doSubMags,
~sigSum(sigSumWidth - 1, sigWidth + 1),
0.U(1.W) ##
//*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO:
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ##
sigSum(sigSumWidth - 3, sigWidth + 2)
)
val CDom_absSigSumExtra =
Mux(io.fromPreMul.doSubMags,
(~sigSum(sigWidth, 1)).orR,
sigSum(sigWidth + 1, 1).orR
)
val CDom_mainSig =
(CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)(
sigWidth * 2 + 1, sigWidth - 3)
val CDom_reduced4SigExtra =
(orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) &
lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR
val CDom_sig =
Cat(CDom_mainSig>>3,
CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra ||
CDom_absSigSumExtra
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
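// Product-dominant (or comparable-magnitude) case: a subtraction may cancel high-order bits,
// so the normalization distance is estimated with a leading-zero count on a 2-bit-reduced
// copy of the sum (hence notCDom_nearNormDist); complete cancellation is detected separately.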
val notCDom_signSigSum = sigSum(sigWidth * 2 + 3)
val notCDom_absSigSum =
Mux(notCDom_signSigSum,
~sigSum(sigWidth * 2 + 2, 0),
sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags
)
val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum)
val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum)
val notCDom_nearNormDist = notCDom_normDistReduced2<<1
val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext
val notCDom_mainSig =
(notCDom_absSigSum<<notCDom_nearNormDist)(
sigWidth * 2 + 3, sigWidth - 1)
val notCDom_reduced4SigExtra =
(orReduceBy2(
notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) &
lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2)
).orR
val notCDom_sig =
Cat(notCDom_mainSig>>3,
notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra
)
val notCDom_completeCancellation =
(notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U)
val notCDom_sign =
Mux(notCDom_completeCancellation,
roundingMode_min,
io.fromPreMul.signProd ^ notCDom_signSigSum
)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB
val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC
val notNaN_addZeros =
(io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) &&
io.fromPreMul.isZeroC
io.invalidExc :=
io.fromPreMul.isSigNaNAny ||
(io.fromPreMul.isInfA && io.fromPreMul.isZeroB) ||
(io.fromPreMul.isZeroA && io.fromPreMul.isInfB) ||
(! io.fromPreMul.isNaNAOrB &&
(io.fromPreMul.isInfA || io.fromPreMul.isInfB) &&
io.fromPreMul.isInfC &&
io.fromPreMul.doSubMags)
io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC
io.rawOut.isInf := notNaN_isInfOut
//*** IMPROVE?:
io.rawOut.isZero :=
notNaN_addZeros ||
(! io.fromPreMul.CIsDominant && notCDom_completeCancellation)
io.rawOut.sign :=
(notNaN_isInfProd && io.fromPreMul.signProd) ||
(io.fromPreMul.isInfC && opSignC) ||
(notNaN_addZeros && ! roundingMode_min &&
io.fromPreMul.signProd && opSignC) ||
(notNaN_addZeros && roundingMode_min &&
(io.fromPreMul.signProd || opSignC)) ||
(! notNaN_isInfOut && ! notNaN_addZeros &&
Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign))
io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp)
io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig)
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule
{
override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val op = Input(Bits(2.W))
val a = Input(Bits((expWidth + sigWidth + 1).W))
val b = Input(Bits((expWidth + sigWidth + 1).W))
val c = Input(Bits((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulAddRecFNToRaw_preMul =
Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth))
val mulAddRecFNToRaw_postMul =
Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth))
mulAddRecFNToRaw_preMul.io.op := io.op
mulAddRecFNToRaw_preMul.io.a := io.a
mulAddRecFNToRaw_preMul.io.b := io.b
mulAddRecFNToRaw_preMul.io.c := io.c
val mulAddResult =
(mulAddRecFNToRaw_preMul.io.mulAddA *
mulAddRecFNToRaw_preMul.io.mulAddB) +&
mulAddRecFNToRaw_preMul.io.mulAddC
mulAddRecFNToRaw_postMul.io.fromPreMul :=
mulAddRecFNToRaw_preMul.io.toPostMul
mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult
mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
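As a usage sketch (wrapper and port names are assumptions), the generator above can be instantiated for the 8/24 (single-precision) parameterization to compute (a * b) + c on operands already in the recoded format:
// Hypothetical wrapper around MulAddRecFN; assumes import chisel3._ and hardfloat's consts.
class FmaRecF32 extends Module {
  val io = IO(new Bundle {
    val a, b, c = Input(UInt(33.W))   // recoded width = expWidth + sigWidth + 1 = 33
    val out     = Output(UInt(33.W))
    val flags   = Output(UInt(5.W))
  })
  val fma = Module(new MulAddRecFN(8, 24))
  fma.io.op := 0.U                    // op = 00 selects plain (a * b) + c
  fma.io.a  := io.a
  fma.io.b  := io.b
  fma.io.c  := io.c
  fma.io.roundingMode   := consts.round_near_even
  fma.io.detectTininess := consts.tininess_afterRounding
  io.out   := fma.io.out
  io.flags := fma.io.exceptionFlags
}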
| module MulAddRecFNToRaw_postMul_e8_s24_33( // @[MulAddRecFN.scala:169:7]
input io_fromPreMul_isSigNaNAny, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isNaNAOrB, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isInfA, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_isZeroA, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_signProd, // @[MulAddRecFN.scala:172:16]
input [9:0] io_fromPreMul_sExpSum, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_doSubMags, // @[MulAddRecFN.scala:172:16]
input [4:0] io_fromPreMul_CDom_CAlignDist, // @[MulAddRecFN.scala:172:16]
input [25:0] io_fromPreMul_highAlignedSigC, // @[MulAddRecFN.scala:172:16]
input io_fromPreMul_bit0AlignedSigC, // @[MulAddRecFN.scala:172:16]
input [48:0] io_mulAddResult, // @[MulAddRecFN.scala:172:16]
output io_invalidExc, // @[MulAddRecFN.scala:172:16]
output io_rawOut_isNaN, // @[MulAddRecFN.scala:172:16]
output io_rawOut_isInf, // @[MulAddRecFN.scala:172:16]
output io_rawOut_isZero, // @[MulAddRecFN.scala:172:16]
output io_rawOut_sign, // @[MulAddRecFN.scala:172:16]
output [9:0] io_rawOut_sExp, // @[MulAddRecFN.scala:172:16]
output [26:0] io_rawOut_sig // @[MulAddRecFN.scala:172:16]
);
wire io_fromPreMul_isSigNaNAny_0 = io_fromPreMul_isSigNaNAny; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isNaNAOrB_0 = io_fromPreMul_isNaNAOrB; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isInfA_0 = io_fromPreMul_isInfA; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isZeroA_0 = io_fromPreMul_isZeroA; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_signProd_0 = io_fromPreMul_signProd; // @[MulAddRecFN.scala:169:7]
wire [9:0] io_fromPreMul_sExpSum_0 = io_fromPreMul_sExpSum; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_doSubMags_0 = io_fromPreMul_doSubMags; // @[MulAddRecFN.scala:169:7]
wire [4:0] io_fromPreMul_CDom_CAlignDist_0 = io_fromPreMul_CDom_CAlignDist; // @[MulAddRecFN.scala:169:7]
wire [25:0] io_fromPreMul_highAlignedSigC_0 = io_fromPreMul_highAlignedSigC; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_bit0AlignedSigC_0 = io_fromPreMul_bit0AlignedSigC; // @[MulAddRecFN.scala:169:7]
wire [48:0] io_mulAddResult_0 = io_mulAddResult; // @[MulAddRecFN.scala:169:7]
wire [2:0] io_roundingMode = 3'h0; // @[MulAddRecFN.scala:169:7, :172:16]
wire io_fromPreMul_isZeroC = 1'h1; // @[MulAddRecFN.scala:169:7]
wire _io_rawOut_isZero_T = 1'h1; // @[MulAddRecFN.scala:283:14]
wire _io_rawOut_sign_T_3 = 1'h1; // @[MulAddRecFN.scala:287:29]
wire io_fromPreMul_isInfB = 1'h0; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isZeroB = 1'h0; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isNaNC = 1'h0; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_isInfC = 1'h0; // @[MulAddRecFN.scala:169:7]
wire io_fromPreMul_CIsDominant = 1'h0; // @[MulAddRecFN.scala:169:7]
wire roundingMode_min = 1'h0; // @[MulAddRecFN.scala:186:45]
wire _io_invalidExc_T = 1'h0; // @[MulAddRecFN.scala:272:31]
wire _io_invalidExc_T_2 = 1'h0; // @[MulAddRecFN.scala:273:32]
wire _io_invalidExc_T_7 = 1'h0; // @[MulAddRecFN.scala:275:61]
wire _io_invalidExc_T_8 = 1'h0; // @[MulAddRecFN.scala:276:35]
wire _io_rawOut_sign_T_1 = 1'h0; // @[MulAddRecFN.scala:286:31]
wire _io_rawOut_sign_T_8 = 1'h0; // @[MulAddRecFN.scala:289:26]
wire _io_rawOut_sign_T_10 = 1'h0; // @[MulAddRecFN.scala:289:46]
wire _io_invalidExc_T_1 = io_fromPreMul_isSigNaNAny_0; // @[MulAddRecFN.scala:169:7, :271:35]
wire _io_rawOut_isNaN_T = io_fromPreMul_isNaNAOrB_0; // @[MulAddRecFN.scala:169:7, :278:48]
wire notNaN_isInfProd = io_fromPreMul_isInfA_0; // @[MulAddRecFN.scala:169:7, :264:49]
wire _io_invalidExc_T_5 = io_fromPreMul_isInfA_0; // @[MulAddRecFN.scala:169:7, :275:36]
wire _notNaN_addZeros_T = io_fromPreMul_isZeroA_0; // @[MulAddRecFN.scala:169:7, :267:32]
wire _io_invalidExc_T_9; // @[MulAddRecFN.scala:273:57]
wire notNaN_isInfOut; // @[MulAddRecFN.scala:265:44]
wire _io_rawOut_isZero_T_2; // @[MulAddRecFN.scala:282:25]
wire _io_rawOut_sign_T_17; // @[MulAddRecFN.scala:290:50]
wire [9:0] _io_rawOut_sExp_T; // @[MulAddRecFN.scala:293:26]
wire [26:0] _io_rawOut_sig_T; // @[MulAddRecFN.scala:294:25]
wire io_rawOut_isNaN_0; // @[MulAddRecFN.scala:169:7]
wire io_rawOut_isInf_0; // @[MulAddRecFN.scala:169:7]
wire io_rawOut_isZero_0; // @[MulAddRecFN.scala:169:7]
wire io_rawOut_sign_0; // @[MulAddRecFN.scala:169:7]
wire [9:0] io_rawOut_sExp_0; // @[MulAddRecFN.scala:169:7]
wire [26:0] io_rawOut_sig_0; // @[MulAddRecFN.scala:169:7]
wire io_invalidExc_0; // @[MulAddRecFN.scala:169:7]
wire opSignC = io_fromPreMul_signProd_0 ^ io_fromPreMul_doSubMags_0; // @[MulAddRecFN.scala:169:7, :190:42]
wire _sigSum_T = io_mulAddResult_0[48]; // @[MulAddRecFN.scala:169:7, :192:32]
wire [26:0] _sigSum_T_1 = {1'h0, io_fromPreMul_highAlignedSigC_0} + 27'h1; // @[MulAddRecFN.scala:169:7, :193:47]
wire [25:0] _sigSum_T_2 = _sigSum_T_1[25:0]; // @[MulAddRecFN.scala:193:47]
wire [25:0] _sigSum_T_3 = _sigSum_T ? _sigSum_T_2 : io_fromPreMul_highAlignedSigC_0; // @[MulAddRecFN.scala:169:7, :192:{16,32}, :193:47]
wire [47:0] _sigSum_T_4 = io_mulAddResult_0[47:0]; // @[MulAddRecFN.scala:169:7, :196:28]
wire [73:0] sigSum_hi = {_sigSum_T_3, _sigSum_T_4}; // @[MulAddRecFN.scala:192:{12,16}, :196:28]
wire [74:0] sigSum = {sigSum_hi, io_fromPreMul_bit0AlignedSigC_0}; // @[MulAddRecFN.scala:169:7, :192:12]
wire [1:0] _CDom_sExp_T = {1'h0, io_fromPreMul_doSubMags_0}; // @[MulAddRecFN.scala:169:7, :203:69]
wire [10:0] _GEN = {io_fromPreMul_sExpSum_0[9], io_fromPreMul_sExpSum_0}; // @[MulAddRecFN.scala:169:7, :203:43]
wire [10:0] _CDom_sExp_T_1 = _GEN - {{9{_CDom_sExp_T[1]}}, _CDom_sExp_T}; // @[MulAddRecFN.scala:203:{43,69}]
wire [9:0] _CDom_sExp_T_2 = _CDom_sExp_T_1[9:0]; // @[MulAddRecFN.scala:203:43]
wire [9:0] CDom_sExp = _CDom_sExp_T_2; // @[MulAddRecFN.scala:203:43]
wire [49:0] _CDom_absSigSum_T = sigSum[74:25]; // @[MulAddRecFN.scala:192:12, :206:20]
wire [49:0] _CDom_absSigSum_T_1 = ~_CDom_absSigSum_T; // @[MulAddRecFN.scala:206:{13,20}]
wire [1:0] _CDom_absSigSum_T_2 = io_fromPreMul_highAlignedSigC_0[25:24]; // @[MulAddRecFN.scala:169:7, :209:46]
wire [2:0] _CDom_absSigSum_T_3 = {1'h0, _CDom_absSigSum_T_2}; // @[MulAddRecFN.scala:207:22, :209:46]
wire [46:0] _CDom_absSigSum_T_4 = sigSum[72:26]; // @[MulAddRecFN.scala:192:12, :210:23]
wire [49:0] _CDom_absSigSum_T_5 = {_CDom_absSigSum_T_3, _CDom_absSigSum_T_4}; // @[MulAddRecFN.scala:207:22, :209:71, :210:23]
wire [49:0] CDom_absSigSum = io_fromPreMul_doSubMags_0 ? _CDom_absSigSum_T_1 : _CDom_absSigSum_T_5; // @[MulAddRecFN.scala:169:7, :205:12, :206:13, :209:71]
wire [23:0] _CDom_absSigSumExtra_T = sigSum[24:1]; // @[MulAddRecFN.scala:192:12, :215:21]
wire [23:0] _CDom_absSigSumExtra_T_1 = ~_CDom_absSigSumExtra_T; // @[MulAddRecFN.scala:215:{14,21}]
wire _CDom_absSigSumExtra_T_2 = |_CDom_absSigSumExtra_T_1; // @[MulAddRecFN.scala:215:{14,36}]
wire [24:0] _CDom_absSigSumExtra_T_3 = sigSum[25:1]; // @[MulAddRecFN.scala:192:12, :216:19]
wire _CDom_absSigSumExtra_T_4 = |_CDom_absSigSumExtra_T_3; // @[MulAddRecFN.scala:216:{19,37}]
wire CDom_absSigSumExtra = io_fromPreMul_doSubMags_0 ? _CDom_absSigSumExtra_T_2 : _CDom_absSigSumExtra_T_4; // @[MulAddRecFN.scala:169:7, :214:12, :215:36, :216:37]
wire [80:0] _CDom_mainSig_T = {31'h0, CDom_absSigSum} << io_fromPreMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:169:7, :205:12, :219:24]
wire [28:0] CDom_mainSig = _CDom_mainSig_T[49:21]; // @[MulAddRecFN.scala:219:{24,56}]
wire [23:0] _CDom_reduced4SigExtra_T = CDom_absSigSum[23:0]; // @[MulAddRecFN.scala:205:12, :222:36]
wire [26:0] _CDom_reduced4SigExtra_T_1 = {_CDom_reduced4SigExtra_T, 3'h0}; // @[MulAddRecFN.scala:169:7, :172:16, :222:{36,53}]
wire _CDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:120:54]
wire _CDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:123:57]
wire CDom_reduced4SigExtra_reducedVec_0; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_1; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_2; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_3; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_4; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_5; // @[primitives.scala:118:30]
wire CDom_reduced4SigExtra_reducedVec_6; // @[primitives.scala:118:30]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_0_T = _CDom_reduced4SigExtra_T_1[3:0]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_0_T_1 = |_CDom_reduced4SigExtra_reducedVec_0_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_0 = _CDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_1_T = _CDom_reduced4SigExtra_T_1[7:4]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_1_T_1 = |_CDom_reduced4SigExtra_reducedVec_1_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_1 = _CDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_2_T = _CDom_reduced4SigExtra_T_1[11:8]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_2_T_1 = |_CDom_reduced4SigExtra_reducedVec_2_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_2 = _CDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_3_T = _CDom_reduced4SigExtra_T_1[15:12]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_3_T_1 = |_CDom_reduced4SigExtra_reducedVec_3_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_3 = _CDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_4_T = _CDom_reduced4SigExtra_T_1[19:16]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_4_T_1 = |_CDom_reduced4SigExtra_reducedVec_4_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_4 = _CDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:118:30, :120:54]
wire [3:0] _CDom_reduced4SigExtra_reducedVec_5_T = _CDom_reduced4SigExtra_T_1[23:20]; // @[primitives.scala:120:33]
assign _CDom_reduced4SigExtra_reducedVec_5_T_1 = |_CDom_reduced4SigExtra_reducedVec_5_T; // @[primitives.scala:120:{33,54}]
assign CDom_reduced4SigExtra_reducedVec_5 = _CDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:118:30, :120:54]
wire [2:0] _CDom_reduced4SigExtra_reducedVec_6_T = _CDom_reduced4SigExtra_T_1[26:24]; // @[primitives.scala:123:15]
assign _CDom_reduced4SigExtra_reducedVec_6_T_1 = |_CDom_reduced4SigExtra_reducedVec_6_T; // @[primitives.scala:123:{15,57}]
assign CDom_reduced4SigExtra_reducedVec_6 = _CDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:118:30, :123:57]
wire [1:0] CDom_reduced4SigExtra_lo_hi = {CDom_reduced4SigExtra_reducedVec_2, CDom_reduced4SigExtra_reducedVec_1}; // @[primitives.scala:118:30, :124:20]
wire [2:0] CDom_reduced4SigExtra_lo = {CDom_reduced4SigExtra_lo_hi, CDom_reduced4SigExtra_reducedVec_0}; // @[primitives.scala:118:30, :124:20]
wire [1:0] CDom_reduced4SigExtra_hi_lo = {CDom_reduced4SigExtra_reducedVec_4, CDom_reduced4SigExtra_reducedVec_3}; // @[primitives.scala:118:30, :124:20]
wire [1:0] CDom_reduced4SigExtra_hi_hi = {CDom_reduced4SigExtra_reducedVec_6, CDom_reduced4SigExtra_reducedVec_5}; // @[primitives.scala:118:30, :124:20]
wire [3:0] CDom_reduced4SigExtra_hi = {CDom_reduced4SigExtra_hi_hi, CDom_reduced4SigExtra_hi_lo}; // @[primitives.scala:124:20]
wire [6:0] _CDom_reduced4SigExtra_T_2 = {CDom_reduced4SigExtra_hi, CDom_reduced4SigExtra_lo}; // @[primitives.scala:124:20]
wire [2:0] _CDom_reduced4SigExtra_T_3 = io_fromPreMul_CDom_CAlignDist_0[4:2]; // @[MulAddRecFN.scala:169:7, :223:51]
wire [2:0] _CDom_reduced4SigExtra_T_4 = ~_CDom_reduced4SigExtra_T_3; // @[primitives.scala:52:21]
wire [8:0] CDom_reduced4SigExtra_shift = $signed(9'sh100 >>> _CDom_reduced4SigExtra_T_4); // @[primitives.scala:52:21, :76:56]
wire [5:0] _CDom_reduced4SigExtra_T_5 = CDom_reduced4SigExtra_shift[6:1]; // @[primitives.scala:76:56, :78:22]
wire [3:0] _CDom_reduced4SigExtra_T_6 = _CDom_reduced4SigExtra_T_5[3:0]; // @[primitives.scala:77:20, :78:22]
wire [1:0] _CDom_reduced4SigExtra_T_7 = _CDom_reduced4SigExtra_T_6[1:0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_8 = _CDom_reduced4SigExtra_T_7[0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_9 = _CDom_reduced4SigExtra_T_7[1]; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_10 = {_CDom_reduced4SigExtra_T_8, _CDom_reduced4SigExtra_T_9}; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_11 = _CDom_reduced4SigExtra_T_6[3:2]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_12 = _CDom_reduced4SigExtra_T_11[0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_13 = _CDom_reduced4SigExtra_T_11[1]; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_14 = {_CDom_reduced4SigExtra_T_12, _CDom_reduced4SigExtra_T_13}; // @[primitives.scala:77:20]
wire [3:0] _CDom_reduced4SigExtra_T_15 = {_CDom_reduced4SigExtra_T_10, _CDom_reduced4SigExtra_T_14}; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_16 = _CDom_reduced4SigExtra_T_5[5:4]; // @[primitives.scala:77:20, :78:22]
wire _CDom_reduced4SigExtra_T_17 = _CDom_reduced4SigExtra_T_16[0]; // @[primitives.scala:77:20]
wire _CDom_reduced4SigExtra_T_18 = _CDom_reduced4SigExtra_T_16[1]; // @[primitives.scala:77:20]
wire [1:0] _CDom_reduced4SigExtra_T_19 = {_CDom_reduced4SigExtra_T_17, _CDom_reduced4SigExtra_T_18}; // @[primitives.scala:77:20]
wire [5:0] _CDom_reduced4SigExtra_T_20 = {_CDom_reduced4SigExtra_T_15, _CDom_reduced4SigExtra_T_19}; // @[primitives.scala:77:20]
wire [6:0] _CDom_reduced4SigExtra_T_21 = {1'h0, _CDom_reduced4SigExtra_T_2[5:0] & _CDom_reduced4SigExtra_T_20}; // @[primitives.scala:77:20, :124:20]
wire CDom_reduced4SigExtra = |_CDom_reduced4SigExtra_T_21; // @[MulAddRecFN.scala:222:72, :223:73]
wire [25:0] _CDom_sig_T = CDom_mainSig[28:3]; // @[MulAddRecFN.scala:219:56, :225:25]
wire [2:0] _CDom_sig_T_1 = CDom_mainSig[2:0]; // @[MulAddRecFN.scala:219:56, :226:25]
wire _CDom_sig_T_2 = |_CDom_sig_T_1; // @[MulAddRecFN.scala:226:{25,32}]
wire _CDom_sig_T_3 = _CDom_sig_T_2 | CDom_reduced4SigExtra; // @[MulAddRecFN.scala:223:73, :226:{32,36}]
wire _CDom_sig_T_4 = _CDom_sig_T_3 | CDom_absSigSumExtra; // @[MulAddRecFN.scala:214:12, :226:{36,61}]
wire [26:0] CDom_sig = {_CDom_sig_T, _CDom_sig_T_4}; // @[MulAddRecFN.scala:225:{12,25}, :226:61]
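  // notCDom_* signals handle the case where C does not dominate: the signed sum may
  // partially or completely cancel, so its absolute value is renormalized using a
  // leading-nonzero search over 2-bit groups of the reduced sum.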
wire notCDom_signSigSum = sigSum[51]; // @[MulAddRecFN.scala:192:12, :232:36]
wire [50:0] _notCDom_absSigSum_T = sigSum[50:0]; // @[MulAddRecFN.scala:192:12, :235:20]
wire [50:0] _notCDom_absSigSum_T_2 = sigSum[50:0]; // @[MulAddRecFN.scala:192:12, :235:20, :236:19]
wire [50:0] _notCDom_absSigSum_T_1 = ~_notCDom_absSigSum_T; // @[MulAddRecFN.scala:235:{13,20}]
wire [51:0] _notCDom_absSigSum_T_3 = {1'h0, _notCDom_absSigSum_T_2} + {51'h0, io_fromPreMul_doSubMags_0}; // @[MulAddRecFN.scala:169:7, :236:{19,41}]
wire [50:0] _notCDom_absSigSum_T_4 = _notCDom_absSigSum_T_3[50:0]; // @[MulAddRecFN.scala:236:41]
wire [50:0] notCDom_absSigSum = notCDom_signSigSum ? _notCDom_absSigSum_T_1 : _notCDom_absSigSum_T_4; // @[MulAddRecFN.scala:232:36, :234:12, :235:13, :236:41]
wire _notCDom_reduced2AbsSigSum_reducedVec_0_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_1_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_2_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_3_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_4_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_5_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_6_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_7_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_8_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_9_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_10_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_11_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_12_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_13_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_14_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_15_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_16_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_17_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_18_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_19_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_20_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_21_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_22_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_23_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_24_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_25_T_1; // @[primitives.scala:106:57]
wire notCDom_reduced2AbsSigSum_reducedVec_0; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_1; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_2; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_3; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_4; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_5; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_6; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_7; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_8; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_9; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_10; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_11; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_12; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_13; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_14; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_15; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_16; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_17; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_18; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_19; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_20; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_21; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_22; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_23; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_24; // @[primitives.scala:101:30]
wire notCDom_reduced2AbsSigSum_reducedVec_25; // @[primitives.scala:101:30]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_0_T = notCDom_absSigSum[1:0]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_0_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_0_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_0 = _notCDom_reduced2AbsSigSum_reducedVec_0_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_1_T = notCDom_absSigSum[3:2]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_1_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_1_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_1 = _notCDom_reduced2AbsSigSum_reducedVec_1_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_2_T = notCDom_absSigSum[5:4]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_2_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_2_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_2 = _notCDom_reduced2AbsSigSum_reducedVec_2_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_3_T = notCDom_absSigSum[7:6]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_3_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_3_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_3 = _notCDom_reduced2AbsSigSum_reducedVec_3_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_4_T = notCDom_absSigSum[9:8]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_4_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_4_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_4 = _notCDom_reduced2AbsSigSum_reducedVec_4_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_5_T = notCDom_absSigSum[11:10]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_5_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_5_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_5 = _notCDom_reduced2AbsSigSum_reducedVec_5_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_6_T = notCDom_absSigSum[13:12]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_6_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_6_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_6 = _notCDom_reduced2AbsSigSum_reducedVec_6_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_7_T = notCDom_absSigSum[15:14]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_7_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_7_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_7 = _notCDom_reduced2AbsSigSum_reducedVec_7_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_8_T = notCDom_absSigSum[17:16]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_8_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_8_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_8 = _notCDom_reduced2AbsSigSum_reducedVec_8_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_9_T = notCDom_absSigSum[19:18]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_9_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_9_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_9 = _notCDom_reduced2AbsSigSum_reducedVec_9_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_10_T = notCDom_absSigSum[21:20]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_10_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_10_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_10 = _notCDom_reduced2AbsSigSum_reducedVec_10_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_11_T = notCDom_absSigSum[23:22]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_11_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_11_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_11 = _notCDom_reduced2AbsSigSum_reducedVec_11_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_12_T = notCDom_absSigSum[25:24]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_12_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_12_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_12 = _notCDom_reduced2AbsSigSum_reducedVec_12_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_13_T = notCDom_absSigSum[27:26]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_13_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_13_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_13 = _notCDom_reduced2AbsSigSum_reducedVec_13_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_14_T = notCDom_absSigSum[29:28]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_14_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_14_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_14 = _notCDom_reduced2AbsSigSum_reducedVec_14_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_15_T = notCDom_absSigSum[31:30]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_15_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_15_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_15 = _notCDom_reduced2AbsSigSum_reducedVec_15_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_16_T = notCDom_absSigSum[33:32]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_16_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_16_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_16 = _notCDom_reduced2AbsSigSum_reducedVec_16_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_17_T = notCDom_absSigSum[35:34]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_17_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_17_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_17 = _notCDom_reduced2AbsSigSum_reducedVec_17_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_18_T = notCDom_absSigSum[37:36]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_18_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_18_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_18 = _notCDom_reduced2AbsSigSum_reducedVec_18_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_19_T = notCDom_absSigSum[39:38]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_19_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_19_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_19 = _notCDom_reduced2AbsSigSum_reducedVec_19_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_20_T = notCDom_absSigSum[41:40]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_20_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_20_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_20 = _notCDom_reduced2AbsSigSum_reducedVec_20_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_21_T = notCDom_absSigSum[43:42]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_21_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_21_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_21 = _notCDom_reduced2AbsSigSum_reducedVec_21_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_22_T = notCDom_absSigSum[45:44]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_22_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_22_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_22 = _notCDom_reduced2AbsSigSum_reducedVec_22_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_23_T = notCDom_absSigSum[47:46]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_23_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_23_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_23 = _notCDom_reduced2AbsSigSum_reducedVec_23_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced2AbsSigSum_reducedVec_24_T = notCDom_absSigSum[49:48]; // @[primitives.scala:103:33]
assign _notCDom_reduced2AbsSigSum_reducedVec_24_T_1 = |_notCDom_reduced2AbsSigSum_reducedVec_24_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced2AbsSigSum_reducedVec_24 = _notCDom_reduced2AbsSigSum_reducedVec_24_T_1; // @[primitives.scala:101:30, :103:54]
wire _notCDom_reduced2AbsSigSum_reducedVec_25_T = notCDom_absSigSum[50]; // @[primitives.scala:106:15]
assign _notCDom_reduced2AbsSigSum_reducedVec_25_T_1 = _notCDom_reduced2AbsSigSum_reducedVec_25_T; // @[primitives.scala:106:{15,57}]
assign notCDom_reduced2AbsSigSum_reducedVec_25 = _notCDom_reduced2AbsSigSum_reducedVec_25_T_1; // @[primitives.scala:101:30, :106:57]
wire [1:0] notCDom_reduced2AbsSigSum_lo_lo_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_2, notCDom_reduced2AbsSigSum_reducedVec_1}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_lo_lo_lo = {notCDom_reduced2AbsSigSum_lo_lo_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_0}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_lo_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_5, notCDom_reduced2AbsSigSum_reducedVec_4}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_lo_lo_hi = {notCDom_reduced2AbsSigSum_lo_lo_hi_hi, notCDom_reduced2AbsSigSum_reducedVec_3}; // @[primitives.scala:101:30, :107:20]
wire [5:0] notCDom_reduced2AbsSigSum_lo_lo = {notCDom_reduced2AbsSigSum_lo_lo_hi, notCDom_reduced2AbsSigSum_lo_lo_lo}; // @[primitives.scala:107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_hi_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_8, notCDom_reduced2AbsSigSum_reducedVec_7}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_lo_hi_lo = {notCDom_reduced2AbsSigSum_lo_hi_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_6}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_hi_hi_lo = {notCDom_reduced2AbsSigSum_reducedVec_10, notCDom_reduced2AbsSigSum_reducedVec_9}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_lo_hi_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_12, notCDom_reduced2AbsSigSum_reducedVec_11}; // @[primitives.scala:101:30, :107:20]
wire [3:0] notCDom_reduced2AbsSigSum_lo_hi_hi = {notCDom_reduced2AbsSigSum_lo_hi_hi_hi, notCDom_reduced2AbsSigSum_lo_hi_hi_lo}; // @[primitives.scala:107:20]
wire [6:0] notCDom_reduced2AbsSigSum_lo_hi = {notCDom_reduced2AbsSigSum_lo_hi_hi, notCDom_reduced2AbsSigSum_lo_hi_lo}; // @[primitives.scala:107:20]
wire [12:0] notCDom_reduced2AbsSigSum_lo = {notCDom_reduced2AbsSigSum_lo_hi, notCDom_reduced2AbsSigSum_lo_lo}; // @[primitives.scala:107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_lo_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_15, notCDom_reduced2AbsSigSum_reducedVec_14}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_hi_lo_lo = {notCDom_reduced2AbsSigSum_hi_lo_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_13}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_lo_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_18, notCDom_reduced2AbsSigSum_reducedVec_17}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_hi_lo_hi = {notCDom_reduced2AbsSigSum_hi_lo_hi_hi, notCDom_reduced2AbsSigSum_reducedVec_16}; // @[primitives.scala:101:30, :107:20]
wire [5:0] notCDom_reduced2AbsSigSum_hi_lo = {notCDom_reduced2AbsSigSum_hi_lo_hi, notCDom_reduced2AbsSigSum_hi_lo_lo}; // @[primitives.scala:107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_hi_lo_hi = {notCDom_reduced2AbsSigSum_reducedVec_21, notCDom_reduced2AbsSigSum_reducedVec_20}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced2AbsSigSum_hi_hi_lo = {notCDom_reduced2AbsSigSum_hi_hi_lo_hi, notCDom_reduced2AbsSigSum_reducedVec_19}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_hi_hi_lo = {notCDom_reduced2AbsSigSum_reducedVec_23, notCDom_reduced2AbsSigSum_reducedVec_22}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced2AbsSigSum_hi_hi_hi_hi = {notCDom_reduced2AbsSigSum_reducedVec_25, notCDom_reduced2AbsSigSum_reducedVec_24}; // @[primitives.scala:101:30, :107:20]
wire [3:0] notCDom_reduced2AbsSigSum_hi_hi_hi = {notCDom_reduced2AbsSigSum_hi_hi_hi_hi, notCDom_reduced2AbsSigSum_hi_hi_hi_lo}; // @[primitives.scala:107:20]
wire [6:0] notCDom_reduced2AbsSigSum_hi_hi = {notCDom_reduced2AbsSigSum_hi_hi_hi, notCDom_reduced2AbsSigSum_hi_hi_lo}; // @[primitives.scala:107:20]
wire [12:0] notCDom_reduced2AbsSigSum_hi = {notCDom_reduced2AbsSigSum_hi_hi, notCDom_reduced2AbsSigSum_hi_lo}; // @[primitives.scala:107:20]
wire [25:0] notCDom_reduced2AbsSigSum = {notCDom_reduced2AbsSigSum_hi, notCDom_reduced2AbsSigSum_lo}; // @[primitives.scala:107:20]
wire _notCDom_normDistReduced2_T = notCDom_reduced2AbsSigSum[0]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_1 = notCDom_reduced2AbsSigSum[1]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_2 = notCDom_reduced2AbsSigSum[2]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_3 = notCDom_reduced2AbsSigSum[3]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_4 = notCDom_reduced2AbsSigSum[4]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_5 = notCDom_reduced2AbsSigSum[5]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_6 = notCDom_reduced2AbsSigSum[6]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_7 = notCDom_reduced2AbsSigSum[7]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_8 = notCDom_reduced2AbsSigSum[8]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_9 = notCDom_reduced2AbsSigSum[9]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_10 = notCDom_reduced2AbsSigSum[10]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_11 = notCDom_reduced2AbsSigSum[11]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_12 = notCDom_reduced2AbsSigSum[12]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_13 = notCDom_reduced2AbsSigSum[13]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_14 = notCDom_reduced2AbsSigSum[14]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_15 = notCDom_reduced2AbsSigSum[15]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_16 = notCDom_reduced2AbsSigSum[16]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_17 = notCDom_reduced2AbsSigSum[17]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_18 = notCDom_reduced2AbsSigSum[18]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_19 = notCDom_reduced2AbsSigSum[19]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_20 = notCDom_reduced2AbsSigSum[20]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_21 = notCDom_reduced2AbsSigSum[21]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_22 = notCDom_reduced2AbsSigSum[22]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_23 = notCDom_reduced2AbsSigSum[23]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_24 = notCDom_reduced2AbsSigSum[24]; // @[primitives.scala:91:52, :107:20]
wire _notCDom_normDistReduced2_T_25 = notCDom_reduced2AbsSigSum[25]; // @[primitives.scala:91:52, :107:20]
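  // Priority mux over the 2-bit-reduced sum, most-significant group first: the result is
  // the normalization distance in units of two bits; notCDom_nearNormDist below doubles it
  // back into a bit count.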
wire [4:0] _notCDom_normDistReduced2_T_26 = {4'hC, ~_notCDom_normDistReduced2_T_1}; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_27 = _notCDom_normDistReduced2_T_2 ? 5'h17 : _notCDom_normDistReduced2_T_26; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_28 = _notCDom_normDistReduced2_T_3 ? 5'h16 : _notCDom_normDistReduced2_T_27; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_29 = _notCDom_normDistReduced2_T_4 ? 5'h15 : _notCDom_normDistReduced2_T_28; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_30 = _notCDom_normDistReduced2_T_5 ? 5'h14 : _notCDom_normDistReduced2_T_29; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_31 = _notCDom_normDistReduced2_T_6 ? 5'h13 : _notCDom_normDistReduced2_T_30; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_32 = _notCDom_normDistReduced2_T_7 ? 5'h12 : _notCDom_normDistReduced2_T_31; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_33 = _notCDom_normDistReduced2_T_8 ? 5'h11 : _notCDom_normDistReduced2_T_32; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_34 = _notCDom_normDistReduced2_T_9 ? 5'h10 : _notCDom_normDistReduced2_T_33; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_35 = _notCDom_normDistReduced2_T_10 ? 5'hF : _notCDom_normDistReduced2_T_34; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_36 = _notCDom_normDistReduced2_T_11 ? 5'hE : _notCDom_normDistReduced2_T_35; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_37 = _notCDom_normDistReduced2_T_12 ? 5'hD : _notCDom_normDistReduced2_T_36; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_38 = _notCDom_normDistReduced2_T_13 ? 5'hC : _notCDom_normDistReduced2_T_37; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_39 = _notCDom_normDistReduced2_T_14 ? 5'hB : _notCDom_normDistReduced2_T_38; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_40 = _notCDom_normDistReduced2_T_15 ? 5'hA : _notCDom_normDistReduced2_T_39; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_41 = _notCDom_normDistReduced2_T_16 ? 5'h9 : _notCDom_normDistReduced2_T_40; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_42 = _notCDom_normDistReduced2_T_17 ? 5'h8 : _notCDom_normDistReduced2_T_41; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_43 = _notCDom_normDistReduced2_T_18 ? 5'h7 : _notCDom_normDistReduced2_T_42; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_44 = _notCDom_normDistReduced2_T_19 ? 5'h6 : _notCDom_normDistReduced2_T_43; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_45 = _notCDom_normDistReduced2_T_20 ? 5'h5 : _notCDom_normDistReduced2_T_44; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_46 = _notCDom_normDistReduced2_T_21 ? 5'h4 : _notCDom_normDistReduced2_T_45; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_47 = _notCDom_normDistReduced2_T_22 ? 5'h3 : _notCDom_normDistReduced2_T_46; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_48 = _notCDom_normDistReduced2_T_23 ? 5'h2 : _notCDom_normDistReduced2_T_47; // @[Mux.scala:50:70]
wire [4:0] _notCDom_normDistReduced2_T_49 = _notCDom_normDistReduced2_T_24 ? 5'h1 : _notCDom_normDistReduced2_T_48; // @[Mux.scala:50:70]
wire [4:0] notCDom_normDistReduced2 = _notCDom_normDistReduced2_T_25 ? 5'h0 : _notCDom_normDistReduced2_T_49; // @[Mux.scala:50:70]
wire [5:0] notCDom_nearNormDist = {notCDom_normDistReduced2, 1'h0}; // @[Mux.scala:50:70]
wire [6:0] _notCDom_sExp_T = {1'h0, notCDom_nearNormDist}; // @[MulAddRecFN.scala:240:56, :241:76]
wire [10:0] _notCDom_sExp_T_1 = _GEN - {{4{_notCDom_sExp_T[6]}}, _notCDom_sExp_T}; // @[MulAddRecFN.scala:203:43, :241:{46,76}]
wire [9:0] _notCDom_sExp_T_2 = _notCDom_sExp_T_1[9:0]; // @[MulAddRecFN.scala:241:46]
wire [9:0] notCDom_sExp = _notCDom_sExp_T_2; // @[MulAddRecFN.scala:241:46]
assign _io_rawOut_sExp_T = notCDom_sExp; // @[MulAddRecFN.scala:241:46, :293:26]
wire [113:0] _notCDom_mainSig_T = {63'h0, notCDom_absSigSum} << notCDom_nearNormDist; // @[MulAddRecFN.scala:234:12, :240:56, :243:27]
wire [28:0] notCDom_mainSig = _notCDom_mainSig_T[51:23]; // @[MulAddRecFN.scala:243:{27,50}]
wire [12:0] _notCDom_reduced4SigExtra_T = notCDom_reduced2AbsSigSum[12:0]; // @[primitives.scala:107:20]
wire [12:0] _notCDom_reduced4SigExtra_T_1 = _notCDom_reduced4SigExtra_T; // @[MulAddRecFN.scala:247:{39,55}]
wire _notCDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:103:54]
wire _notCDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:106:57]
wire notCDom_reduced4SigExtra_reducedVec_0; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_1; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_2; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_3; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_4; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_5; // @[primitives.scala:101:30]
wire notCDom_reduced4SigExtra_reducedVec_6; // @[primitives.scala:101:30]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_0_T = _notCDom_reduced4SigExtra_T_1[1:0]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_0_T_1 = |_notCDom_reduced4SigExtra_reducedVec_0_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_0 = _notCDom_reduced4SigExtra_reducedVec_0_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_1_T = _notCDom_reduced4SigExtra_T_1[3:2]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_1_T_1 = |_notCDom_reduced4SigExtra_reducedVec_1_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_1 = _notCDom_reduced4SigExtra_reducedVec_1_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_2_T = _notCDom_reduced4SigExtra_T_1[5:4]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_2_T_1 = |_notCDom_reduced4SigExtra_reducedVec_2_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_2 = _notCDom_reduced4SigExtra_reducedVec_2_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_3_T = _notCDom_reduced4SigExtra_T_1[7:6]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_3_T_1 = |_notCDom_reduced4SigExtra_reducedVec_3_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_3 = _notCDom_reduced4SigExtra_reducedVec_3_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_4_T = _notCDom_reduced4SigExtra_T_1[9:8]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_4_T_1 = |_notCDom_reduced4SigExtra_reducedVec_4_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_4 = _notCDom_reduced4SigExtra_reducedVec_4_T_1; // @[primitives.scala:101:30, :103:54]
wire [1:0] _notCDom_reduced4SigExtra_reducedVec_5_T = _notCDom_reduced4SigExtra_T_1[11:10]; // @[primitives.scala:103:33]
assign _notCDom_reduced4SigExtra_reducedVec_5_T_1 = |_notCDom_reduced4SigExtra_reducedVec_5_T; // @[primitives.scala:103:{33,54}]
assign notCDom_reduced4SigExtra_reducedVec_5 = _notCDom_reduced4SigExtra_reducedVec_5_T_1; // @[primitives.scala:101:30, :103:54]
wire _notCDom_reduced4SigExtra_reducedVec_6_T = _notCDom_reduced4SigExtra_T_1[12]; // @[primitives.scala:106:15]
assign _notCDom_reduced4SigExtra_reducedVec_6_T_1 = _notCDom_reduced4SigExtra_reducedVec_6_T; // @[primitives.scala:106:{15,57}]
assign notCDom_reduced4SigExtra_reducedVec_6 = _notCDom_reduced4SigExtra_reducedVec_6_T_1; // @[primitives.scala:101:30, :106:57]
wire [1:0] notCDom_reduced4SigExtra_lo_hi = {notCDom_reduced4SigExtra_reducedVec_2, notCDom_reduced4SigExtra_reducedVec_1}; // @[primitives.scala:101:30, :107:20]
wire [2:0] notCDom_reduced4SigExtra_lo = {notCDom_reduced4SigExtra_lo_hi, notCDom_reduced4SigExtra_reducedVec_0}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced4SigExtra_hi_lo = {notCDom_reduced4SigExtra_reducedVec_4, notCDom_reduced4SigExtra_reducedVec_3}; // @[primitives.scala:101:30, :107:20]
wire [1:0] notCDom_reduced4SigExtra_hi_hi = {notCDom_reduced4SigExtra_reducedVec_6, notCDom_reduced4SigExtra_reducedVec_5}; // @[primitives.scala:101:30, :107:20]
wire [3:0] notCDom_reduced4SigExtra_hi = {notCDom_reduced4SigExtra_hi_hi, notCDom_reduced4SigExtra_hi_lo}; // @[primitives.scala:107:20]
wire [6:0] _notCDom_reduced4SigExtra_T_2 = {notCDom_reduced4SigExtra_hi, notCDom_reduced4SigExtra_lo}; // @[primitives.scala:107:20]
wire [3:0] _notCDom_reduced4SigExtra_T_3 = notCDom_normDistReduced2[4:1]; // @[Mux.scala:50:70]
wire [3:0] _notCDom_reduced4SigExtra_T_4 = ~_notCDom_reduced4SigExtra_T_3; // @[primitives.scala:52:21]
wire [16:0] notCDom_reduced4SigExtra_shift = $signed(17'sh10000 >>> _notCDom_reduced4SigExtra_T_4); // @[primitives.scala:52:21, :76:56]
wire [5:0] _notCDom_reduced4SigExtra_T_5 = notCDom_reduced4SigExtra_shift[6:1]; // @[primitives.scala:76:56, :78:22]
wire [3:0] _notCDom_reduced4SigExtra_T_6 = _notCDom_reduced4SigExtra_T_5[3:0]; // @[primitives.scala:77:20, :78:22]
wire [1:0] _notCDom_reduced4SigExtra_T_7 = _notCDom_reduced4SigExtra_T_6[1:0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_8 = _notCDom_reduced4SigExtra_T_7[0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_9 = _notCDom_reduced4SigExtra_T_7[1]; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_10 = {_notCDom_reduced4SigExtra_T_8, _notCDom_reduced4SigExtra_T_9}; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_11 = _notCDom_reduced4SigExtra_T_6[3:2]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_12 = _notCDom_reduced4SigExtra_T_11[0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_13 = _notCDom_reduced4SigExtra_T_11[1]; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_14 = {_notCDom_reduced4SigExtra_T_12, _notCDom_reduced4SigExtra_T_13}; // @[primitives.scala:77:20]
wire [3:0] _notCDom_reduced4SigExtra_T_15 = {_notCDom_reduced4SigExtra_T_10, _notCDom_reduced4SigExtra_T_14}; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_16 = _notCDom_reduced4SigExtra_T_5[5:4]; // @[primitives.scala:77:20, :78:22]
wire _notCDom_reduced4SigExtra_T_17 = _notCDom_reduced4SigExtra_T_16[0]; // @[primitives.scala:77:20]
wire _notCDom_reduced4SigExtra_T_18 = _notCDom_reduced4SigExtra_T_16[1]; // @[primitives.scala:77:20]
wire [1:0] _notCDom_reduced4SigExtra_T_19 = {_notCDom_reduced4SigExtra_T_17, _notCDom_reduced4SigExtra_T_18}; // @[primitives.scala:77:20]
wire [5:0] _notCDom_reduced4SigExtra_T_20 = {_notCDom_reduced4SigExtra_T_15, _notCDom_reduced4SigExtra_T_19}; // @[primitives.scala:77:20]
wire [6:0] _notCDom_reduced4SigExtra_T_21 = {1'h0, _notCDom_reduced4SigExtra_T_2[5:0] & _notCDom_reduced4SigExtra_T_20}; // @[primitives.scala:77:20, :107:20]
wire notCDom_reduced4SigExtra = |_notCDom_reduced4SigExtra_T_21; // @[MulAddRecFN.scala:247:78, :249:11]
wire [25:0] _notCDom_sig_T = notCDom_mainSig[28:3]; // @[MulAddRecFN.scala:243:50, :251:28]
wire [2:0] _notCDom_sig_T_1 = notCDom_mainSig[2:0]; // @[MulAddRecFN.scala:243:50, :252:28]
wire _notCDom_sig_T_2 = |_notCDom_sig_T_1; // @[MulAddRecFN.scala:252:{28,35}]
wire _notCDom_sig_T_3 = _notCDom_sig_T_2 | notCDom_reduced4SigExtra; // @[MulAddRecFN.scala:249:11, :252:{35,39}]
wire [26:0] notCDom_sig = {_notCDom_sig_T, _notCDom_sig_T_3}; // @[MulAddRecFN.scala:251:{12,28}, :252:39]
assign _io_rawOut_sig_T = notCDom_sig; // @[MulAddRecFN.scala:251:12, :294:25]
wire [1:0] _notCDom_completeCancellation_T = notCDom_sig[26:25]; // @[MulAddRecFN.scala:251:12, :255:21]
wire notCDom_completeCancellation = _notCDom_completeCancellation_T == 2'h0; // @[primitives.scala:103:54]
wire _io_rawOut_isZero_T_1 = notCDom_completeCancellation; // @[MulAddRecFN.scala:255:50, :283:42]
wire _notCDom_sign_T = io_fromPreMul_signProd_0 ^ notCDom_signSigSum; // @[MulAddRecFN.scala:169:7, :232:36, :259:36]
wire notCDom_sign = ~notCDom_completeCancellation & _notCDom_sign_T; // @[MulAddRecFN.scala:255:50, :257:12, :259:36]
wire _io_rawOut_sign_T_15 = notCDom_sign; // @[MulAddRecFN.scala:257:12, :292:17]
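  // Final raw-output assembly: infinity/zero special cases, result-sign selection, and the
  // invalid-exception flag; sExp and sig are taken from the notCDom_* path selected above.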
assign notNaN_isInfOut = notNaN_isInfProd; // @[MulAddRecFN.scala:264:49, :265:44]
assign io_rawOut_isInf_0 = notNaN_isInfOut; // @[MulAddRecFN.scala:169:7, :265:44]
wire notNaN_addZeros = _notNaN_addZeros_T; // @[MulAddRecFN.scala:267:{32,58}]
wire _io_rawOut_sign_T_4 = notNaN_addZeros; // @[MulAddRecFN.scala:267:58, :287:26]
wire _io_invalidExc_T_3 = _io_invalidExc_T_1; // @[MulAddRecFN.scala:271:35, :272:57]
assign _io_invalidExc_T_9 = _io_invalidExc_T_3; // @[MulAddRecFN.scala:272:57, :273:57]
wire _io_invalidExc_T_4 = ~io_fromPreMul_isNaNAOrB_0; // @[MulAddRecFN.scala:169:7, :274:10]
wire _io_invalidExc_T_6 = _io_invalidExc_T_4 & _io_invalidExc_T_5; // @[MulAddRecFN.scala:274:{10,36}, :275:36]
assign io_invalidExc_0 = _io_invalidExc_T_9; // @[MulAddRecFN.scala:169:7, :273:57]
assign io_rawOut_isNaN_0 = _io_rawOut_isNaN_T; // @[MulAddRecFN.scala:169:7, :278:48]
assign _io_rawOut_isZero_T_2 = notNaN_addZeros | _io_rawOut_isZero_T_1; // @[MulAddRecFN.scala:267:58, :282:25, :283:42]
assign io_rawOut_isZero_0 = _io_rawOut_isZero_T_2; // @[MulAddRecFN.scala:169:7, :282:25]
wire _io_rawOut_sign_T = notNaN_isInfProd & io_fromPreMul_signProd_0; // @[MulAddRecFN.scala:169:7, :264:49, :285:27]
wire _io_rawOut_sign_T_2 = _io_rawOut_sign_T; // @[MulAddRecFN.scala:285:{27,54}]
wire _io_rawOut_sign_T_5 = _io_rawOut_sign_T_4 & io_fromPreMul_signProd_0; // @[MulAddRecFN.scala:169:7, :287:{26,48}]
wire _io_rawOut_sign_T_6 = _io_rawOut_sign_T_5 & opSignC; // @[MulAddRecFN.scala:190:42, :287:48, :288:36]
wire _io_rawOut_sign_T_7 = _io_rawOut_sign_T_2 | _io_rawOut_sign_T_6; // @[MulAddRecFN.scala:285:54, :286:43, :288:36]
wire _io_rawOut_sign_T_11 = _io_rawOut_sign_T_7; // @[MulAddRecFN.scala:286:43, :288:48]
wire _io_rawOut_sign_T_9 = io_fromPreMul_signProd_0 | opSignC; // @[MulAddRecFN.scala:169:7, :190:42, :290:37]
wire _io_rawOut_sign_T_12 = ~notNaN_isInfOut; // @[MulAddRecFN.scala:265:44, :291:10]
wire _io_rawOut_sign_T_13 = ~notNaN_addZeros; // @[MulAddRecFN.scala:267:58, :291:31]
wire _io_rawOut_sign_T_14 = _io_rawOut_sign_T_12 & _io_rawOut_sign_T_13; // @[MulAddRecFN.scala:291:{10,28,31}]
wire _io_rawOut_sign_T_16 = _io_rawOut_sign_T_14 & _io_rawOut_sign_T_15; // @[MulAddRecFN.scala:291:{28,49}, :292:17]
assign _io_rawOut_sign_T_17 = _io_rawOut_sign_T_11 | _io_rawOut_sign_T_16; // @[MulAddRecFN.scala:288:48, :290:50, :291:49]
assign io_rawOut_sign_0 = _io_rawOut_sign_T_17; // @[MulAddRecFN.scala:169:7, :290:50]
assign io_rawOut_sExp_0 = _io_rawOut_sExp_T; // @[MulAddRecFN.scala:169:7, :293:26]
assign io_rawOut_sig_0 = _io_rawOut_sig_T; // @[MulAddRecFN.scala:169:7, :294:25]
assign io_invalidExc = io_invalidExc_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_sign = io_rawOut_sign_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulAddRecFN.scala:169:7]
assign io_rawOut_sig = io_rawOut_sig_0; // @[MulAddRecFN.scala:169:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File RoundAnyRawFNToRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util.Fill
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundAnyRawFNToRecFN(
inExpWidth: Int,
inSigWidth: Int,
outExpWidth: Int,
outSigWidth: Int,
options: Int
)
extends RawModule
{
override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(inExpWidth, inSigWidth))
// (allowed exponent range has limits)
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((outExpWidth + outSigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0)
val effectiveInSigWidth =
if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1
val neverUnderflows =
((options &
(flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact)
) != 0) ||
(inExpWidth < outExpWidth)
val neverOverflows =
((options & flRoundOpt_neverOverflows) != 0) ||
(inExpWidth < outExpWidth)
val outNaNExp = BigInt(7)<<(outExpWidth - 2)
val outInfExp = BigInt(6)<<(outExpWidth - 2)
val outMaxFiniteExp = outInfExp - 1
val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2
val outMinNonzeroExp = outMinNormExp - outSigWidth + 1
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundingMode_near_even = (io.roundingMode === round_near_even)
val roundingMode_minMag = (io.roundingMode === round_minMag)
val roundingMode_min = (io.roundingMode === round_min)
val roundingMode_max = (io.roundingMode === round_max)
val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag)
val roundingMode_odd = (io.roundingMode === round_odd)
val roundMagUp =
(roundingMode_min && io.in.sign) || (roundingMode_max && ! io.in.sign)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val sAdjustedExp =
if (inExpWidth < outExpWidth)
(io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
)(outExpWidth, 0).zext
else if (inExpWidth == outExpWidth)
io.in.sExp
else
io.in.sExp +&
((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S
val adjustedSig =
if (inSigWidth <= outSigWidth + 2)
io.in.sig<<(outSigWidth - inSigWidth + 2)
else
(io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ##
io.in.sig(inSigWidth - outSigWidth - 2, 0).orR
)
val doShiftSigDown1 =
if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2)
val common_expOut = Wire(UInt((outExpWidth + 1).W))
val common_fractOut = Wire(UInt((outSigWidth - 1).W))
val common_overflow = Wire(Bool())
val common_totalUnderflow = Wire(Bool())
val common_underflow = Wire(Bool())
val common_inexact = Wire(Bool())
if (
neverOverflows && neverUnderflows
&& (effectiveInSigWidth <= outSigWidth)
) {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1
common_fractOut :=
Mux(doShiftSigDown1,
adjustedSig(outSigWidth + 1, 3),
adjustedSig(outSigWidth, 2)
)
common_overflow := false.B
common_totalUnderflow := false.B
common_underflow := false.B
common_inexact := false.B
} else {
//--------------------------------------------------------------------
//--------------------------------------------------------------------
val roundMask =
if (neverUnderflows)
0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W)
else
(lowMask(
sAdjustedExp(outExpWidth, 0),
outMinNormExp - outSigWidth - 1,
outMinNormExp
) | doShiftSigDown1) ##
3.U(2.W)
val shiftedRoundMask = 0.U(1.W) ## roundMask>>1
val roundPosMask = ~shiftedRoundMask & roundMask
val roundPosBit = (adjustedSig & roundPosMask).orR
val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR
val anyRound = roundPosBit || anyRoundExtra
val roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
roundPosBit) ||
(roundMagUp && anyRound)
val roundedSig: Bits =
Mux(roundIncr,
(((adjustedSig | roundMask)>>2) +& 1.U) &
~Mux(roundingMode_near_even && roundPosBit &&
! anyRoundExtra,
roundMask>>1,
0.U((outSigWidth + 2).W)
),
(adjustedSig & ~roundMask)>>2 |
Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U)
)
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext
common_expOut := sRoundedExp(outExpWidth, 0)
common_fractOut :=
Mux(doShiftSigDown1,
roundedSig(outSigWidth - 1, 1),
roundedSig(outSigWidth - 2, 0)
)
common_overflow :=
(if (neverOverflows) false.B else
//*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?:
(sRoundedExp>>(outExpWidth - 1) >= 3.S))
common_totalUnderflow :=
(if (neverUnderflows) false.B else
//*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?:
(sRoundedExp < outMinNonzeroExp.S))
val unboundedRange_roundPosBit =
Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1))
val unboundedRange_anyRound =
(doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR
val unboundedRange_roundIncr =
((roundingMode_near_even || roundingMode_near_maxMag) &&
unboundedRange_roundPosBit) ||
(roundMagUp && unboundedRange_anyRound)
val roundCarry =
Mux(doShiftSigDown1,
roundedSig(outSigWidth + 1),
roundedSig(outSigWidth)
)
common_underflow :=
(if (neverUnderflows) false.B else
common_totalUnderflow ||
//*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING
//*** M.S. BIT OF SUBNORMAL SIG?
(anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) &&
Mux(doShiftSigDown1, roundMask(3), roundMask(2)) &&
! ((io.detectTininess === tininess_afterRounding) &&
! Mux(doShiftSigDown1,
roundMask(4),
roundMask(3)
) &&
roundCarry && roundPosBit &&
unboundedRange_roundIncr)))
common_inexact := common_totalUnderflow || anyRound
}
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val isNaNOut = io.invalidExc || io.in.isNaN
val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf
val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero
val overflow = commonCase && common_overflow
val underflow = commonCase && common_underflow
val inexact = overflow || (commonCase && common_inexact)
val overflow_roundMagUp =
roundingMode_near_even || roundingMode_near_maxMag || roundMagUp
val pegMinNonzeroMagOut =
commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd)
val pegMaxFiniteMagOut = overflow && ! overflow_roundMagUp
val notNaN_isInfOut =
notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp)
val signOut = Mux(isNaNOut, false.B, io.in.sign)
val expOut =
(common_expOut &
~Mux(io.in.isZero || common_totalUnderflow,
(BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMinNonzeroMagOut,
~outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) &
~Mux(pegMaxFiniteMagOut,
(BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W),
0.U
) &
~Mux(notNaN_isInfOut,
(BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W),
0.U
)) |
Mux(pegMinNonzeroMagOut,
outMinNonzeroExp.U((outExpWidth + 1).W),
0.U
) |
Mux(pegMaxFiniteMagOut,
outMaxFiniteExp.U((outExpWidth + 1).W),
0.U
) |
Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) |
Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U)
val fractOut =
Mux(isNaNOut || io.in.isZero || common_totalUnderflow,
Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U),
common_fractOut
) |
Fill(outSigWidth - 1, pegMaxFiniteMagOut)
io.out := signOut ## expOut ## fractOut
io.exceptionFlags :=
io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact
}
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class
RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int)
extends RawModule
{
override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}"
val io = IO(new Bundle {
val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in'
val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign'
val in = Input(new RawFloat(expWidth, sigWidth + 2))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(UInt(1.W))
val out = Output(Bits((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(Bits(5.W))
})
val roundAnyRawFNToRecFN =
Module(
new RoundAnyRawFNToRecFN(
expWidth, sigWidth + 2, expWidth, sigWidth, options))
roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc
roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc
roundAnyRawFNToRecFN.io.in := io.in
roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode
roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundAnyRawFNToRecFN.io.out
io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags
}
| module RoundRawFNToRecFN_e5_s11_14( // @[RoundAnyRawFNToRecFN.scala:295:5]
input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [6:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [13:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:299:16]
input [2:0] io_roundingMode, // @[RoundAnyRawFNToRecFN.scala:299:16]
input io_detectTininess, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [16:0] io_out, // @[RoundAnyRawFNToRecFN.scala:299:16]
output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:299:16]
);
wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [6:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [13:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [2:0] io_roundingMode_0 = io_roundingMode; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_detectTininess_0 = io_detectTininess; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15]
wire [16:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
RoundAnyRawFNToRecFN_ie5_is13_oe5_os11_14 roundAnyRawFNToRecFN ( // @[RoundAnyRawFNToRecFN.scala:310:15]
.io_invalidExc (io_invalidExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isNaN (io_in_isNaN_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isInf (io_in_isInf_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_isZero (io_in_isZero_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sign (io_in_sign_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sExp (io_in_sExp_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_in_sig (io_in_sig_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_roundingMode (io_roundingMode_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_detectTininess (io_detectTininess_0), // @[RoundAnyRawFNToRecFN.scala:295:5]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags_0)
); // @[RoundAnyRawFNToRecFN.scala:310:15]
assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5]
endmodule |
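For orientation only (this is not part of the Chisel sources above or of the generated Verilog), a minimal sketch of how the e5s11 rounding wrapper could be driven. It assumes the same hardfloat package and imports as the file above; the module name RoundHalfExample is hypothetical.

class RoundHalfExample extends Module {
  val io = IO(new Bundle {
    val in             = Input(new RawFloat(5, 13))  // the rounder expects sigWidth + 2 significand bits
    val roundingMode   = Input(UInt(3.W))
    val detectTininess = Input(UInt(1.W))
    val out            = Output(Bits(17.W))          // recoded format is expWidth + sigWidth + 1 bits
    val exceptionFlags = Output(Bits(5.W))
  })
  // No pre-detected exceptions in this sketch: the rounder only sees the raw operand.
  val rounder = Module(new RoundRawFNToRecFN(5, 11, 0))
  rounder.io.invalidExc     := false.B
  rounder.io.infiniteExc    := false.B
  rounder.io.in             := io.in
  rounder.io.roundingMode   := io.roundingMode
  rounder.io.detectTininess := io.detectTininess
  io.out            := rounder.io.out
  io.exceptionFlags := rounder.io.exceptionFlags
}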
Generate the Verilog code corresponding to the following Chisel files.
File loop.scala:
package boom.v4.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v4.common._
import boom.v4.util.{BoomCoreStringPrefix}
import scala.math.min
case class BoomLoopPredictorParams(
nWays: Int = 4,
threshold: Int = 7
)
class LoopBranchPredictorBank(implicit p: Parameters) extends BranchPredictorBank()(p)
{
val tagSz = 10
override val nSets = 16
class LoopMeta extends Bundle {
val s_cnt = UInt(10.W)
}
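  // One loop table entry: 'tag' identifies the branch, 'conf' is a saturating
  // confidence counter, 'age' guards against premature replacement, 'p_cnt' is
  // the learned trip count, and 's_cnt' counts speculative iterations so far.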
class LoopEntry extends Bundle {
val tag = UInt(tagSz.W)
val conf = UInt(3.W)
val age = UInt(3.W)
val p_cnt = UInt(10.W)
val s_cnt = UInt(10.W)
}
class LoopBranchPredictorColumn extends Module {
val io = IO(new Bundle {
val f2_req_valid = Input(Bool())
val f2_req_idx = Input(UInt())
val f3_req_fire = Input(Bool())
val f3_pred_in = Input(Bool())
val f3_pred = Output(Bool())
val f3_meta = Output(new LoopMeta)
val update_mispredict = Input(Bool())
val update_repair = Input(Bool())
val update_idx = Input(UInt())
val update_resolve_dir = Input(Bool())
val update_meta = Input(new LoopMeta)
})
val doing_reset = RegInit(true.B)
val reset_idx = RegInit(0.U(log2Ceil(nSets).W))
reset_idx := reset_idx + doing_reset
when (reset_idx === (nSets-1).U) { doing_reset := false.B }
val entries = Reg(Vec(nSets, new LoopEntry))
val f2_entry = WireInit(entries(io.f2_req_idx))
when (io.update_repair && io.update_idx === io.f2_req_idx) {
f2_entry.s_cnt := io.update_meta.s_cnt
} .elsewhen (io.update_mispredict && io.update_idx === io.f2_req_idx) {
f2_entry.s_cnt := 0.U
}
val f3_entry = RegNext(f2_entry)
val f3_scnt = Mux(io.update_repair && io.update_idx === RegNext(io.f2_req_idx),
io.update_meta.s_cnt,
f3_entry.s_cnt)
val f3_tag = RegNext(io.f2_req_idx(tagSz+log2Ceil(nSets)-1,log2Ceil(nSets)))
io.f3_pred := io.f3_pred_in
io.f3_meta.s_cnt := f3_scnt
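    // Override the incoming prediction only when the tag matches, the entry is
    // fully confident, and the speculative count has reached the learned trip
    // count, i.e. this should be the iteration on which the loop exits.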
when (f3_entry.tag === f3_tag) {
when (f3_scnt === f3_entry.p_cnt && f3_entry.conf === 7.U) {
io.f3_pred := !io.f3_pred_in
}
}
val f4_fire = RegNext(io.f3_req_fire)
val f4_entry = RegNext(f3_entry)
val f4_tag = RegNext(f3_tag)
val f4_scnt = RegNext(f3_scnt)
val f4_idx = RegNext(RegNext(io.f2_req_idx))
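    // F4: once the prediction has actually been consumed, speculatively advance
    // the iteration count, or reset it (and refresh the age) when the loop exit
    // was predicted.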
when (f4_fire) {
when (f4_entry.tag === f4_tag) {
when (f4_scnt === f4_entry.p_cnt && f4_entry.conf === 7.U) {
entries(f4_idx).age := 7.U
entries(f4_idx).s_cnt := 0.U
} .otherwise {
entries(f4_idx).s_cnt := f4_scnt + 1.U
entries(f4_idx).age := Mux(f4_entry.age === 7.U, 7.U, f4_entry.age + 1.U)
}
}
}
val entry = entries(io.update_idx)
val tag = io.update_idx(tagSz+log2Ceil(nSets)-1,log2Ceil(nSets))
val tag_match = entry.tag === tag
val ctr_match = entry.p_cnt === io.update_meta.s_cnt
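    // ctr_match: the branch resolved after exactly p_cnt speculative iterations,
    // i.e. the learned trip count was confirmed.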
val wentry = WireInit(entry)
when (io.update_mispredict && !doing_reset) {
// Learned, tag match -> decrement confidence
when (entry.conf === 7.U && tag_match) {
wentry.s_cnt := 0.U
wentry.conf := entry.conf - 1.U
// Learned, no tag match -> do nothing? Don't evict super-confident entries?
} .elsewhen (entry.conf === 7.U && !tag_match) {
// Confident, tag match, ctr_match -> increment confidence, reset counter
} .elsewhen (entry.conf =/= 0.U && tag_match && ctr_match) {
wentry.conf := entry.conf + 1.U
wentry.s_cnt := 0.U
// Confident, tag match, no ctr match -> zero confidence, reset counter, set previous counter
} .elsewhen (entry.conf =/= 0.U && tag_match && !ctr_match) {
wentry.conf := 0.U
wentry.s_cnt := 0.U
wentry.p_cnt := io.update_meta.s_cnt
// Confident, no tag match, age is 0 -> replace this entry with our own, set our age high to avoid ping-pong
} .elsewhen (entry.conf =/= 0.U && !tag_match && entry.age === 0.U) {
wentry.tag := tag
wentry.conf := 1.U
wentry.s_cnt := 0.U
wentry.p_cnt := io.update_meta.s_cnt
// Confident, no tag match, age > 0 -> decrement age
} .elsewhen (entry.conf =/= 0.U && !tag_match && entry.age =/= 0.U) {
wentry.age := entry.age - 1.U
// Unconfident, tag match, ctr match -> increment confidence
} .elsewhen (entry.conf === 0.U && tag_match && ctr_match) {
wentry.conf := 1.U
wentry.age := 7.U
wentry.s_cnt := 0.U
// Unconfident, tag match, no ctr match -> set previous counter
} .elsewhen (entry.conf === 0.U && tag_match && !ctr_match) {
wentry.p_cnt := io.update_meta.s_cnt
wentry.age := 7.U
wentry.s_cnt := 0.U
// Unconfident, no tag match -> set previous counter and tag
} .elsewhen (entry.conf === 0.U && !tag_match) {
wentry.tag := tag
wentry.conf := 1.U
wentry.age := 7.U
wentry.s_cnt := 0.U
wentry.p_cnt := io.update_meta.s_cnt
}
entries(io.update_idx) := wentry
} .elsewhen (io.update_repair && !doing_reset) {
when (tag_match && !(f4_fire && io.update_idx === f4_idx)) {
wentry.s_cnt := io.update_meta.s_cnt
entries(io.update_idx) := wentry
}
}
when (doing_reset) {
entries(reset_idx) := (0.U).asTypeOf(new LoopEntry)
}
}
val columns = Seq.fill(bankWidth) { Module(new LoopBranchPredictorColumn) }
val mems = Nil // TODO fix
val f3_meta = Wire(Vec(bankWidth, new LoopMeta))
override val metaSz = f3_meta.asUInt.getWidth
val update_meta = s1_update.bits.meta.asTypeOf(Vec(bankWidth, new LoopMeta))
for (w <- 0 until bankWidth) {
columns(w).io.f2_req_valid := s2_valid
columns(w).io.f2_req_idx := s2_idx
columns(w).io.f3_req_fire := (s3_valid && s3_mask(w) && io.f3_fire &&
RegNext(io.resp_in(0).f2(w).predicted_pc.valid && io.resp_in(0).f2(w).is_br))
columns(w).io.f3_pred_in := io.resp_in(0).f3(w).taken
io.resp.f3(w).taken := columns(w).io.f3_pred
columns(w).io.update_mispredict := (s1_update.valid &&
s1_update.bits.br_mask(w) &&
s1_update.bits.is_mispredict_update &&
s1_update.bits.cfi_mispredicted)
columns(w).io.update_repair := (s1_update.valid &&
s1_update.bits.br_mask(w) &&
s1_update.bits.is_repair_update)
columns(w).io.update_idx := s1_update_idx
columns(w).io.update_resolve_dir := s1_update.bits.cfi_taken
columns(w).io.update_meta := update_meta(w)
f3_meta(w) := columns(w).io.f3_meta
}
io.f3_meta := f3_meta.asUInt
}
File predictor.scala:
package boom.v4.ifu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import boom.v4.common._
import boom.v4.util.{BoomCoreStringPrefix}
// A branch prediction for a single instruction
class BranchPrediction(implicit p: Parameters) extends BoomBundle()(p)
{
// If this is a branch, do we take it?
val taken = Bool()
// Is this a branch?
val is_br = Bool()
// Is this a JAL?
val is_jal = Bool()
  // What is the target of this branch/jump? Do we know the target?
val predicted_pc = Valid(UInt(vaddrBitsExtended.W))
}
// A branch prediction for an entire fetch-width's worth of instructions
// This is typically merged from individual predictions from the banked
// predictor
class BranchPredictionBundle(implicit p: Parameters) extends BoomBundle()(p)
with HasBoomFrontendParameters
{
val pc = UInt(vaddrBitsExtended.W)
val preds = Vec(fetchWidth, new BranchPrediction)
val meta = Output(Vec(nBanks, UInt(bpdMaxMetaLength.W)))
val lhist = Output(Vec(nBanks, UInt(localHistoryLength.W)))
}
// A branch update for a fetch-width worth of instructions
class BranchPredictionUpdate(implicit p: Parameters) extends BoomBundle()(p)
with HasBoomFrontendParameters
{
// Indicates that this update is due to a speculated misprediction
// Local predictors typically update themselves with speculative info
// Global predictors only care about non-speculative updates
val is_mispredict_update = Bool()
val is_repair_update = Bool()
val btb_mispredicts = UInt(fetchWidth.W)
def is_btb_mispredict_update = btb_mispredicts =/= 0.U
def is_commit_update = !(is_mispredict_update || is_repair_update || is_btb_mispredict_update)
val pc = UInt(vaddrBitsExtended.W)
// Mask of instructions which are branches.
// If these are not cfi_idx, then they were predicted not taken
val br_mask = UInt(fetchWidth.W)
// Which CFI was taken/mispredicted (if any)
val cfi_idx = Valid(UInt(log2Ceil(fetchWidth).W))
// Was the cfi taken?
val cfi_taken = Bool()
// Was the cfi mispredicted from the original prediction?
val cfi_mispredicted = Bool()
// Was the cfi a br?
val cfi_is_br = Bool()
// Was the cfi a jal/jalr?
val cfi_is_jal = Bool()
// Was the cfi a jalr
val cfi_is_jalr = Bool()
//val cfi_is_ret = Bool()
val ghist = new GlobalHistory
val lhist = Vec(nBanks, UInt(localHistoryLength.W))
// What did this CFI jump to?
val target = UInt(vaddrBitsExtended.W)
val meta = Vec(nBanks, UInt(bpdMaxMetaLength.W))
}
// A branch update to a single bank
class BranchPredictionBankUpdate(implicit p: Parameters) extends BoomBundle()(p)
with HasBoomFrontendParameters
{
val is_mispredict_update = Bool()
val is_repair_update = Bool()
val btb_mispredicts = UInt(bankWidth.W)
def is_btb_mispredict_update = btb_mispredicts =/= 0.U
def is_commit_update = !(is_mispredict_update || is_repair_update || is_btb_mispredict_update)
val pc = UInt(vaddrBitsExtended.W)
val br_mask = UInt(bankWidth.W)
val cfi_idx = Valid(UInt(log2Ceil(bankWidth).W))
val cfi_taken = Bool()
val cfi_mispredicted = Bool()
val cfi_is_br = Bool()
val cfi_is_jal = Bool()
val cfi_is_jalr = Bool()
val ghist = UInt(globalHistoryLength.W)
val lhist = UInt(localHistoryLength.W)
val target = UInt(vaddrBitsExtended.W)
val meta = UInt(bpdMaxMetaLength.W)
}
class BranchPredictionRequest(implicit p: Parameters) extends BoomBundle()(p)
{
val pc = UInt(vaddrBitsExtended.W)
val ghist = new GlobalHistory
}
class BranchPredictionBankResponse(implicit p: Parameters) extends BoomBundle()(p)
with HasBoomFrontendParameters
{
val f1 = Vec(bankWidth, new BranchPrediction)
val f2 = Vec(bankWidth, new BranchPrediction)
val f3 = Vec(bankWidth, new BranchPrediction)
}
abstract class BranchPredictorBank(implicit p: Parameters) extends BoomModule()(p)
with HasBoomFrontendParameters
{
val metaSz = 0
def nInputs = 1
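  // Each entry is (memory name, depth, width in bits); used only by
  // BranchPredictor to build the size report string.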
val mems: Seq[Tuple3[String, Int, Int]]
val io = IO(new Bundle {
val f0_valid = Input(Bool())
val f0_pc = Input(UInt(vaddrBitsExtended.W))
val f0_mask = Input(UInt(bankWidth.W))
// Local history not available until end of f1
val f1_ghist = Input(UInt(globalHistoryLength.W))
val f1_lhist = Input(UInt(localHistoryLength.W))
val resp_in = Input(Vec(nInputs, new BranchPredictionBankResponse))
val resp = Output(new BranchPredictionBankResponse)
// Store the meta as a UInt, use width inference to figure out the shape
val f3_meta = Output(UInt(bpdMaxMetaLength.W))
val f3_fire = Input(Bool())
val update = Input(Valid(new BranchPredictionBankUpdate))
})
io.resp := io.resp_in(0)
io.f3_meta := 0.U
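  // The s0-s3 signals pipeline the request alongside the frontend's F0-F3 fetch
  // stages; each sub-predictor just needs the index, valid, and mask per stage.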
val s0_idx = fetchIdx(io.f0_pc)
val s1_idx = RegNext(s0_idx)
val s2_idx = RegNext(s1_idx)
val s3_idx = RegNext(s2_idx)
val s0_valid = io.f0_valid
val s1_valid = RegNext(s0_valid)
val s2_valid = RegNext(s1_valid)
val s3_valid = RegNext(s2_valid)
val s0_mask = io.f0_mask
val s1_mask = RegNext(s0_mask)
val s2_mask = RegNext(s1_mask)
val s3_mask = RegNext(s2_mask)
val s0_pc = bankAlign(io.f0_pc)
val s1_pc = RegNext(s0_pc)
val s2_pc = RegNext(s1_pc)
val s0_update = io.update
val s0_update_idx = fetchIdx(io.update.bits.pc)
val s0_update_valid = io.update.valid
val s1_update = RegNext(s0_update)
s1_update.bits.pc := bankAlign(s0_update.bits.pc)
val s1_update_idx = RegNext(s0_update_idx)
val s1_update_valid = RegNext(s0_update_valid)
}
class BranchPredictor(implicit p: Parameters) extends BoomModule()(p)
with HasBoomFrontendParameters
{
val io = IO(new Bundle {
// Requests and responses
val f0_req = Input(Valid(new BranchPredictionRequest))
val resp = Output(new Bundle {
val f1 = new BranchPredictionBundle
val f2 = new BranchPredictionBundle
val f3 = new BranchPredictionBundle
})
val f3_fire = Input(Bool())
// Update
val update = Input(Valid(new BranchPredictionUpdate))
})
var total_memsize = 0
val bpdStr = new StringBuilder
bpdStr.append(BoomCoreStringPrefix("==Branch Predictor Memory Sizes==\n"))
val banked_predictors = (0 until nBanks) map ( b => {
val m = Module(if (useBPD) new ComposedBranchPredictorBank else new NullBranchPredictorBank)
for ((n, d, w) <- m.mems) {
bpdStr.append(BoomCoreStringPrefix(f"bank$b $n: $d x $w = ${d * w / 8}"))
total_memsize = total_memsize + d * w / 8
}
m
})
bpdStr.append(BoomCoreStringPrefix(f"Total bpd size: ${total_memsize / 1024} KB\n"))
override def toString: String = bpdStr.toString
val banked_lhist_providers = Seq.fill(nBanks) { Module(if (localHistoryNSets > 0) new LocalBranchPredictorBank else new NullLocalBranchPredictorBank) }
if (nBanks == 1) {
banked_lhist_providers(0).io.f0_valid := io.f0_req.valid
banked_lhist_providers(0).io.f0_pc := bankAlign(io.f0_req.bits.pc)
banked_predictors(0).io.f0_valid := io.f0_req.valid
banked_predictors(0).io.f0_pc := io.f0_req.bits.pc
banked_predictors(0).io.f0_mask := fetchMask(io.f0_req.bits.pc)
banked_predictors(0).io.f1_ghist := RegNext(io.f0_req.bits.ghist.histories(0))
banked_predictors(0).io.f1_lhist := banked_lhist_providers(0).io.f1_lhist
banked_predictors(0).io.resp_in(0) := (0.U).asTypeOf(new BranchPredictionBankResponse)
} else {
require(nBanks == 2)
banked_predictors(0).io.resp_in(0) := (0.U).asTypeOf(new BranchPredictionBankResponse)
banked_predictors(1).io.resp_in(0) := (0.U).asTypeOf(new BranchPredictionBankResponse)
banked_predictors(0).io.f1_lhist := banked_lhist_providers(0).io.f1_lhist
banked_predictors(1).io.f1_lhist := banked_lhist_providers(1).io.f1_lhist
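    // Route the request to the two sub-banks. When the packet starts in bank 0,
    // predictor 0 covers the first bankWidth slots and predictor 1 the next bank;
    // when it starts in bank 1 the roles swap, and predictor 0 is dropped if the
    // packet cannot span into the following bank.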
when (bank(io.f0_req.bits.pc) === 0.U) {
banked_lhist_providers(0).io.f0_valid := io.f0_req.valid
banked_lhist_providers(0).io.f0_pc := bankAlign(io.f0_req.bits.pc)
banked_lhist_providers(1).io.f0_valid := io.f0_req.valid
banked_lhist_providers(1).io.f0_pc := nextBank(io.f0_req.bits.pc)
banked_predictors(0).io.f0_valid := io.f0_req.valid
banked_predictors(0).io.f0_pc := io.f0_req.bits.pc
banked_predictors(0).io.f0_mask := fetchMask(io.f0_req.bits.pc)
banked_predictors(1).io.f0_valid := io.f0_req.valid
banked_predictors(1).io.f0_pc := nextBank(io.f0_req.bits.pc)
banked_predictors(1).io.f0_mask := ~(0.U(bankWidth.W))
} .otherwise {
banked_lhist_providers(0).io.f0_valid := io.f0_req.valid && !mayNotBeDualBanked(io.f0_req.bits.pc)
banked_lhist_providers(0).io.f0_pc := nextBank(io.f0_req.bits.pc)
banked_lhist_providers(1).io.f0_valid := io.f0_req.valid
banked_lhist_providers(1).io.f0_pc := bankAlign(io.f0_req.bits.pc)
banked_predictors(0).io.f0_valid := io.f0_req.valid && !mayNotBeDualBanked(io.f0_req.bits.pc)
banked_predictors(0).io.f0_pc := nextBank(io.f0_req.bits.pc)
banked_predictors(0).io.f0_mask := ~(0.U(bankWidth.W))
banked_predictors(1).io.f0_valid := io.f0_req.valid
banked_predictors(1).io.f0_pc := io.f0_req.bits.pc
banked_predictors(1).io.f0_mask := fetchMask(io.f0_req.bits.pc)
}
when (RegNext(bank(io.f0_req.bits.pc) === 0.U)) {
banked_predictors(0).io.f1_ghist := RegNext(io.f0_req.bits.ghist.histories(0))
banked_predictors(1).io.f1_ghist := RegNext(io.f0_req.bits.ghist.histories(1))
} .otherwise {
banked_predictors(0).io.f1_ghist := RegNext(io.f0_req.bits.ghist.histories(1))
banked_predictors(1).io.f1_ghist := RegNext(io.f0_req.bits.ghist.histories(0))
}
}
for (i <- 0 until nBanks) {
banked_lhist_providers(i).io.f3_taken_br := banked_predictors(i).io.resp.f3.map ( p =>
p.is_br && p.predicted_pc.valid && p.taken
).reduce(_||_)
}
if (nBanks == 1) {
io.resp.f1.preds := banked_predictors(0).io.resp.f1
io.resp.f2.preds := banked_predictors(0).io.resp.f2
io.resp.f3.preds := banked_predictors(0).io.resp.f3
io.resp.f3.meta(0) := banked_predictors(0).io.f3_meta
io.resp.f3.lhist(0) := banked_lhist_providers(0).io.f3_lhist
banked_predictors(0).io.f3_fire := io.f3_fire
banked_lhist_providers(0).io.f3_fire := io.f3_fire
} else {
require(nBanks == 2)
val b0_fire = io.f3_fire && RegNext(RegNext(RegNext(banked_predictors(0).io.f0_valid)))
val b1_fire = io.f3_fire && RegNext(RegNext(RegNext(banked_predictors(1).io.f0_valid)))
banked_predictors(0).io.f3_fire := b0_fire
banked_predictors(1).io.f3_fire := b1_fire
banked_lhist_providers(0).io.f3_fire := b0_fire
banked_lhist_providers(1).io.f3_fire := b1_fire
// The branch prediction metadata is stored un-shuffled
io.resp.f3.meta(0) := banked_predictors(0).io.f3_meta
io.resp.f3.meta(1) := banked_predictors(1).io.f3_meta
io.resp.f3.lhist(0) := banked_lhist_providers(0).io.f3_lhist
io.resp.f3.lhist(1) := banked_lhist_providers(1).io.f3_lhist
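    // Predictions are produced per bank; shuffle them back into fetch-packet
    // order based on which bank the packet started in.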
when (bank(io.resp.f1.pc) === 0.U) {
for (i <- 0 until bankWidth) {
io.resp.f1.preds(i) := banked_predictors(0).io.resp.f1(i)
io.resp.f1.preds(i+bankWidth) := banked_predictors(1).io.resp.f1(i)
}
} .otherwise {
for (i <- 0 until bankWidth) {
io.resp.f1.preds(i) := banked_predictors(1).io.resp.f1(i)
io.resp.f1.preds(i+bankWidth) := banked_predictors(0).io.resp.f1(i)
}
}
when (bank(io.resp.f2.pc) === 0.U) {
for (i <- 0 until bankWidth) {
io.resp.f2.preds(i) := banked_predictors(0).io.resp.f2(i)
io.resp.f2.preds(i+bankWidth) := banked_predictors(1).io.resp.f2(i)
}
} .otherwise {
for (i <- 0 until bankWidth) {
io.resp.f2.preds(i) := banked_predictors(1).io.resp.f2(i)
io.resp.f2.preds(i+bankWidth) := banked_predictors(0).io.resp.f2(i)
}
}
when (bank(io.resp.f3.pc) === 0.U) {
for (i <- 0 until bankWidth) {
io.resp.f3.preds(i) := banked_predictors(0).io.resp.f3(i)
io.resp.f3.preds(i+bankWidth) := banked_predictors(1).io.resp.f3(i)
}
} .otherwise {
for (i <- 0 until bankWidth) {
io.resp.f3.preds(i) := banked_predictors(1).io.resp.f3(i)
io.resp.f3.preds(i+bankWidth) := banked_predictors(0).io.resp.f3(i)
}
}
}
io.resp.f1.pc := RegNext(io.f0_req.bits.pc)
io.resp.f2.pc := RegNext(io.resp.f1.pc)
io.resp.f3.pc := RegNext(io.resp.f2.pc)
// We don't care about meta from the f1 and f2 resps
// Use the meta from the latest resp
io.resp.f1.meta := DontCare
io.resp.f2.meta := DontCare
io.resp.f1.lhist := DontCare
io.resp.f2.lhist := DontCare
for (i <- 0 until nBanks) {
banked_predictors(i).io.update.bits.is_mispredict_update := io.update.bits.is_mispredict_update
banked_predictors(i).io.update.bits.is_repair_update := io.update.bits.is_repair_update
banked_predictors(i).io.update.bits.meta := io.update.bits.meta(i)
banked_predictors(i).io.update.bits.lhist := io.update.bits.lhist(i)
banked_predictors(i).io.update.bits.cfi_idx.bits := io.update.bits.cfi_idx.bits
banked_predictors(i).io.update.bits.cfi_taken := io.update.bits.cfi_taken
banked_predictors(i).io.update.bits.cfi_mispredicted := io.update.bits.cfi_mispredicted
banked_predictors(i).io.update.bits.cfi_is_br := io.update.bits.cfi_is_br
banked_predictors(i).io.update.bits.cfi_is_jal := io.update.bits.cfi_is_jal
banked_predictors(i).io.update.bits.cfi_is_jalr := io.update.bits.cfi_is_jalr
banked_predictors(i).io.update.bits.target := io.update.bits.target
banked_lhist_providers(i).io.update.mispredict := io.update.bits.is_mispredict_update
banked_lhist_providers(i).io.update.repair := io.update.bits.is_repair_update
banked_lhist_providers(i).io.update.lhist := io.update.bits.lhist(i)
}
if (nBanks == 1) {
banked_predictors(0).io.update.valid := io.update.valid
banked_predictors(0).io.update.bits.pc := io.update.bits.pc
banked_predictors(0).io.update.bits.br_mask := io.update.bits.br_mask
banked_predictors(0).io.update.bits.btb_mispredicts := io.update.bits.btb_mispredicts
banked_predictors(0).io.update.bits.cfi_idx.valid := io.update.bits.cfi_idx.valid
banked_predictors(0).io.update.bits.ghist := io.update.bits.ghist.histories(0)
banked_lhist_providers(0).io.update.valid := io.update.valid && io.update.bits.br_mask =/= 0.U
banked_lhist_providers(0).io.update.pc := bankAlign(io.update.bits.pc)
} else {
require(nBanks == 2)
    // Split the single update bundle for the fetch packet into two updates,
    // one for each bank.
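    // e.g. with fetchWidth = 8 and bankWidth = 4, a CFI at fetch index 5 falls in
    // the second bank: predictor 0 sees cfi_idx.valid deasserted, while predictor 1
    // sees it asserted and receives the upper half of br_mask (br_mask >> bankWidth).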
when (bank(io.update.bits.pc) === 0.U) {
val b1_update_valid = io.update.valid &&
(!io.update.bits.cfi_idx.valid || io.update.bits.cfi_idx.bits >= bankWidth.U)
banked_lhist_providers(0).io.update.valid := io.update.valid && io.update.bits.br_mask(bankWidth-1,0) =/= 0.U
banked_lhist_providers(1).io.update.valid := b1_update_valid && io.update.bits.br_mask(fetchWidth-1,bankWidth) =/= 0.U
banked_lhist_providers(0).io.update.pc := bankAlign(io.update.bits.pc)
banked_lhist_providers(1).io.update.pc := nextBank(io.update.bits.pc)
banked_predictors(0).io.update.valid := io.update.valid
banked_predictors(1).io.update.valid := b1_update_valid
banked_predictors(0).io.update.bits.pc := io.update.bits.pc
banked_predictors(1).io.update.bits.pc := nextBank(io.update.bits.pc)
banked_predictors(0).io.update.bits.br_mask := io.update.bits.br_mask
banked_predictors(1).io.update.bits.br_mask := io.update.bits.br_mask >> bankWidth
banked_predictors(0).io.update.bits.btb_mispredicts := io.update.bits.btb_mispredicts
banked_predictors(1).io.update.bits.btb_mispredicts := io.update.bits.btb_mispredicts >> bankWidth
banked_predictors(0).io.update.bits.cfi_idx.valid := io.update.bits.cfi_idx.valid && io.update.bits.cfi_idx.bits < bankWidth.U
banked_predictors(1).io.update.bits.cfi_idx.valid := io.update.bits.cfi_idx.valid && io.update.bits.cfi_idx.bits >= bankWidth.U
banked_predictors(0).io.update.bits.ghist := io.update.bits.ghist.histories(0)
banked_predictors(1).io.update.bits.ghist := io.update.bits.ghist.histories(1)
} .otherwise {
val b0_update_valid = io.update.valid && !mayNotBeDualBanked(io.update.bits.pc) &&
(!io.update.bits.cfi_idx.valid || io.update.bits.cfi_idx.bits >= bankWidth.U)
banked_lhist_providers(1).io.update.valid := io.update.valid && io.update.bits.br_mask(bankWidth-1,0) =/= 0.U
banked_lhist_providers(0).io.update.valid := b0_update_valid && io.update.bits.br_mask(fetchWidth-1,bankWidth) =/= 0.U
banked_lhist_providers(1).io.update.pc := bankAlign(io.update.bits.pc)
banked_lhist_providers(0).io.update.pc := nextBank(io.update.bits.pc)
banked_predictors(1).io.update.valid := io.update.valid
banked_predictors(0).io.update.valid := b0_update_valid
banked_predictors(1).io.update.bits.pc := io.update.bits.pc
banked_predictors(0).io.update.bits.pc := nextBank(io.update.bits.pc)
banked_predictors(1).io.update.bits.br_mask := io.update.bits.br_mask
banked_predictors(0).io.update.bits.br_mask := io.update.bits.br_mask >> bankWidth
banked_predictors(1).io.update.bits.btb_mispredicts := io.update.bits.btb_mispredicts
banked_predictors(0).io.update.bits.btb_mispredicts := io.update.bits.btb_mispredicts >> bankWidth
banked_predictors(1).io.update.bits.cfi_idx.valid := io.update.bits.cfi_idx.valid && io.update.bits.cfi_idx.bits < bankWidth.U
banked_predictors(0).io.update.bits.cfi_idx.valid := io.update.bits.cfi_idx.valid && io.update.bits.cfi_idx.bits >= bankWidth.U
banked_predictors(1).io.update.bits.ghist := io.update.bits.ghist.histories(0)
banked_predictors(0).io.update.bits.ghist := io.update.bits.ghist.histories(1)
}
}
when (io.update.valid) {
when (io.update.bits.cfi_is_br && io.update.bits.cfi_idx.valid) {
assert(io.update.bits.br_mask(io.update.bits.cfi_idx.bits))
}
}
}
class NullBranchPredictorBank(implicit p: Parameters) extends BranchPredictorBank()(p) {
val mems = Nil
}
| module LoopBranchPredictorBank( // @[loop.scala:20:7]
input clock, // @[loop.scala:20:7]
input reset, // @[loop.scala:20:7]
input io_f0_valid, // @[predictor.scala:140:14]
input [39:0] io_f0_pc, // @[predictor.scala:140:14]
input [3:0] io_f0_mask, // @[predictor.scala:140:14]
input [63:0] io_f1_ghist, // @[predictor.scala:140:14]
input io_resp_in_0_f1_0_taken, // @[predictor.scala:140:14]
input io_resp_in_0_f1_0_is_br, // @[predictor.scala:140:14]
input io_resp_in_0_f1_0_is_jal, // @[predictor.scala:140:14]
input io_resp_in_0_f1_0_predicted_pc_valid, // @[predictor.scala:140:14]
input [39:0] io_resp_in_0_f1_0_predicted_pc_bits, // @[predictor.scala:140:14]
input io_resp_in_0_f1_1_taken, // @[predictor.scala:140:14]
input io_resp_in_0_f1_1_is_br, // @[predictor.scala:140:14]
input io_resp_in_0_f1_1_is_jal, // @[predictor.scala:140:14]
input io_resp_in_0_f1_1_predicted_pc_valid, // @[predictor.scala:140:14]
input [39:0] io_resp_in_0_f1_1_predicted_pc_bits, // @[predictor.scala:140:14]
input io_resp_in_0_f1_2_taken, // @[predictor.scala:140:14]
input io_resp_in_0_f1_2_is_br, // @[predictor.scala:140:14]
input io_resp_in_0_f1_2_is_jal, // @[predictor.scala:140:14]
input io_resp_in_0_f1_2_predicted_pc_valid, // @[predictor.scala:140:14]
input [39:0] io_resp_in_0_f1_2_predicted_pc_bits, // @[predictor.scala:140:14]
input io_resp_in_0_f1_3_taken, // @[predictor.scala:140:14]
input io_resp_in_0_f1_3_is_br, // @[predictor.scala:140:14]
input io_resp_in_0_f1_3_is_jal, // @[predictor.scala:140:14]
input io_resp_in_0_f1_3_predicted_pc_valid, // @[predictor.scala:140:14]
input [39:0] io_resp_in_0_f1_3_predicted_pc_bits, // @[predictor.scala:140:14]
input io_resp_in_0_f2_0_taken, // @[predictor.scala:140:14]
input io_resp_in_0_f2_0_is_br, // @[predictor.scala:140:14]
input io_resp_in_0_f2_0_is_jal, // @[predictor.scala:140:14]
input io_resp_in_0_f2_0_predicted_pc_valid, // @[predictor.scala:140:14]
input [39:0] io_resp_in_0_f2_0_predicted_pc_bits, // @[predictor.scala:140:14]
input io_resp_in_0_f2_1_taken, // @[predictor.scala:140:14]
input io_resp_in_0_f2_1_is_br, // @[predictor.scala:140:14]
input io_resp_in_0_f2_1_is_jal, // @[predictor.scala:140:14]
input io_resp_in_0_f2_1_predicted_pc_valid, // @[predictor.scala:140:14]
input [39:0] io_resp_in_0_f2_1_predicted_pc_bits, // @[predictor.scala:140:14]
input io_resp_in_0_f2_2_taken, // @[predictor.scala:140:14]
input io_resp_in_0_f2_2_is_br, // @[predictor.scala:140:14]
input io_resp_in_0_f2_2_is_jal, // @[predictor.scala:140:14]
input io_resp_in_0_f2_2_predicted_pc_valid, // @[predictor.scala:140:14]
input [39:0] io_resp_in_0_f2_2_predicted_pc_bits, // @[predictor.scala:140:14]
input io_resp_in_0_f2_3_taken, // @[predictor.scala:140:14]
input io_resp_in_0_f2_3_is_br, // @[predictor.scala:140:14]
input io_resp_in_0_f2_3_is_jal, // @[predictor.scala:140:14]
input io_resp_in_0_f2_3_predicted_pc_valid, // @[predictor.scala:140:14]
input [39:0] io_resp_in_0_f2_3_predicted_pc_bits, // @[predictor.scala:140:14]
input io_resp_in_0_f3_0_taken, // @[predictor.scala:140:14]
input io_resp_in_0_f3_0_is_br, // @[predictor.scala:140:14]
input io_resp_in_0_f3_0_is_jal, // @[predictor.scala:140:14]
input io_resp_in_0_f3_0_predicted_pc_valid, // @[predictor.scala:140:14]
input [39:0] io_resp_in_0_f3_0_predicted_pc_bits, // @[predictor.scala:140:14]
input io_resp_in_0_f3_1_taken, // @[predictor.scala:140:14]
input io_resp_in_0_f3_1_is_br, // @[predictor.scala:140:14]
input io_resp_in_0_f3_1_is_jal, // @[predictor.scala:140:14]
input io_resp_in_0_f3_1_predicted_pc_valid, // @[predictor.scala:140:14]
input [39:0] io_resp_in_0_f3_1_predicted_pc_bits, // @[predictor.scala:140:14]
input io_resp_in_0_f3_2_taken, // @[predictor.scala:140:14]
input io_resp_in_0_f3_2_is_br, // @[predictor.scala:140:14]
input io_resp_in_0_f3_2_is_jal, // @[predictor.scala:140:14]
input io_resp_in_0_f3_2_predicted_pc_valid, // @[predictor.scala:140:14]
input [39:0] io_resp_in_0_f3_2_predicted_pc_bits, // @[predictor.scala:140:14]
input io_resp_in_0_f3_3_taken, // @[predictor.scala:140:14]
input io_resp_in_0_f3_3_is_br, // @[predictor.scala:140:14]
input io_resp_in_0_f3_3_is_jal, // @[predictor.scala:140:14]
input io_resp_in_0_f3_3_predicted_pc_valid, // @[predictor.scala:140:14]
input [39:0] io_resp_in_0_f3_3_predicted_pc_bits, // @[predictor.scala:140:14]
output io_resp_f1_0_taken, // @[predictor.scala:140:14]
output io_resp_f1_0_is_br, // @[predictor.scala:140:14]
output io_resp_f1_0_is_jal, // @[predictor.scala:140:14]
output io_resp_f1_0_predicted_pc_valid, // @[predictor.scala:140:14]
output [39:0] io_resp_f1_0_predicted_pc_bits, // @[predictor.scala:140:14]
output io_resp_f1_1_taken, // @[predictor.scala:140:14]
output io_resp_f1_1_is_br, // @[predictor.scala:140:14]
output io_resp_f1_1_is_jal, // @[predictor.scala:140:14]
output io_resp_f1_1_predicted_pc_valid, // @[predictor.scala:140:14]
output [39:0] io_resp_f1_1_predicted_pc_bits, // @[predictor.scala:140:14]
output io_resp_f1_2_taken, // @[predictor.scala:140:14]
output io_resp_f1_2_is_br, // @[predictor.scala:140:14]
output io_resp_f1_2_is_jal, // @[predictor.scala:140:14]
output io_resp_f1_2_predicted_pc_valid, // @[predictor.scala:140:14]
output [39:0] io_resp_f1_2_predicted_pc_bits, // @[predictor.scala:140:14]
output io_resp_f1_3_taken, // @[predictor.scala:140:14]
output io_resp_f1_3_is_br, // @[predictor.scala:140:14]
output io_resp_f1_3_is_jal, // @[predictor.scala:140:14]
output io_resp_f1_3_predicted_pc_valid, // @[predictor.scala:140:14]
output [39:0] io_resp_f1_3_predicted_pc_bits, // @[predictor.scala:140:14]
output io_resp_f2_0_taken, // @[predictor.scala:140:14]
output io_resp_f2_0_is_br, // @[predictor.scala:140:14]
output io_resp_f2_0_is_jal, // @[predictor.scala:140:14]
output io_resp_f2_0_predicted_pc_valid, // @[predictor.scala:140:14]
output [39:0] io_resp_f2_0_predicted_pc_bits, // @[predictor.scala:140:14]
output io_resp_f2_1_taken, // @[predictor.scala:140:14]
output io_resp_f2_1_is_br, // @[predictor.scala:140:14]
output io_resp_f2_1_is_jal, // @[predictor.scala:140:14]
output io_resp_f2_1_predicted_pc_valid, // @[predictor.scala:140:14]
output [39:0] io_resp_f2_1_predicted_pc_bits, // @[predictor.scala:140:14]
output io_resp_f2_2_taken, // @[predictor.scala:140:14]
output io_resp_f2_2_is_br, // @[predictor.scala:140:14]
output io_resp_f2_2_is_jal, // @[predictor.scala:140:14]
output io_resp_f2_2_predicted_pc_valid, // @[predictor.scala:140:14]
output [39:0] io_resp_f2_2_predicted_pc_bits, // @[predictor.scala:140:14]
output io_resp_f2_3_taken, // @[predictor.scala:140:14]
output io_resp_f2_3_is_br, // @[predictor.scala:140:14]
output io_resp_f2_3_is_jal, // @[predictor.scala:140:14]
output io_resp_f2_3_predicted_pc_valid, // @[predictor.scala:140:14]
output [39:0] io_resp_f2_3_predicted_pc_bits, // @[predictor.scala:140:14]
output io_resp_f3_0_taken, // @[predictor.scala:140:14]
output io_resp_f3_0_is_br, // @[predictor.scala:140:14]
output io_resp_f3_0_is_jal, // @[predictor.scala:140:14]
output io_resp_f3_0_predicted_pc_valid, // @[predictor.scala:140:14]
output [39:0] io_resp_f3_0_predicted_pc_bits, // @[predictor.scala:140:14]
output io_resp_f3_1_taken, // @[predictor.scala:140:14]
output io_resp_f3_1_is_br, // @[predictor.scala:140:14]
output io_resp_f3_1_is_jal, // @[predictor.scala:140:14]
output io_resp_f3_1_predicted_pc_valid, // @[predictor.scala:140:14]
output [39:0] io_resp_f3_1_predicted_pc_bits, // @[predictor.scala:140:14]
output io_resp_f3_2_taken, // @[predictor.scala:140:14]
output io_resp_f3_2_is_br, // @[predictor.scala:140:14]
output io_resp_f3_2_is_jal, // @[predictor.scala:140:14]
output io_resp_f3_2_predicted_pc_valid, // @[predictor.scala:140:14]
output [39:0] io_resp_f3_2_predicted_pc_bits, // @[predictor.scala:140:14]
output io_resp_f3_3_taken, // @[predictor.scala:140:14]
output io_resp_f3_3_is_br, // @[predictor.scala:140:14]
output io_resp_f3_3_is_jal, // @[predictor.scala:140:14]
output io_resp_f3_3_predicted_pc_valid, // @[predictor.scala:140:14]
output [39:0] io_resp_f3_3_predicted_pc_bits, // @[predictor.scala:140:14]
output [119:0] io_f3_meta, // @[predictor.scala:140:14]
input io_f3_fire, // @[predictor.scala:140:14]
input io_update_valid, // @[predictor.scala:140:14]
input io_update_bits_is_mispredict_update, // @[predictor.scala:140:14]
input io_update_bits_is_repair_update, // @[predictor.scala:140:14]
input [3:0] io_update_bits_btb_mispredicts, // @[predictor.scala:140:14]
input [39:0] io_update_bits_pc, // @[predictor.scala:140:14]
input [3:0] io_update_bits_br_mask, // @[predictor.scala:140:14]
input io_update_bits_cfi_idx_valid, // @[predictor.scala:140:14]
input [1:0] io_update_bits_cfi_idx_bits, // @[predictor.scala:140:14]
input io_update_bits_cfi_taken, // @[predictor.scala:140:14]
input io_update_bits_cfi_mispredicted, // @[predictor.scala:140:14]
input io_update_bits_cfi_is_br, // @[predictor.scala:140:14]
input io_update_bits_cfi_is_jal, // @[predictor.scala:140:14]
input io_update_bits_cfi_is_jalr, // @[predictor.scala:140:14]
input [63:0] io_update_bits_ghist, // @[predictor.scala:140:14]
input io_update_bits_lhist, // @[predictor.scala:140:14]
input [39:0] io_update_bits_target, // @[predictor.scala:140:14]
input [119:0] io_update_bits_meta // @[predictor.scala:140:14]
);
wire io_f0_valid_0 = io_f0_valid; // @[loop.scala:20:7]
wire [39:0] io_f0_pc_0 = io_f0_pc; // @[loop.scala:20:7]
wire [3:0] io_f0_mask_0 = io_f0_mask; // @[loop.scala:20:7]
wire [63:0] io_f1_ghist_0 = io_f1_ghist; // @[loop.scala:20:7]
wire io_resp_in_0_f1_0_taken_0 = io_resp_in_0_f1_0_taken; // @[loop.scala:20:7]
wire io_resp_in_0_f1_0_is_br_0 = io_resp_in_0_f1_0_is_br; // @[loop.scala:20:7]
wire io_resp_in_0_f1_0_is_jal_0 = io_resp_in_0_f1_0_is_jal; // @[loop.scala:20:7]
wire io_resp_in_0_f1_0_predicted_pc_valid_0 = io_resp_in_0_f1_0_predicted_pc_valid; // @[loop.scala:20:7]
wire [39:0] io_resp_in_0_f1_0_predicted_pc_bits_0 = io_resp_in_0_f1_0_predicted_pc_bits; // @[loop.scala:20:7]
wire io_resp_in_0_f1_1_taken_0 = io_resp_in_0_f1_1_taken; // @[loop.scala:20:7]
wire io_resp_in_0_f1_1_is_br_0 = io_resp_in_0_f1_1_is_br; // @[loop.scala:20:7]
wire io_resp_in_0_f1_1_is_jal_0 = io_resp_in_0_f1_1_is_jal; // @[loop.scala:20:7]
wire io_resp_in_0_f1_1_predicted_pc_valid_0 = io_resp_in_0_f1_1_predicted_pc_valid; // @[loop.scala:20:7]
wire [39:0] io_resp_in_0_f1_1_predicted_pc_bits_0 = io_resp_in_0_f1_1_predicted_pc_bits; // @[loop.scala:20:7]
wire io_resp_in_0_f1_2_taken_0 = io_resp_in_0_f1_2_taken; // @[loop.scala:20:7]
wire io_resp_in_0_f1_2_is_br_0 = io_resp_in_0_f1_2_is_br; // @[loop.scala:20:7]
wire io_resp_in_0_f1_2_is_jal_0 = io_resp_in_0_f1_2_is_jal; // @[loop.scala:20:7]
wire io_resp_in_0_f1_2_predicted_pc_valid_0 = io_resp_in_0_f1_2_predicted_pc_valid; // @[loop.scala:20:7]
wire [39:0] io_resp_in_0_f1_2_predicted_pc_bits_0 = io_resp_in_0_f1_2_predicted_pc_bits; // @[loop.scala:20:7]
wire io_resp_in_0_f1_3_taken_0 = io_resp_in_0_f1_3_taken; // @[loop.scala:20:7]
wire io_resp_in_0_f1_3_is_br_0 = io_resp_in_0_f1_3_is_br; // @[loop.scala:20:7]
wire io_resp_in_0_f1_3_is_jal_0 = io_resp_in_0_f1_3_is_jal; // @[loop.scala:20:7]
wire io_resp_in_0_f1_3_predicted_pc_valid_0 = io_resp_in_0_f1_3_predicted_pc_valid; // @[loop.scala:20:7]
wire [39:0] io_resp_in_0_f1_3_predicted_pc_bits_0 = io_resp_in_0_f1_3_predicted_pc_bits; // @[loop.scala:20:7]
wire io_resp_in_0_f2_0_taken_0 = io_resp_in_0_f2_0_taken; // @[loop.scala:20:7]
wire io_resp_in_0_f2_0_is_br_0 = io_resp_in_0_f2_0_is_br; // @[loop.scala:20:7]
wire io_resp_in_0_f2_0_is_jal_0 = io_resp_in_0_f2_0_is_jal; // @[loop.scala:20:7]
wire io_resp_in_0_f2_0_predicted_pc_valid_0 = io_resp_in_0_f2_0_predicted_pc_valid; // @[loop.scala:20:7]
wire [39:0] io_resp_in_0_f2_0_predicted_pc_bits_0 = io_resp_in_0_f2_0_predicted_pc_bits; // @[loop.scala:20:7]
wire io_resp_in_0_f2_1_taken_0 = io_resp_in_0_f2_1_taken; // @[loop.scala:20:7]
wire io_resp_in_0_f2_1_is_br_0 = io_resp_in_0_f2_1_is_br; // @[loop.scala:20:7]
wire io_resp_in_0_f2_1_is_jal_0 = io_resp_in_0_f2_1_is_jal; // @[loop.scala:20:7]
wire io_resp_in_0_f2_1_predicted_pc_valid_0 = io_resp_in_0_f2_1_predicted_pc_valid; // @[loop.scala:20:7]
wire [39:0] io_resp_in_0_f2_1_predicted_pc_bits_0 = io_resp_in_0_f2_1_predicted_pc_bits; // @[loop.scala:20:7]
wire io_resp_in_0_f2_2_taken_0 = io_resp_in_0_f2_2_taken; // @[loop.scala:20:7]
wire io_resp_in_0_f2_2_is_br_0 = io_resp_in_0_f2_2_is_br; // @[loop.scala:20:7]
wire io_resp_in_0_f2_2_is_jal_0 = io_resp_in_0_f2_2_is_jal; // @[loop.scala:20:7]
wire io_resp_in_0_f2_2_predicted_pc_valid_0 = io_resp_in_0_f2_2_predicted_pc_valid; // @[loop.scala:20:7]
wire [39:0] io_resp_in_0_f2_2_predicted_pc_bits_0 = io_resp_in_0_f2_2_predicted_pc_bits; // @[loop.scala:20:7]
wire io_resp_in_0_f2_3_taken_0 = io_resp_in_0_f2_3_taken; // @[loop.scala:20:7]
wire io_resp_in_0_f2_3_is_br_0 = io_resp_in_0_f2_3_is_br; // @[loop.scala:20:7]
wire io_resp_in_0_f2_3_is_jal_0 = io_resp_in_0_f2_3_is_jal; // @[loop.scala:20:7]
wire io_resp_in_0_f2_3_predicted_pc_valid_0 = io_resp_in_0_f2_3_predicted_pc_valid; // @[loop.scala:20:7]
wire [39:0] io_resp_in_0_f2_3_predicted_pc_bits_0 = io_resp_in_0_f2_3_predicted_pc_bits; // @[loop.scala:20:7]
wire io_resp_in_0_f3_0_taken_0 = io_resp_in_0_f3_0_taken; // @[loop.scala:20:7]
wire io_resp_in_0_f3_0_is_br_0 = io_resp_in_0_f3_0_is_br; // @[loop.scala:20:7]
wire io_resp_in_0_f3_0_is_jal_0 = io_resp_in_0_f3_0_is_jal; // @[loop.scala:20:7]
wire io_resp_in_0_f3_0_predicted_pc_valid_0 = io_resp_in_0_f3_0_predicted_pc_valid; // @[loop.scala:20:7]
wire [39:0] io_resp_in_0_f3_0_predicted_pc_bits_0 = io_resp_in_0_f3_0_predicted_pc_bits; // @[loop.scala:20:7]
wire io_resp_in_0_f3_1_taken_0 = io_resp_in_0_f3_1_taken; // @[loop.scala:20:7]
wire io_resp_in_0_f3_1_is_br_0 = io_resp_in_0_f3_1_is_br; // @[loop.scala:20:7]
wire io_resp_in_0_f3_1_is_jal_0 = io_resp_in_0_f3_1_is_jal; // @[loop.scala:20:7]
wire io_resp_in_0_f3_1_predicted_pc_valid_0 = io_resp_in_0_f3_1_predicted_pc_valid; // @[loop.scala:20:7]
wire [39:0] io_resp_in_0_f3_1_predicted_pc_bits_0 = io_resp_in_0_f3_1_predicted_pc_bits; // @[loop.scala:20:7]
wire io_resp_in_0_f3_2_taken_0 = io_resp_in_0_f3_2_taken; // @[loop.scala:20:7]
wire io_resp_in_0_f3_2_is_br_0 = io_resp_in_0_f3_2_is_br; // @[loop.scala:20:7]
wire io_resp_in_0_f3_2_is_jal_0 = io_resp_in_0_f3_2_is_jal; // @[loop.scala:20:7]
wire io_resp_in_0_f3_2_predicted_pc_valid_0 = io_resp_in_0_f3_2_predicted_pc_valid; // @[loop.scala:20:7]
wire [39:0] io_resp_in_0_f3_2_predicted_pc_bits_0 = io_resp_in_0_f3_2_predicted_pc_bits; // @[loop.scala:20:7]
wire io_resp_in_0_f3_3_taken_0 = io_resp_in_0_f3_3_taken; // @[loop.scala:20:7]
wire io_resp_in_0_f3_3_is_br_0 = io_resp_in_0_f3_3_is_br; // @[loop.scala:20:7]
wire io_resp_in_0_f3_3_is_jal_0 = io_resp_in_0_f3_3_is_jal; // @[loop.scala:20:7]
wire io_resp_in_0_f3_3_predicted_pc_valid_0 = io_resp_in_0_f3_3_predicted_pc_valid; // @[loop.scala:20:7]
wire [39:0] io_resp_in_0_f3_3_predicted_pc_bits_0 = io_resp_in_0_f3_3_predicted_pc_bits; // @[loop.scala:20:7]
wire io_f3_fire_0 = io_f3_fire; // @[loop.scala:20:7]
wire io_update_valid_0 = io_update_valid; // @[loop.scala:20:7]
wire io_update_bits_is_mispredict_update_0 = io_update_bits_is_mispredict_update; // @[loop.scala:20:7]
wire io_update_bits_is_repair_update_0 = io_update_bits_is_repair_update; // @[loop.scala:20:7]
wire [3:0] io_update_bits_btb_mispredicts_0 = io_update_bits_btb_mispredicts; // @[loop.scala:20:7]
wire [39:0] io_update_bits_pc_0 = io_update_bits_pc; // @[loop.scala:20:7]
wire [3:0] io_update_bits_br_mask_0 = io_update_bits_br_mask; // @[loop.scala:20:7]
wire io_update_bits_cfi_idx_valid_0 = io_update_bits_cfi_idx_valid; // @[loop.scala:20:7]
wire [1:0] io_update_bits_cfi_idx_bits_0 = io_update_bits_cfi_idx_bits; // @[loop.scala:20:7]
wire io_update_bits_cfi_taken_0 = io_update_bits_cfi_taken; // @[loop.scala:20:7]
wire io_update_bits_cfi_mispredicted_0 = io_update_bits_cfi_mispredicted; // @[loop.scala:20:7]
wire io_update_bits_cfi_is_br_0 = io_update_bits_cfi_is_br; // @[loop.scala:20:7]
wire io_update_bits_cfi_is_jal_0 = io_update_bits_cfi_is_jal; // @[loop.scala:20:7]
wire io_update_bits_cfi_is_jalr_0 = io_update_bits_cfi_is_jalr; // @[loop.scala:20:7]
wire [63:0] io_update_bits_ghist_0 = io_update_bits_ghist; // @[loop.scala:20:7]
wire io_update_bits_lhist_0 = io_update_bits_lhist; // @[loop.scala:20:7]
wire [39:0] io_update_bits_target_0 = io_update_bits_target; // @[loop.scala:20:7]
wire [119:0] io_update_bits_meta_0 = io_update_bits_meta; // @[loop.scala:20:7]
wire io_f1_lhist = 1'h0; // @[predictor.scala:140:14]
wire io_resp_f1_0_taken_0 = io_resp_in_0_f1_0_taken_0; // @[loop.scala:20:7]
wire io_resp_f1_0_is_br_0 = io_resp_in_0_f1_0_is_br_0; // @[loop.scala:20:7]
wire io_resp_f1_0_is_jal_0 = io_resp_in_0_f1_0_is_jal_0; // @[loop.scala:20:7]
wire io_resp_f1_0_predicted_pc_valid_0 = io_resp_in_0_f1_0_predicted_pc_valid_0; // @[loop.scala:20:7]
wire [39:0] io_resp_f1_0_predicted_pc_bits_0 = io_resp_in_0_f1_0_predicted_pc_bits_0; // @[loop.scala:20:7]
wire io_resp_f1_1_taken_0 = io_resp_in_0_f1_1_taken_0; // @[loop.scala:20:7]
wire io_resp_f1_1_is_br_0 = io_resp_in_0_f1_1_is_br_0; // @[loop.scala:20:7]
wire io_resp_f1_1_is_jal_0 = io_resp_in_0_f1_1_is_jal_0; // @[loop.scala:20:7]
wire io_resp_f1_1_predicted_pc_valid_0 = io_resp_in_0_f1_1_predicted_pc_valid_0; // @[loop.scala:20:7]
wire [39:0] io_resp_f1_1_predicted_pc_bits_0 = io_resp_in_0_f1_1_predicted_pc_bits_0; // @[loop.scala:20:7]
wire io_resp_f1_2_taken_0 = io_resp_in_0_f1_2_taken_0; // @[loop.scala:20:7]
wire io_resp_f1_2_is_br_0 = io_resp_in_0_f1_2_is_br_0; // @[loop.scala:20:7]
wire io_resp_f1_2_is_jal_0 = io_resp_in_0_f1_2_is_jal_0; // @[loop.scala:20:7]
wire io_resp_f1_2_predicted_pc_valid_0 = io_resp_in_0_f1_2_predicted_pc_valid_0; // @[loop.scala:20:7]
wire [39:0] io_resp_f1_2_predicted_pc_bits_0 = io_resp_in_0_f1_2_predicted_pc_bits_0; // @[loop.scala:20:7]
wire io_resp_f1_3_taken_0 = io_resp_in_0_f1_3_taken_0; // @[loop.scala:20:7]
wire io_resp_f1_3_is_br_0 = io_resp_in_0_f1_3_is_br_0; // @[loop.scala:20:7]
wire io_resp_f1_3_is_jal_0 = io_resp_in_0_f1_3_is_jal_0; // @[loop.scala:20:7]
wire io_resp_f1_3_predicted_pc_valid_0 = io_resp_in_0_f1_3_predicted_pc_valid_0; // @[loop.scala:20:7]
wire [39:0] io_resp_f1_3_predicted_pc_bits_0 = io_resp_in_0_f1_3_predicted_pc_bits_0; // @[loop.scala:20:7]
wire io_resp_f2_0_taken_0 = io_resp_in_0_f2_0_taken_0; // @[loop.scala:20:7]
wire io_resp_f2_0_is_br_0 = io_resp_in_0_f2_0_is_br_0; // @[loop.scala:20:7]
wire io_resp_f2_0_is_jal_0 = io_resp_in_0_f2_0_is_jal_0; // @[loop.scala:20:7]
wire io_resp_f2_0_predicted_pc_valid_0 = io_resp_in_0_f2_0_predicted_pc_valid_0; // @[loop.scala:20:7]
wire [39:0] io_resp_f2_0_predicted_pc_bits_0 = io_resp_in_0_f2_0_predicted_pc_bits_0; // @[loop.scala:20:7]
wire io_resp_f2_1_taken_0 = io_resp_in_0_f2_1_taken_0; // @[loop.scala:20:7]
wire io_resp_f2_1_is_br_0 = io_resp_in_0_f2_1_is_br_0; // @[loop.scala:20:7]
wire io_resp_f2_1_is_jal_0 = io_resp_in_0_f2_1_is_jal_0; // @[loop.scala:20:7]
wire io_resp_f2_1_predicted_pc_valid_0 = io_resp_in_0_f2_1_predicted_pc_valid_0; // @[loop.scala:20:7]
wire [39:0] io_resp_f2_1_predicted_pc_bits_0 = io_resp_in_0_f2_1_predicted_pc_bits_0; // @[loop.scala:20:7]
wire io_resp_f2_2_taken_0 = io_resp_in_0_f2_2_taken_0; // @[loop.scala:20:7]
wire io_resp_f2_2_is_br_0 = io_resp_in_0_f2_2_is_br_0; // @[loop.scala:20:7]
wire io_resp_f2_2_is_jal_0 = io_resp_in_0_f2_2_is_jal_0; // @[loop.scala:20:7]
wire io_resp_f2_2_predicted_pc_valid_0 = io_resp_in_0_f2_2_predicted_pc_valid_0; // @[loop.scala:20:7]
wire [39:0] io_resp_f2_2_predicted_pc_bits_0 = io_resp_in_0_f2_2_predicted_pc_bits_0; // @[loop.scala:20:7]
wire io_resp_f2_3_taken_0 = io_resp_in_0_f2_3_taken_0; // @[loop.scala:20:7]
wire io_resp_f2_3_is_br_0 = io_resp_in_0_f2_3_is_br_0; // @[loop.scala:20:7]
wire io_resp_f2_3_is_jal_0 = io_resp_in_0_f2_3_is_jal_0; // @[loop.scala:20:7]
wire io_resp_f2_3_predicted_pc_valid_0 = io_resp_in_0_f2_3_predicted_pc_valid_0; // @[loop.scala:20:7]
wire [39:0] io_resp_f2_3_predicted_pc_bits_0 = io_resp_in_0_f2_3_predicted_pc_bits_0; // @[loop.scala:20:7]
wire io_resp_f3_0_is_br_0 = io_resp_in_0_f3_0_is_br_0; // @[loop.scala:20:7]
wire io_resp_f3_0_is_jal_0 = io_resp_in_0_f3_0_is_jal_0; // @[loop.scala:20:7]
wire io_resp_f3_0_predicted_pc_valid_0 = io_resp_in_0_f3_0_predicted_pc_valid_0; // @[loop.scala:20:7]
wire [39:0] io_resp_f3_0_predicted_pc_bits_0 = io_resp_in_0_f3_0_predicted_pc_bits_0; // @[loop.scala:20:7]
wire io_resp_f3_1_is_br_0 = io_resp_in_0_f3_1_is_br_0; // @[loop.scala:20:7]
wire io_resp_f3_1_is_jal_0 = io_resp_in_0_f3_1_is_jal_0; // @[loop.scala:20:7]
wire io_resp_f3_1_predicted_pc_valid_0 = io_resp_in_0_f3_1_predicted_pc_valid_0; // @[loop.scala:20:7]
wire [39:0] io_resp_f3_1_predicted_pc_bits_0 = io_resp_in_0_f3_1_predicted_pc_bits_0; // @[loop.scala:20:7]
wire io_resp_f3_2_is_br_0 = io_resp_in_0_f3_2_is_br_0; // @[loop.scala:20:7]
wire io_resp_f3_2_is_jal_0 = io_resp_in_0_f3_2_is_jal_0; // @[loop.scala:20:7]
wire io_resp_f3_2_predicted_pc_valid_0 = io_resp_in_0_f3_2_predicted_pc_valid_0; // @[loop.scala:20:7]
wire [39:0] io_resp_f3_2_predicted_pc_bits_0 = io_resp_in_0_f3_2_predicted_pc_bits_0; // @[loop.scala:20:7]
wire io_resp_f3_3_is_br_0 = io_resp_in_0_f3_3_is_br_0; // @[loop.scala:20:7]
wire io_resp_f3_3_is_jal_0 = io_resp_in_0_f3_3_is_jal_0; // @[loop.scala:20:7]
wire io_resp_f3_3_predicted_pc_valid_0 = io_resp_in_0_f3_3_predicted_pc_valid_0; // @[loop.scala:20:7]
wire [39:0] io_resp_f3_3_predicted_pc_bits_0 = io_resp_in_0_f3_3_predicted_pc_bits_0; // @[loop.scala:20:7]
wire io_resp_f3_0_taken_0; // @[loop.scala:20:7]
wire io_resp_f3_1_taken_0; // @[loop.scala:20:7]
wire io_resp_f3_2_taken_0; // @[loop.scala:20:7]
wire io_resp_f3_3_taken_0; // @[loop.scala:20:7]
wire [119:0] io_f3_meta_0; // @[loop.scala:20:7]
wire [35:0] s0_idx = io_f0_pc_0[39:4]; // @[frontend.scala:149:35]
reg [35:0] s1_idx; // @[predictor.scala:163:29]
reg [35:0] s2_idx; // @[predictor.scala:164:29]
reg [35:0] s3_idx; // @[predictor.scala:165:29]
reg s1_valid; // @[predictor.scala:168:25]
reg s2_valid; // @[predictor.scala:169:25]
reg s3_valid; // @[predictor.scala:170:25]
reg [3:0] s1_mask; // @[predictor.scala:173:24]
reg [3:0] s2_mask; // @[predictor.scala:174:24]
reg [3:0] s3_mask; // @[predictor.scala:175:24]
wire [39:0] _s0_pc_T = ~io_f0_pc_0; // @[frontend.scala:147:33]
wire [39:0] _s0_pc_T_1 = {_s0_pc_T[39:3], 3'h7}; // @[frontend.scala:147:{33,39}]
wire [39:0] s0_pc = ~_s0_pc_T_1; // @[frontend.scala:147:{31,39}]
reg [39:0] s1_pc; // @[predictor.scala:178:22]
reg [39:0] s2_pc; // @[predictor.scala:179:22]
wire [35:0] s0_update_idx = io_update_bits_pc_0[39:4]; // @[frontend.scala:149:35]
reg s1_update_valid; // @[predictor.scala:185:30]
reg s1_update_bits_is_mispredict_update; // @[predictor.scala:185:30]
reg s1_update_bits_is_repair_update; // @[predictor.scala:185:30]
reg [3:0] s1_update_bits_btb_mispredicts; // @[predictor.scala:185:30]
reg [39:0] s1_update_bits_pc; // @[predictor.scala:185:30]
reg [3:0] s1_update_bits_br_mask; // @[predictor.scala:185:30]
reg s1_update_bits_cfi_idx_valid; // @[predictor.scala:185:30]
reg [1:0] s1_update_bits_cfi_idx_bits; // @[predictor.scala:185:30]
reg s1_update_bits_cfi_taken; // @[predictor.scala:185:30]
reg s1_update_bits_cfi_mispredicted; // @[predictor.scala:185:30]
reg s1_update_bits_cfi_is_br; // @[predictor.scala:185:30]
reg s1_update_bits_cfi_is_jal; // @[predictor.scala:185:30]
reg s1_update_bits_cfi_is_jalr; // @[predictor.scala:185:30]
reg [63:0] s1_update_bits_ghist; // @[predictor.scala:185:30]
reg s1_update_bits_lhist; // @[predictor.scala:185:30]
reg [39:0] s1_update_bits_target; // @[predictor.scala:185:30]
reg [119:0] s1_update_bits_meta; // @[predictor.scala:185:30]
wire [39:0] _s1_update_bits_pc_T = ~io_update_bits_pc_0; // @[frontend.scala:147:33]
wire [39:0] _s1_update_bits_pc_T_1 = {_s1_update_bits_pc_T[39:3], 3'h7}; // @[frontend.scala:147:{33,39}]
wire [39:0] _s1_update_bits_pc_T_2 = ~_s1_update_bits_pc_T_1; // @[frontend.scala:147:{31,39}]
reg [35:0] s1_update_idx; // @[predictor.scala:187:30]
reg s1_update_valid_0; // @[predictor.scala:188:32]
wire [9:0] f3_meta_0_s_cnt; // @[loop.scala:184:21]
wire [9:0] f3_meta_1_s_cnt; // @[loop.scala:184:21]
wire [9:0] f3_meta_2_s_cnt; // @[loop.scala:184:21]
wire [9:0] f3_meta_3_s_cnt; // @[loop.scala:184:21]
wire [19:0] _GEN = {f3_meta_1_s_cnt, f3_meta_0_s_cnt}; // @[loop.scala:184:21, :185:33]
wire [19:0] lo; // @[loop.scala:185:33]
assign lo = _GEN; // @[loop.scala:185:33]
wire [19:0] io_f3_meta_lo; // @[loop.scala:212:25]
assign io_f3_meta_lo = _GEN; // @[loop.scala:185:33, :212:25]
wire [19:0] _GEN_0 = {f3_meta_3_s_cnt, f3_meta_2_s_cnt}; // @[loop.scala:184:21, :185:33]
wire [19:0] hi; // @[loop.scala:185:33]
assign hi = _GEN_0; // @[loop.scala:185:33]
wire [19:0] io_f3_meta_hi; // @[loop.scala:212:25]
assign io_f3_meta_hi = _GEN_0; // @[loop.scala:185:33, :212:25]
wire [9:0] _update_meta_T; // @[loop.scala:187:49]
wire [9:0] _update_meta_T_1; // @[loop.scala:187:49]
wire [9:0] _update_meta_T_2; // @[loop.scala:187:49]
wire [9:0] _update_meta_T_3; // @[loop.scala:187:49]
wire [9:0] update_meta_0_s_cnt; // @[loop.scala:187:49]
wire [9:0] update_meta_1_s_cnt; // @[loop.scala:187:49]
wire [9:0] update_meta_2_s_cnt; // @[loop.scala:187:49]
wire [9:0] update_meta_3_s_cnt; // @[loop.scala:187:49]
wire [39:0] _update_meta_WIRE = s1_update_bits_meta[39:0]; // @[predictor.scala:185:30]
assign _update_meta_T = _update_meta_WIRE[9:0]; // @[loop.scala:187:49]
assign update_meta_0_s_cnt = _update_meta_T; // @[loop.scala:187:49]
assign _update_meta_T_1 = _update_meta_WIRE[19:10]; // @[loop.scala:187:49]
assign update_meta_1_s_cnt = _update_meta_T_1; // @[loop.scala:187:49]
assign _update_meta_T_2 = _update_meta_WIRE[29:20]; // @[loop.scala:187:49]
assign update_meta_2_s_cnt = _update_meta_T_2; // @[loop.scala:187:49]
assign _update_meta_T_3 = _update_meta_WIRE[39:30]; // @[loop.scala:187:49]
assign update_meta_3_s_cnt = _update_meta_T_3; // @[loop.scala:187:49]
wire _columns_0_io_f3_req_fire_T = s3_mask[0]; // @[predictor.scala:175:24]
wire _columns_0_io_f3_req_fire_T_1 = s3_valid & _columns_0_io_f3_req_fire_T; // @[predictor.scala:170:25]
wire _columns_0_io_f3_req_fire_T_2 = _columns_0_io_f3_req_fire_T_1 & io_f3_fire_0; // @[loop.scala:20:7, :192:{44,58}]
wire _columns_0_io_f3_req_fire_T_3 = io_resp_in_0_f2_0_predicted_pc_valid_0 & io_resp_in_0_f2_0_is_br_0; // @[loop.scala:20:7, :193:54]
reg columns_0_io_f3_req_fire_REG; // @[loop.scala:193:14]
wire _columns_0_io_f3_req_fire_T_4 = _columns_0_io_f3_req_fire_T_2 & columns_0_io_f3_req_fire_REG; // @[loop.scala:192:{58,72}, :193:14]
wire _columns_0_io_update_mispredict_T = s1_update_bits_br_mask[0]; // @[predictor.scala:185:30]
wire _columns_0_io_update_repair_T = s1_update_bits_br_mask[0]; // @[predictor.scala:185:30]
wire _columns_0_io_update_mispredict_T_1 = s1_update_valid & _columns_0_io_update_mispredict_T; // @[predictor.scala:185:30]
wire _columns_0_io_update_mispredict_T_2 = _columns_0_io_update_mispredict_T_1 & s1_update_bits_is_mispredict_update; // @[predictor.scala:185:30]
wire _columns_0_io_update_mispredict_T_3 = _columns_0_io_update_mispredict_T_2 & s1_update_bits_cfi_mispredicted; // @[predictor.scala:185:30]
wire _columns_0_io_update_repair_T_1 = s1_update_valid & _columns_0_io_update_repair_T; // @[predictor.scala:185:30]
wire _columns_0_io_update_repair_T_2 = _columns_0_io_update_repair_T_1 & s1_update_bits_is_repair_update; // @[predictor.scala:185:30]
wire _columns_1_io_f3_req_fire_T = s3_mask[1]; // @[predictor.scala:175:24]
wire _columns_1_io_f3_req_fire_T_1 = s3_valid & _columns_1_io_f3_req_fire_T; // @[predictor.scala:170:25]
wire _columns_1_io_f3_req_fire_T_2 = _columns_1_io_f3_req_fire_T_1 & io_f3_fire_0; // @[loop.scala:20:7, :192:{44,58}]
wire _columns_1_io_f3_req_fire_T_3 = io_resp_in_0_f2_1_predicted_pc_valid_0 & io_resp_in_0_f2_1_is_br_0; // @[loop.scala:20:7, :193:54]
reg columns_1_io_f3_req_fire_REG; // @[loop.scala:193:14]
wire _columns_1_io_f3_req_fire_T_4 = _columns_1_io_f3_req_fire_T_2 & columns_1_io_f3_req_fire_REG; // @[loop.scala:192:{58,72}, :193:14]
wire _columns_1_io_update_mispredict_T = s1_update_bits_br_mask[1]; // @[predictor.scala:185:30]
wire _columns_1_io_update_repair_T = s1_update_bits_br_mask[1]; // @[predictor.scala:185:30]
wire _columns_1_io_update_mispredict_T_1 = s1_update_valid & _columns_1_io_update_mispredict_T; // @[predictor.scala:185:30]
wire _columns_1_io_update_mispredict_T_2 = _columns_1_io_update_mispredict_T_1 & s1_update_bits_is_mispredict_update; // @[predictor.scala:185:30]
wire _columns_1_io_update_mispredict_T_3 = _columns_1_io_update_mispredict_T_2 & s1_update_bits_cfi_mispredicted; // @[predictor.scala:185:30]
wire _columns_1_io_update_repair_T_1 = s1_update_valid & _columns_1_io_update_repair_T; // @[predictor.scala:185:30]
wire _columns_1_io_update_repair_T_2 = _columns_1_io_update_repair_T_1 & s1_update_bits_is_repair_update; // @[predictor.scala:185:30]
wire _columns_2_io_f3_req_fire_T = s3_mask[2]; // @[predictor.scala:175:24]
wire _columns_2_io_f3_req_fire_T_1 = s3_valid & _columns_2_io_f3_req_fire_T; // @[predictor.scala:170:25]
wire _columns_2_io_f3_req_fire_T_2 = _columns_2_io_f3_req_fire_T_1 & io_f3_fire_0; // @[loop.scala:20:7, :192:{44,58}]
wire _columns_2_io_f3_req_fire_T_3 = io_resp_in_0_f2_2_predicted_pc_valid_0 & io_resp_in_0_f2_2_is_br_0; // @[loop.scala:20:7, :193:54]
reg columns_2_io_f3_req_fire_REG; // @[loop.scala:193:14]
wire _columns_2_io_f3_req_fire_T_4 = _columns_2_io_f3_req_fire_T_2 & columns_2_io_f3_req_fire_REG; // @[loop.scala:192:{58,72}, :193:14]
wire _columns_2_io_update_mispredict_T = s1_update_bits_br_mask[2]; // @[predictor.scala:185:30]
wire _columns_2_io_update_repair_T = s1_update_bits_br_mask[2]; // @[predictor.scala:185:30]
wire _columns_2_io_update_mispredict_T_1 = s1_update_valid & _columns_2_io_update_mispredict_T; // @[predictor.scala:185:30]
wire _columns_2_io_update_mispredict_T_2 = _columns_2_io_update_mispredict_T_1 & s1_update_bits_is_mispredict_update; // @[predictor.scala:185:30]
wire _columns_2_io_update_mispredict_T_3 = _columns_2_io_update_mispredict_T_2 & s1_update_bits_cfi_mispredicted; // @[predictor.scala:185:30]
wire _columns_2_io_update_repair_T_1 = s1_update_valid & _columns_2_io_update_repair_T; // @[predictor.scala:185:30]
wire _columns_2_io_update_repair_T_2 = _columns_2_io_update_repair_T_1 & s1_update_bits_is_repair_update; // @[predictor.scala:185:30]
wire _columns_3_io_f3_req_fire_T = s3_mask[3]; // @[predictor.scala:175:24]
wire _columns_3_io_f3_req_fire_T_1 = s3_valid & _columns_3_io_f3_req_fire_T; // @[predictor.scala:170:25]
wire _columns_3_io_f3_req_fire_T_2 = _columns_3_io_f3_req_fire_T_1 & io_f3_fire_0; // @[loop.scala:20:7, :192:{44,58}]
wire _columns_3_io_f3_req_fire_T_3 = io_resp_in_0_f2_3_predicted_pc_valid_0 & io_resp_in_0_f2_3_is_br_0; // @[loop.scala:20:7, :193:54]
reg columns_3_io_f3_req_fire_REG; // @[loop.scala:193:14]
wire _columns_3_io_f3_req_fire_T_4 = _columns_3_io_f3_req_fire_T_2 & columns_3_io_f3_req_fire_REG; // @[loop.scala:192:{58,72}, :193:14]
wire _columns_3_io_update_mispredict_T = s1_update_bits_br_mask[3]; // @[predictor.scala:185:30]
wire _columns_3_io_update_repair_T = s1_update_bits_br_mask[3]; // @[predictor.scala:185:30]
wire _columns_3_io_update_mispredict_T_1 = s1_update_valid & _columns_3_io_update_mispredict_T; // @[predictor.scala:185:30]
wire _columns_3_io_update_mispredict_T_2 = _columns_3_io_update_mispredict_T_1 & s1_update_bits_is_mispredict_update; // @[predictor.scala:185:30]
wire _columns_3_io_update_mispredict_T_3 = _columns_3_io_update_mispredict_T_2 & s1_update_bits_cfi_mispredicted; // @[predictor.scala:185:30]
wire _columns_3_io_update_repair_T_1 = s1_update_valid & _columns_3_io_update_repair_T; // @[predictor.scala:185:30]
wire _columns_3_io_update_repair_T_2 = _columns_3_io_update_repair_T_1 & s1_update_bits_is_repair_update; // @[predictor.scala:185:30]
wire [39:0] _io_f3_meta_T = {io_f3_meta_hi, io_f3_meta_lo}; // @[loop.scala:212:25]
assign io_f3_meta_0 = {80'h0, _io_f3_meta_T}; // @[loop.scala:20:7, :212:{14,25}]
always @(posedge clock) begin // @[loop.scala:20:7]
s1_idx <= s0_idx; // @[frontend.scala:149:35]
s2_idx <= s1_idx; // @[predictor.scala:163:29, :164:29]
s3_idx <= s2_idx; // @[predictor.scala:164:29, :165:29]
s1_valid <= io_f0_valid_0; // @[predictor.scala:168:25]
s2_valid <= s1_valid; // @[predictor.scala:168:25, :169:25]
s3_valid <= s2_valid; // @[predictor.scala:169:25, :170:25]
s1_mask <= io_f0_mask_0; // @[predictor.scala:173:24]
s2_mask <= s1_mask; // @[predictor.scala:173:24, :174:24]
s3_mask <= s2_mask; // @[predictor.scala:174:24, :175:24]
s1_pc <= s0_pc; // @[frontend.scala:147:31]
s2_pc <= s1_pc; // @[predictor.scala:178:22, :179:22]
s1_update_valid <= io_update_valid_0; // @[predictor.scala:185:30]
s1_update_bits_is_mispredict_update <= io_update_bits_is_mispredict_update_0; // @[predictor.scala:185:30]
s1_update_bits_is_repair_update <= io_update_bits_is_repair_update_0; // @[predictor.scala:185:30]
s1_update_bits_btb_mispredicts <= io_update_bits_btb_mispredicts_0; // @[predictor.scala:185:30]
s1_update_bits_pc <= _s1_update_bits_pc_T_2; // @[frontend.scala:147:31]
s1_update_bits_br_mask <= io_update_bits_br_mask_0; // @[predictor.scala:185:30]
s1_update_bits_cfi_idx_valid <= io_update_bits_cfi_idx_valid_0; // @[predictor.scala:185:30]
s1_update_bits_cfi_idx_bits <= io_update_bits_cfi_idx_bits_0; // @[predictor.scala:185:30]
s1_update_bits_cfi_taken <= io_update_bits_cfi_taken_0; // @[predictor.scala:185:30]
s1_update_bits_cfi_mispredicted <= io_update_bits_cfi_mispredicted_0; // @[predictor.scala:185:30]
s1_update_bits_cfi_is_br <= io_update_bits_cfi_is_br_0; // @[predictor.scala:185:30]
s1_update_bits_cfi_is_jal <= io_update_bits_cfi_is_jal_0; // @[predictor.scala:185:30]
s1_update_bits_cfi_is_jalr <= io_update_bits_cfi_is_jalr_0; // @[predictor.scala:185:30]
s1_update_bits_ghist <= io_update_bits_ghist_0; // @[predictor.scala:185:30]
s1_update_bits_lhist <= io_update_bits_lhist_0; // @[predictor.scala:185:30]
s1_update_bits_target <= io_update_bits_target_0; // @[predictor.scala:185:30]
s1_update_bits_meta <= io_update_bits_meta_0; // @[predictor.scala:185:30]
s1_update_idx <= s0_update_idx; // @[frontend.scala:149:35]
s1_update_valid_0 <= io_update_valid_0; // @[predictor.scala:188:32]
columns_0_io_f3_req_fire_REG <= _columns_0_io_f3_req_fire_T_3; // @[loop.scala:193:{14,54}]
columns_1_io_f3_req_fire_REG <= _columns_1_io_f3_req_fire_T_3; // @[loop.scala:193:{14,54}]
columns_2_io_f3_req_fire_REG <= _columns_2_io_f3_req_fire_T_3; // @[loop.scala:193:{14,54}]
columns_3_io_f3_req_fire_REG <= _columns_3_io_f3_req_fire_T_3; // @[loop.scala:193:{14,54}]
  end // always @(posedge)
LoopBranchPredictorColumn columns_0 ( // @[loop.scala:182:45]
.clock (clock),
.reset (reset),
.io_f2_req_valid (s2_valid), // @[predictor.scala:169:25]
.io_f2_req_idx (s2_idx), // @[predictor.scala:164:29]
.io_f3_req_fire (_columns_0_io_f3_req_fire_T_4), // @[loop.scala:192:72]
.io_f3_pred_in (io_resp_in_0_f3_0_taken_0), // @[loop.scala:20:7]
.io_f3_pred (io_resp_f3_0_taken_0),
.io_f3_meta_s_cnt (f3_meta_0_s_cnt),
.io_update_mispredict (_columns_0_io_update_mispredict_T_3), // @[loop.scala:200:82]
.io_update_repair (_columns_0_io_update_repair_T_2), // @[loop.scala:203:72]
.io_update_idx (s1_update_idx), // @[predictor.scala:187:30]
.io_update_resolve_dir (s1_update_bits_cfi_taken), // @[predictor.scala:185:30]
.io_update_meta_s_cnt (update_meta_0_s_cnt) // @[loop.scala:187:49]
); // @[loop.scala:182:45]
LoopBranchPredictorColumn_1 columns_1 ( // @[loop.scala:182:45]
.clock (clock),
.reset (reset),
.io_f2_req_valid (s2_valid), // @[predictor.scala:169:25]
.io_f2_req_idx (s2_idx), // @[predictor.scala:164:29]
.io_f3_req_fire (_columns_1_io_f3_req_fire_T_4), // @[loop.scala:192:72]
.io_f3_pred_in (io_resp_in_0_f3_1_taken_0), // @[loop.scala:20:7]
.io_f3_pred (io_resp_f3_1_taken_0),
.io_f3_meta_s_cnt (f3_meta_1_s_cnt),
.io_update_mispredict (_columns_1_io_update_mispredict_T_3), // @[loop.scala:200:82]
.io_update_repair (_columns_1_io_update_repair_T_2), // @[loop.scala:203:72]
.io_update_idx (s1_update_idx), // @[predictor.scala:187:30]
.io_update_resolve_dir (s1_update_bits_cfi_taken), // @[predictor.scala:185:30]
.io_update_meta_s_cnt (update_meta_1_s_cnt) // @[loop.scala:187:49]
); // @[loop.scala:182:45]
LoopBranchPredictorColumn_2 columns_2 ( // @[loop.scala:182:45]
.clock (clock),
.reset (reset),
.io_f2_req_valid (s2_valid), // @[predictor.scala:169:25]
.io_f2_req_idx (s2_idx), // @[predictor.scala:164:29]
.io_f3_req_fire (_columns_2_io_f3_req_fire_T_4), // @[loop.scala:192:72]
.io_f3_pred_in (io_resp_in_0_f3_2_taken_0), // @[loop.scala:20:7]
.io_f3_pred (io_resp_f3_2_taken_0),
.io_f3_meta_s_cnt (f3_meta_2_s_cnt),
.io_update_mispredict (_columns_2_io_update_mispredict_T_3), // @[loop.scala:200:82]
.io_update_repair (_columns_2_io_update_repair_T_2), // @[loop.scala:203:72]
.io_update_idx (s1_update_idx), // @[predictor.scala:187:30]
.io_update_resolve_dir (s1_update_bits_cfi_taken), // @[predictor.scala:185:30]
.io_update_meta_s_cnt (update_meta_2_s_cnt) // @[loop.scala:187:49]
); // @[loop.scala:182:45]
LoopBranchPredictorColumn_3 columns_3 ( // @[loop.scala:182:45]
.clock (clock),
.reset (reset),
.io_f2_req_valid (s2_valid), // @[predictor.scala:169:25]
.io_f2_req_idx (s2_idx), // @[predictor.scala:164:29]
.io_f3_req_fire (_columns_3_io_f3_req_fire_T_4), // @[loop.scala:192:72]
.io_f3_pred_in (io_resp_in_0_f3_3_taken_0), // @[loop.scala:20:7]
.io_f3_pred (io_resp_f3_3_taken_0),
.io_f3_meta_s_cnt (f3_meta_3_s_cnt),
.io_update_mispredict (_columns_3_io_update_mispredict_T_3), // @[loop.scala:200:82]
.io_update_repair (_columns_3_io_update_repair_T_2), // @[loop.scala:203:72]
.io_update_idx (s1_update_idx), // @[predictor.scala:187:30]
.io_update_resolve_dir (s1_update_bits_cfi_taken), // @[predictor.scala:185:30]
.io_update_meta_s_cnt (update_meta_3_s_cnt) // @[loop.scala:187:49]
); // @[loop.scala:182:45]
assign io_resp_f1_0_taken = io_resp_f1_0_taken_0; // @[loop.scala:20:7]
assign io_resp_f1_0_is_br = io_resp_f1_0_is_br_0; // @[loop.scala:20:7]
assign io_resp_f1_0_is_jal = io_resp_f1_0_is_jal_0; // @[loop.scala:20:7]
assign io_resp_f1_0_predicted_pc_valid = io_resp_f1_0_predicted_pc_valid_0; // @[loop.scala:20:7]
assign io_resp_f1_0_predicted_pc_bits = io_resp_f1_0_predicted_pc_bits_0; // @[loop.scala:20:7]
assign io_resp_f1_1_taken = io_resp_f1_1_taken_0; // @[loop.scala:20:7]
assign io_resp_f1_1_is_br = io_resp_f1_1_is_br_0; // @[loop.scala:20:7]
assign io_resp_f1_1_is_jal = io_resp_f1_1_is_jal_0; // @[loop.scala:20:7]
assign io_resp_f1_1_predicted_pc_valid = io_resp_f1_1_predicted_pc_valid_0; // @[loop.scala:20:7]
assign io_resp_f1_1_predicted_pc_bits = io_resp_f1_1_predicted_pc_bits_0; // @[loop.scala:20:7]
assign io_resp_f1_2_taken = io_resp_f1_2_taken_0; // @[loop.scala:20:7]
assign io_resp_f1_2_is_br = io_resp_f1_2_is_br_0; // @[loop.scala:20:7]
assign io_resp_f1_2_is_jal = io_resp_f1_2_is_jal_0; // @[loop.scala:20:7]
assign io_resp_f1_2_predicted_pc_valid = io_resp_f1_2_predicted_pc_valid_0; // @[loop.scala:20:7]
assign io_resp_f1_2_predicted_pc_bits = io_resp_f1_2_predicted_pc_bits_0; // @[loop.scala:20:7]
assign io_resp_f1_3_taken = io_resp_f1_3_taken_0; // @[loop.scala:20:7]
assign io_resp_f1_3_is_br = io_resp_f1_3_is_br_0; // @[loop.scala:20:7]
assign io_resp_f1_3_is_jal = io_resp_f1_3_is_jal_0; // @[loop.scala:20:7]
assign io_resp_f1_3_predicted_pc_valid = io_resp_f1_3_predicted_pc_valid_0; // @[loop.scala:20:7]
assign io_resp_f1_3_predicted_pc_bits = io_resp_f1_3_predicted_pc_bits_0; // @[loop.scala:20:7]
assign io_resp_f2_0_taken = io_resp_f2_0_taken_0; // @[loop.scala:20:7]
assign io_resp_f2_0_is_br = io_resp_f2_0_is_br_0; // @[loop.scala:20:7]
assign io_resp_f2_0_is_jal = io_resp_f2_0_is_jal_0; // @[loop.scala:20:7]
assign io_resp_f2_0_predicted_pc_valid = io_resp_f2_0_predicted_pc_valid_0; // @[loop.scala:20:7]
assign io_resp_f2_0_predicted_pc_bits = io_resp_f2_0_predicted_pc_bits_0; // @[loop.scala:20:7]
assign io_resp_f2_1_taken = io_resp_f2_1_taken_0; // @[loop.scala:20:7]
assign io_resp_f2_1_is_br = io_resp_f2_1_is_br_0; // @[loop.scala:20:7]
assign io_resp_f2_1_is_jal = io_resp_f2_1_is_jal_0; // @[loop.scala:20:7]
assign io_resp_f2_1_predicted_pc_valid = io_resp_f2_1_predicted_pc_valid_0; // @[loop.scala:20:7]
assign io_resp_f2_1_predicted_pc_bits = io_resp_f2_1_predicted_pc_bits_0; // @[loop.scala:20:7]
assign io_resp_f2_2_taken = io_resp_f2_2_taken_0; // @[loop.scala:20:7]
assign io_resp_f2_2_is_br = io_resp_f2_2_is_br_0; // @[loop.scala:20:7]
assign io_resp_f2_2_is_jal = io_resp_f2_2_is_jal_0; // @[loop.scala:20:7]
assign io_resp_f2_2_predicted_pc_valid = io_resp_f2_2_predicted_pc_valid_0; // @[loop.scala:20:7]
assign io_resp_f2_2_predicted_pc_bits = io_resp_f2_2_predicted_pc_bits_0; // @[loop.scala:20:7]
assign io_resp_f2_3_taken = io_resp_f2_3_taken_0; // @[loop.scala:20:7]
assign io_resp_f2_3_is_br = io_resp_f2_3_is_br_0; // @[loop.scala:20:7]
assign io_resp_f2_3_is_jal = io_resp_f2_3_is_jal_0; // @[loop.scala:20:7]
assign io_resp_f2_3_predicted_pc_valid = io_resp_f2_3_predicted_pc_valid_0; // @[loop.scala:20:7]
assign io_resp_f2_3_predicted_pc_bits = io_resp_f2_3_predicted_pc_bits_0; // @[loop.scala:20:7]
assign io_resp_f3_0_taken = io_resp_f3_0_taken_0; // @[loop.scala:20:7]
assign io_resp_f3_0_is_br = io_resp_f3_0_is_br_0; // @[loop.scala:20:7]
assign io_resp_f3_0_is_jal = io_resp_f3_0_is_jal_0; // @[loop.scala:20:7]
assign io_resp_f3_0_predicted_pc_valid = io_resp_f3_0_predicted_pc_valid_0; // @[loop.scala:20:7]
assign io_resp_f3_0_predicted_pc_bits = io_resp_f3_0_predicted_pc_bits_0; // @[loop.scala:20:7]
assign io_resp_f3_1_taken = io_resp_f3_1_taken_0; // @[loop.scala:20:7]
assign io_resp_f3_1_is_br = io_resp_f3_1_is_br_0; // @[loop.scala:20:7]
assign io_resp_f3_1_is_jal = io_resp_f3_1_is_jal_0; // @[loop.scala:20:7]
assign io_resp_f3_1_predicted_pc_valid = io_resp_f3_1_predicted_pc_valid_0; // @[loop.scala:20:7]
assign io_resp_f3_1_predicted_pc_bits = io_resp_f3_1_predicted_pc_bits_0; // @[loop.scala:20:7]
assign io_resp_f3_2_taken = io_resp_f3_2_taken_0; // @[loop.scala:20:7]
assign io_resp_f3_2_is_br = io_resp_f3_2_is_br_0; // @[loop.scala:20:7]
assign io_resp_f3_2_is_jal = io_resp_f3_2_is_jal_0; // @[loop.scala:20:7]
assign io_resp_f3_2_predicted_pc_valid = io_resp_f3_2_predicted_pc_valid_0; // @[loop.scala:20:7]
assign io_resp_f3_2_predicted_pc_bits = io_resp_f3_2_predicted_pc_bits_0; // @[loop.scala:20:7]
assign io_resp_f3_3_taken = io_resp_f3_3_taken_0; // @[loop.scala:20:7]
assign io_resp_f3_3_is_br = io_resp_f3_3_is_br_0; // @[loop.scala:20:7]
assign io_resp_f3_3_is_jal = io_resp_f3_3_is_jal_0; // @[loop.scala:20:7]
assign io_resp_f3_3_predicted_pc_valid = io_resp_f3_3_predicted_pc_valid_0; // @[loop.scala:20:7]
assign io_resp_f3_3_predicted_pc_bits = io_resp_f3_3_predicted_pc_bits_0; // @[loop.scala:20:7]
assign io_f3_meta = io_f3_meta_0; // @[loop.scala:20:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Buffer.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.BufferParams
class TLBufferNode (
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit valName: ValName) extends TLAdapterNode(
clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) },
managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) }
) {
override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}"
override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none)
}
class TLBuffer(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters) extends LazyModule
{
def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace)
def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde)
def this()(implicit p: Parameters) = this(BufferParams.default)
val node = new TLBufferNode(a, b, c, d, e)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def headBundle = node.out.head._2.bundle
override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.a <> a(in .a)
in .d <> d(out.d)
if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) {
in .b <> b(out.b)
out.c <> c(in .c)
out.e <> e(in .e)
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLBuffer
{
def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default)
def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde)
def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace)
def apply(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)(implicit p: Parameters): TLNode =
{
val buffer = LazyModule(new TLBuffer(a, b, c, d, e))
buffer.node
}
def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = {
val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) }
name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } }
buffers.map(_.node)
}
def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = {
chain(depth, name)
.reduceLeftOption(_ :*=* _)
.getOrElse(TLNameNode("no_buffer"))
}
}
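// A minimal usage sketch (hypothetical `manager` and `client` nodes, not defined here): a TLBuffer, or a
// chain of them, is spliced into a diplomatic connection with the manager side on the left, just like the
// TLBuffer stages in the TLRAMFragmenter test later in this document.
//
//   manager.node := TLBuffer() := client.node                                 // one stage on every channel
//   manager.node := TLBuffer.chainNode(2, Some("mem_buf")) := client.node     // two back-to-back stages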
File Fragmenter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, BufferParams, IdRange, TransferSizes}
import freechips.rocketchip.util.{Repeater, OH1ToUInt, UIntToOH1}
import scala.math.min
import freechips.rocketchip.util.DataToAugmentedData
object EarlyAck {
sealed trait T
case object AllPuts extends T
case object PutFulls extends T
case object None extends T
}
// minSize: minimum size of transfers supported by all outward managers
// maxSize: maximum size of transfers supported after the Fragmenter is applied
// alwaysMin: fragment all requests down to minSize (else fragment to maximum supported by manager)
// earlyAck: should a multibeat Put be acknowledged on the first beat or the last beat
// holdFirstDeny: allow the Fragmenter to unsafely combine multibeat Gets by taking the first denied for the whole burst
// nameSuffix: appends a suffix to the module name
// Fragmenter modifies: PutFull, PutPartial, LogicalData, Get, Hint
// Fragmenter passes: ArithmeticData (truncated to minSize if alwaysMin)
// Fragmenter cannot modify acquire (could livelock); thus it is unsafe to put caches on both sides
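// A hedged usage sketch (hypothetical `device` and `bus` nodes): a TLFragmenter usually sits in front of a
// narrow device, with minSize the minimum transfer size supported by all outward managers (often the bus
// beatBytes) and maxSize the largest transfer accepted after fragmentation (often the bus blockBytes),
// mirroring the TLRAMFragmenter test at the end of this file.
//
//   device.node := TLFragmenter(minSize = beatBytes, maxSize = blockBytes, earlyAck = EarlyAck.AllPuts) := bus.node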
class TLFragmenter(val minSize: Int, val maxSize: Int, val alwaysMin: Boolean = false, val earlyAck: EarlyAck.T = EarlyAck.None, val holdFirstDeny: Boolean = false, val nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
require(isPow2 (maxSize), s"TLFragmenter expects pow2(maxSize), but got $maxSize")
require(isPow2 (minSize), s"TLFragmenter expects pow2(minSize), but got $minSize")
require(minSize <= maxSize, s"TLFragmenter expects min <= max, but got $minSize > $maxSize")
val fragmentBits = log2Ceil(maxSize / minSize)
val fullBits = if (earlyAck == EarlyAck.PutFulls) 1 else 0
val toggleBits = 1
val addedBits = fragmentBits + toggleBits + fullBits
def expandTransfer(x: TransferSizes, op: String) = if (!x) x else {
// validate that we can apply the fragmenter correctly
require (x.max >= minSize, s"TLFragmenter (with parent $parent) max transfer size $op(${x.max}) must be >= min transfer size (${minSize})")
TransferSizes(x.min, maxSize)
}
private def noChangeRequired = minSize == maxSize
private def shrinkTransfer(x: TransferSizes) =
if (!alwaysMin) x
else if (x.min <= minSize) TransferSizes(x.min, min(minSize, x.max))
else TransferSizes.none
private def mapManager(m: TLSlaveParameters) = m.v1copy(
supportsArithmetic = shrinkTransfer(m.supportsArithmetic),
supportsLogical = shrinkTransfer(m.supportsLogical),
supportsGet = expandTransfer(m.supportsGet, "Get"),
supportsPutFull = expandTransfer(m.supportsPutFull, "PutFull"),
    supportsPutPartial = expandTransfer(m.supportsPutPartial, "PutPartial"),
supportsHint = expandTransfer(m.supportsHint, "Hint"))
val node = new TLAdapterNode(
// We require that all the responses are mutually FIFO
// Thus we need to compact all of the masters into one big master
clientFn = { c => (if (noChangeRequired) c else c.v2copy(
masters = Seq(TLMasterParameters.v2(
name = "TLFragmenter",
sourceId = IdRange(0, if (minSize == maxSize) c.endSourceId else (c.endSourceId << addedBits)),
requestFifo = true,
emits = TLMasterToSlaveTransferSizes(
acquireT = shrinkTransfer(c.masters.map(_.emits.acquireT) .reduce(_ mincover _)),
acquireB = shrinkTransfer(c.masters.map(_.emits.acquireB) .reduce(_ mincover _)),
arithmetic = shrinkTransfer(c.masters.map(_.emits.arithmetic).reduce(_ mincover _)),
logical = shrinkTransfer(c.masters.map(_.emits.logical) .reduce(_ mincover _)),
get = shrinkTransfer(c.masters.map(_.emits.get) .reduce(_ mincover _)),
putFull = shrinkTransfer(c.masters.map(_.emits.putFull) .reduce(_ mincover _)),
putPartial = shrinkTransfer(c.masters.map(_.emits.putPartial).reduce(_ mincover _)),
hint = shrinkTransfer(c.masters.map(_.emits.hint) .reduce(_ mincover _))
)
))
))},
managerFn = { m => if (noChangeRequired) m else m.v2copy(slaves = m.slaves.map(mapManager)) }
) {
override def circuitIdentity = noChangeRequired
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = (Seq("TLFragmenter") ++ nameSuffix).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
if (noChangeRequired) {
out <> in
} else {
// All managers must share a common FIFO domain (responses might end up interleaved)
val manager = edgeOut.manager
val managers = manager.managers
val beatBytes = manager.beatBytes
val fifoId = managers(0).fifoId
require (fifoId.isDefined && managers.map(_.fifoId == fifoId).reduce(_ && _))
require (!manager.anySupportAcquireB || !edgeOut.client.anySupportProbe,
s"TLFragmenter (with parent $parent) can't fragment a caching client's requests into a cacheable region")
require (minSize >= beatBytes, s"TLFragmenter (with parent $parent) can't support fragmenting ($minSize) to sub-beat ($beatBytes) accesses")
// We can't support devices which are cached on both sides of us
require (!edgeOut.manager.anySupportAcquireB || !edgeIn.client.anySupportProbe)
// We can't support denied because we reassemble fragments
require (!edgeOut.manager.mayDenyGet || holdFirstDeny, s"TLFragmenter (with parent $parent) can't support denials without holdFirstDeny=true")
require (!edgeOut.manager.mayDenyPut || earlyAck == EarlyAck.None)
/* The Fragmenter is a bit tricky, because there are 5 sizes in play:
* max size -- the maximum transfer size possible
* orig size -- the original pre-fragmenter size
* frag size -- the modified post-fragmenter size
* min size -- the threshold below which frag=orig
* beat size -- the amount transferred on any given beat
*
* The relationships are as follows:
* max >= orig >= frag
* max > min >= beat
* It IS possible that orig <= min (then frag=orig; ie: no fragmentation)
*
* The fragment# (sent via TL.source) is measured in multiples of min size.
* Meanwhile, to track the progress, counters measure in multiples of beat size.
*
* Here is an example of a bus with max=256, min=8, beat=4 and a device supporting 16.
*
* in.A out.A (frag#) out.D (frag#) in.D gen# ack#
* get64 get16 6 ackD16 6 ackD64 12 15
* ackD16 6 ackD64 14
* ackD16 6 ackD64 13
* ackD16 6 ackD64 12
* get16 4 ackD16 4 ackD64 8 11
* ackD16 4 ackD64 10
* ackD16 4 ackD64 9
* ackD16 4 ackD64 8
* get16 2 ackD16 2 ackD64 4 7
* ackD16 2 ackD64 6
* ackD16 2 ackD64 5
* ackD16 2 ackD64 4
* get16 0 ackD16 0 ackD64 0 3
* ackD16 0 ackD64 2
* ackD16 0 ackD64 1
* ackD16 0 ackD64 0
*
* get8 get8 0 ackD8 0 ackD8 0 1
* ackD8 0 ackD8 0
*
* get4 get4 0 ackD4 0 ackD4 0 0
* get1 get1 0 ackD1 0 ackD1 0 0
*
* put64 put16 6 15
* put64 put16 6 14
* put64 put16 6 13
* put64 put16 6 ack16 6 12 12
* put64 put16 4 11
* put64 put16 4 10
* put64 put16 4 9
* put64 put16 4 ack16 4 8 8
* put64 put16 2 7
* put64 put16 2 6
* put64 put16 2 5
* put64 put16 2 ack16 2 4 4
* put64 put16 0 3
* put64 put16 0 2
* put64 put16 0 1
* put64 put16 0 ack16 0 ack64 0 0
*
* put8 put8 0 1
* put8 put8 0 ack8 0 ack8 0 0
*
* put4 put4 0 ack4 0 ack4 0 0
* put1 put1 0 ack1 0 ack1 0 0
*/
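      // Worked numbers for the example above: with max=256, min=8, beat=4, the outgoing source gains
      // fragmentBits = log2Ceil(256/8) = 5 low bits for the fragment number (plus one toggle bit, and one
      // "full" bit when earlyAck == EarlyAck.PutFulls), while the beat counters below are
      // counterBits = log2Up(256/4) = 6 bits wide. Fragment numbers count in multiples of min size, which
      // is why the get64 above is emitted as get16 fragments numbered 6, 4, 2, 0.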
val counterBits = log2Up(maxSize/beatBytes)
val maxDownSize = if (alwaysMin) minSize else min(manager.maxTransfer, maxSize)
// Consider the following waveform for two 4-beat bursts:
// ---A----A------------
// -------D-----DDD-DDDD
// Under TL rules, the second A can use the same source as the first A,
// because the source is released for reuse on the first response beat.
//
// However, if we fragment the requests, it looks like this:
// ---3210-3210---------
// -------3-----210-3210
// ... now we've broken the rules because 210 are twice inflight.
//
// This phenomenon means we can have essentially 2*maxSize/minSize-1
// fragmented transactions in flight per original transaction source.
//
// To keep the source unique, we encode the beat counter in the low
// bits of the source. To solve the overlap, we use a toggle bit.
// Whatever toggle bit the D is reassembling, A will use the opposite.
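      // An illustrative sketch of the expanded source encoding produced by the Cat further below:
      //
      //   out.a.source = Cat(in.a.source, [aFull], aToggle, aFragnum)
      //     aFragnum : fragmentBits low bits, the fragment number in multiples of minSize
      //     aToggle  : 1 bit, the opposite of the toggle D is currently reassembling
      //     aFull    : 1 bit, present only when earlyAck == EarlyAck.PutFulls
      //
      // The original source is recovered on the return path as out.d.bits.source >> addedBits.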
// First, handle the return path
val acknum = RegInit(0.U(counterBits.W))
val dOrig = Reg(UInt())
val dToggle = RegInit(false.B)
val dFragnum = out.d.bits.source(fragmentBits-1, 0)
val dFirst = acknum === 0.U
val dLast = dFragnum === 0.U // only for AccessAck (!Data)
val dsizeOH = UIntToOH (out.d.bits.size, log2Ceil(maxDownSize)+1)
val dsizeOH1 = UIntToOH1(out.d.bits.size, log2Up(maxDownSize))
val dHasData = edgeOut.hasData(out.d.bits)
// calculate new acknum
val acknum_fragment = dFragnum << log2Ceil(minSize/beatBytes)
val acknum_size = dsizeOH1 >> log2Ceil(beatBytes)
assert (!out.d.valid || (acknum_fragment & acknum_size) === 0.U)
val dFirst_acknum = acknum_fragment | Mux(dHasData, acknum_size, 0.U)
val ack_decrement = Mux(dHasData, 1.U, dsizeOH >> log2Ceil(beatBytes))
// calculate the original size
val dFirst_size = OH1ToUInt((dFragnum << log2Ceil(minSize)) | dsizeOH1)
when (out.d.fire) {
acknum := Mux(dFirst, dFirst_acknum, acknum - ack_decrement)
when (dFirst) {
dOrig := dFirst_size
dToggle := out.d.bits.source(fragmentBits)
}
}
// Swallow up non-data ack fragments
val doEarlyAck = earlyAck match {
case EarlyAck.AllPuts => true.B
case EarlyAck.PutFulls => out.d.bits.source(fragmentBits+1)
case EarlyAck.None => false.B
}
val drop = !dHasData && !Mux(doEarlyAck, dFirst, dLast)
out.d.ready := in.d.ready || drop
in.d.valid := out.d.valid && !drop
in.d.bits := out.d.bits // pass most stuff unchanged
in.d.bits.source := out.d.bits.source >> addedBits
in.d.bits.size := Mux(dFirst, dFirst_size, dOrig)
if (edgeOut.manager.mayDenyPut) {
val r_denied = Reg(Bool())
val d_denied = (!dFirst && r_denied) || out.d.bits.denied
when (out.d.fire) { r_denied := d_denied }
in.d.bits.denied := d_denied
}
if (edgeOut.manager.mayDenyGet) {
// Take denied only from the first beat and hold that value
val d_denied = out.d.bits.denied holdUnless dFirst
when (dHasData) {
in.d.bits.denied := d_denied
in.d.bits.corrupt := d_denied || out.d.bits.corrupt
}
}
// What maximum transfer sizes do downstream devices support?
val maxArithmetics = managers.map(_.supportsArithmetic.max)
val maxLogicals = managers.map(_.supportsLogical.max)
val maxGets = managers.map(_.supportsGet.max)
val maxPutFulls = managers.map(_.supportsPutFull.max)
val maxPutPartials = managers.map(_.supportsPutPartial.max)
val maxHints = managers.map(m => if (m.supportsHint) maxDownSize else 0)
// We assume that the request is valid => size 0 is impossible
val lgMinSize = log2Ceil(minSize).U
val maxLgArithmetics = maxArithmetics.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgLogicals = maxLogicals .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgGets = maxGets .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutFulls = maxPutFulls .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutPartials = maxPutPartials.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgHints = maxHints .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
// Make the request repeatable
val repeater = Module(new Repeater(in.a.bits))
repeater.io.enq <> in.a
val in_a = repeater.io.deq
      // If this is in front of a single manager, these become constants
val find = manager.findFast(edgeIn.address(in_a.bits))
val maxLgArithmetic = Mux1H(find, maxLgArithmetics)
val maxLgLogical = Mux1H(find, maxLgLogicals)
val maxLgGet = Mux1H(find, maxLgGets)
val maxLgPutFull = Mux1H(find, maxLgPutFulls)
val maxLgPutPartial = Mux1H(find, maxLgPutPartials)
val maxLgHint = Mux1H(find, maxLgHints)
val limit = if (alwaysMin) lgMinSize else
MuxLookup(in_a.bits.opcode, lgMinSize)(Array(
TLMessages.PutFullData -> maxLgPutFull,
TLMessages.PutPartialData -> maxLgPutPartial,
TLMessages.ArithmeticData -> maxLgArithmetic,
TLMessages.LogicalData -> maxLgLogical,
TLMessages.Get -> maxLgGet,
TLMessages.Hint -> maxLgHint))
val aOrig = in_a.bits.size
val aFrag = Mux(aOrig > limit, limit, aOrig)
val aOrigOH1 = UIntToOH1(aOrig, log2Ceil(maxSize))
val aFragOH1 = UIntToOH1(aFrag, log2Up(maxDownSize))
val aHasData = edgeIn.hasData(in_a.bits)
val aMask = Mux(aHasData, 0.U, aFragOH1)
val gennum = RegInit(0.U(counterBits.W))
val aFirst = gennum === 0.U
val old_gennum1 = Mux(aFirst, aOrigOH1 >> log2Ceil(beatBytes), gennum - 1.U)
val new_gennum = ~(~old_gennum1 | (aMask >> log2Ceil(beatBytes))) // ~(~x|y) is width safe
val aFragnum = ~(~(old_gennum1 >> log2Ceil(minSize/beatBytes)) | (aFragOH1 >> log2Ceil(minSize)))
val aLast = aFragnum === 0.U
val aToggle = !Mux(aFirst, dToggle, RegEnable(dToggle, aFirst))
val aFull = if (earlyAck == EarlyAck.PutFulls) Some(in_a.bits.opcode === TLMessages.PutFullData) else None
when (out.a.fire) { gennum := new_gennum }
repeater.io.repeat := !aHasData && aFragnum =/= 0.U
out.a <> in_a
out.a.bits.address := in_a.bits.address | ~(old_gennum1 << log2Ceil(beatBytes) | ~aOrigOH1 | aFragOH1 | (minSize-1).U)
out.a.bits.source := Cat(Seq(in_a.bits.source) ++ aFull ++ Seq(aToggle.asUInt, aFragnum))
out.a.bits.size := aFrag
// Optimize away some of the Repeater's registers
assert (!repeater.io.full || !aHasData)
out.a.bits.data := in.a.bits.data
val fullMask = ((BigInt(1) << beatBytes) - 1).U
assert (!repeater.io.full || in_a.bits.mask === fullMask)
out.a.bits.mask := Mux(repeater.io.full, fullMask, in.a.bits.mask)
out.a.bits.user.waiveAll :<= in.a.bits.user.subset(_.isData)
// Tie off unused channels
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLFragmenter
{
def apply(minSize: Int, maxSize: Int, alwaysMin: Boolean = false, earlyAck: EarlyAck.T = EarlyAck.None, holdFirstDeny: Boolean = false, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
if (minSize <= maxSize) {
val fragmenter = LazyModule(new TLFragmenter(minSize, maxSize, alwaysMin, earlyAck, holdFirstDeny, nameSuffix))
fragmenter.node
} else { TLEphemeralNode()(ValName("no_fragmenter")) }
}
def apply(wrapper: TLBusWrapper, nameSuffix: Option[String])(implicit p: Parameters): TLNode = apply(wrapper.beatBytes, wrapper.blockBytes, nameSuffix = nameSuffix)
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper, None)
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMFragmenter(ramBeatBytes: Int, maxSize: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Fragmenter"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff), beatBytes = ramBeatBytes))
(ram.node
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLDelayer(0.1)
:= TLFragmenter(ramBeatBytes, maxSize, earlyAck = EarlyAck.AllPuts)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLFragmenter(ramBeatBytes, maxSize/2)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMFragmenterTest(ramBeatBytes: Int, maxSize: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMFragmenter(ramBeatBytes,maxSize,txns)).module)
io.finished := dut.io.finished
dut.io.start := io.start
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], return the [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
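// A hedged sketch of a typical LazyRawModuleImp subclass (the `clockIn`/`resetIn` ports are hypothetical):
// the implementation drives childClock/childReset itself so that lazy children still receive a clock and reset.
//
//   class ExampleImp(outer: LazyModule) extends LazyRawModuleImp(outer) {
//     val clockIn = IO(Input(Clock()))
//     val resetIn = IO(Input(Bool()))
//     childClock := clockIn
//     childReset := resetIn
//   }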
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
  /** Ensure that the same node is not visited twice in resolving `:*=`, etc. operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
      // Resolving the stars depends on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
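  // A worked example of the range construction above: if the resolved per-binding edge counts are
  // Seq(1, 2, 1), then scanLeft(0)(_ + _) gives Seq(0, 1, 3, 4), and zipping init with tail yields the
  // port ranges Seq((0,1), (1,3), (3,4)); binding 0 owns edge 0, binding 1 owns edges 1 and 2, and
  // binding 2 owns edge 3.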
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
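  // The contract of mapParamsD is that, given the resolved number of outward ports and the
  // downward-flowing parameters gathered on the inward side, it returns exactly one DO per outward
  // port. A one-to-one adapter node, for instance, implements it roughly as `(n, in) => in.map(dFn)`,
  // while a crossbar-style node derives each of its `n` outputs from all of `in`.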
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
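  // Upward negotiation mirrors the downward case: mapParamsU must turn the UO parameters gathered
  // from the outward ports into exactly one UI per inward port (again element-wise for a simple
  // one-to-one adapter).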
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: Unconnected forwarded diplomatic signals are currently tied off with DontCare for compatibility.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: Unconnected forwarded diplomatic signals are currently tied off with DontCare for compatibility.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
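  // Roughly speaking, the two Dangles describing the same edge (the out-Dangle of the source node
  // and the in-Dangle of the sink node) carry identical source/sink HalfEdges. When both reach a
  // common enclosing LazyModule, its implementation connects their bundles; an unmatched Dangle is
  // instead punched upward as an IO of the enclosing module.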
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
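  // Typical use inside a LazyModuleImp, once elaboration has reached this node (here `node` stands
  // for whatever diplomatic node the module owns):
  //   val (bundle, edge) = node.in(0)
  //   bundle.a.ready := ...   // drive or inspect the negotiated hardware Bundle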
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
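  // Note the flip above: a BIND_STAR recorded on this (inward) side is stored as BIND_QUERY on the
  // outward side and vice versa, so exactly one end of every binding resolves the star while the
  // other end is merely queried for its port count; BIND_ONCE and BIND_FLEX are recorded unchanged
  // on both sides.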
  /** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module TLInterconnectCoupler_cbus_to_l2_ctrl( // @[LazyModuleImp.scala:138:7]
input clock, // @[LazyModuleImp.scala:138:7]
input reset, // @[LazyModuleImp.scala:138:7]
input auto_buffer_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_buffer_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_buffer_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [8:0] auto_buffer_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [25:0] auto_buffer_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_buffer_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_buffer_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_buffer_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_buffer_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_buffer_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_buffer_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [8:0] auto_buffer_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_buffer_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_tl_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_tl_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_tl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [4:0] auto_tl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [25:0] auto_tl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_tl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_tl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_tl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_tl_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_tl_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_tl_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_tl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [4:0] auto_tl_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_tl_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_tl_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_tl_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_tl_in_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
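  // Dataflow summary: A-channel requests entering on auto_tl_in (3-bit size, 5-bit source) are
  // split by TLFragmenter_LLCCtrl into smaller accesses (2-bit size, 9-bit source, the extra source
  // bits tracking the fragments), buffered by TLBuffer_a26d64s9k1z2u, and leave on auto_buffer_out;
  // D-channel responses take the reverse path.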
wire tlOut_d_valid; // @[MixedNode.scala:542:17]
wire tlOut_d_bits_corrupt; // @[MixedNode.scala:542:17]
wire [63:0] tlOut_d_bits_data; // @[MixedNode.scala:542:17]
wire tlOut_d_bits_denied; // @[MixedNode.scala:542:17]
wire tlOut_d_bits_sink; // @[MixedNode.scala:542:17]
wire [4:0] tlOut_d_bits_source; // @[MixedNode.scala:542:17]
wire [2:0] tlOut_d_bits_size; // @[MixedNode.scala:542:17]
wire [1:0] tlOut_d_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] tlOut_d_bits_opcode; // @[MixedNode.scala:542:17]
wire tlOut_a_ready; // @[MixedNode.scala:542:17]
wire _fragmenter_auto_anon_out_a_valid; // @[Fragmenter.scala:345:34]
wire [2:0] _fragmenter_auto_anon_out_a_bits_opcode; // @[Fragmenter.scala:345:34]
wire [2:0] _fragmenter_auto_anon_out_a_bits_param; // @[Fragmenter.scala:345:34]
wire [1:0] _fragmenter_auto_anon_out_a_bits_size; // @[Fragmenter.scala:345:34]
wire [8:0] _fragmenter_auto_anon_out_a_bits_source; // @[Fragmenter.scala:345:34]
wire [25:0] _fragmenter_auto_anon_out_a_bits_address; // @[Fragmenter.scala:345:34]
wire [7:0] _fragmenter_auto_anon_out_a_bits_mask; // @[Fragmenter.scala:345:34]
wire [63:0] _fragmenter_auto_anon_out_a_bits_data; // @[Fragmenter.scala:345:34]
wire _fragmenter_auto_anon_out_a_bits_corrupt; // @[Fragmenter.scala:345:34]
wire _fragmenter_auto_anon_out_d_ready; // @[Fragmenter.scala:345:34]
wire _buffer_auto_in_a_ready; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_valid; // @[Buffer.scala:75:28]
wire [2:0] _buffer_auto_in_d_bits_opcode; // @[Buffer.scala:75:28]
wire [1:0] _buffer_auto_in_d_bits_param; // @[Buffer.scala:75:28]
wire [1:0] _buffer_auto_in_d_bits_size; // @[Buffer.scala:75:28]
wire [8:0] _buffer_auto_in_d_bits_source; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_bits_sink; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_bits_denied; // @[Buffer.scala:75:28]
wire [63:0] _buffer_auto_in_d_bits_data; // @[Buffer.scala:75:28]
wire _buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:75:28]
wire auto_buffer_out_a_ready_0 = auto_buffer_out_a_ready; // @[LazyModuleImp.scala:138:7]
wire auto_buffer_out_d_valid_0 = auto_buffer_out_d_valid; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_buffer_out_d_bits_opcode_0 = auto_buffer_out_d_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [1:0] auto_buffer_out_d_bits_size_0 = auto_buffer_out_d_bits_size; // @[LazyModuleImp.scala:138:7]
wire [8:0] auto_buffer_out_d_bits_source_0 = auto_buffer_out_d_bits_source; // @[LazyModuleImp.scala:138:7]
wire [63:0] auto_buffer_out_d_bits_data_0 = auto_buffer_out_d_bits_data; // @[LazyModuleImp.scala:138:7]
wire auto_tl_in_a_valid_0 = auto_tl_in_a_valid; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_tl_in_a_bits_opcode_0 = auto_tl_in_a_bits_opcode; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_tl_in_a_bits_param_0 = auto_tl_in_a_bits_param; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_tl_in_a_bits_size_0 = auto_tl_in_a_bits_size; // @[LazyModuleImp.scala:138:7]
wire [4:0] auto_tl_in_a_bits_source_0 = auto_tl_in_a_bits_source; // @[LazyModuleImp.scala:138:7]
wire [25:0] auto_tl_in_a_bits_address_0 = auto_tl_in_a_bits_address; // @[LazyModuleImp.scala:138:7]
wire [7:0] auto_tl_in_a_bits_mask_0 = auto_tl_in_a_bits_mask; // @[LazyModuleImp.scala:138:7]
wire [63:0] auto_tl_in_a_bits_data_0 = auto_tl_in_a_bits_data; // @[LazyModuleImp.scala:138:7]
wire auto_tl_in_a_bits_corrupt_0 = auto_tl_in_a_bits_corrupt; // @[LazyModuleImp.scala:138:7]
wire auto_tl_in_d_ready_0 = auto_tl_in_d_ready; // @[LazyModuleImp.scala:138:7]
wire auto_buffer_out_d_bits_sink = 1'h0; // @[Buffer.scala:75:28]
wire auto_buffer_out_d_bits_denied = 1'h0; // @[Buffer.scala:75:28]
wire auto_buffer_out_d_bits_corrupt = 1'h0; // @[Buffer.scala:75:28]
wire [1:0] auto_buffer_out_d_bits_param = 2'h0; // @[Buffer.scala:75:28]
wire tlIn_a_ready; // @[MixedNode.scala:551:17]
wire tlIn_a_valid = auto_tl_in_a_valid_0; // @[MixedNode.scala:551:17]
wire [2:0] tlIn_a_bits_opcode = auto_tl_in_a_bits_opcode_0; // @[MixedNode.scala:551:17]
wire [2:0] tlIn_a_bits_param = auto_tl_in_a_bits_param_0; // @[MixedNode.scala:551:17]
wire [2:0] tlIn_a_bits_size = auto_tl_in_a_bits_size_0; // @[MixedNode.scala:551:17]
wire [4:0] tlIn_a_bits_source = auto_tl_in_a_bits_source_0; // @[MixedNode.scala:551:17]
wire [25:0] tlIn_a_bits_address = auto_tl_in_a_bits_address_0; // @[MixedNode.scala:551:17]
wire [7:0] tlIn_a_bits_mask = auto_tl_in_a_bits_mask_0; // @[MixedNode.scala:551:17]
wire [63:0] tlIn_a_bits_data = auto_tl_in_a_bits_data_0; // @[MixedNode.scala:551:17]
wire tlIn_a_bits_corrupt = auto_tl_in_a_bits_corrupt_0; // @[MixedNode.scala:551:17]
wire tlIn_d_ready = auto_tl_in_d_ready_0; // @[MixedNode.scala:551:17]
wire tlIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] tlIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] tlIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [2:0] tlIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [4:0] tlIn_d_bits_source; // @[MixedNode.scala:551:17]
wire tlIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire tlIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [63:0] tlIn_d_bits_data; // @[MixedNode.scala:551:17]
wire tlIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire [2:0] auto_buffer_out_a_bits_opcode_0; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_buffer_out_a_bits_param_0; // @[LazyModuleImp.scala:138:7]
wire [1:0] auto_buffer_out_a_bits_size_0; // @[LazyModuleImp.scala:138:7]
wire [8:0] auto_buffer_out_a_bits_source_0; // @[LazyModuleImp.scala:138:7]
wire [25:0] auto_buffer_out_a_bits_address_0; // @[LazyModuleImp.scala:138:7]
wire [7:0] auto_buffer_out_a_bits_mask_0; // @[LazyModuleImp.scala:138:7]
wire [63:0] auto_buffer_out_a_bits_data_0; // @[LazyModuleImp.scala:138:7]
wire auto_buffer_out_a_bits_corrupt_0; // @[LazyModuleImp.scala:138:7]
wire auto_buffer_out_a_valid_0; // @[LazyModuleImp.scala:138:7]
wire auto_buffer_out_d_ready_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_in_a_ready_0; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_tl_in_d_bits_opcode_0; // @[LazyModuleImp.scala:138:7]
wire [1:0] auto_tl_in_d_bits_param_0; // @[LazyModuleImp.scala:138:7]
wire [2:0] auto_tl_in_d_bits_size_0; // @[LazyModuleImp.scala:138:7]
wire [4:0] auto_tl_in_d_bits_source_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_in_d_bits_sink_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_in_d_bits_denied_0; // @[LazyModuleImp.scala:138:7]
wire [63:0] auto_tl_in_d_bits_data_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_in_d_bits_corrupt_0; // @[LazyModuleImp.scala:138:7]
wire auto_tl_in_d_valid_0; // @[LazyModuleImp.scala:138:7]
assign tlIn_a_ready = tlOut_a_ready; // @[MixedNode.scala:542:17, :551:17]
assign tlIn_d_valid = tlOut_d_valid; // @[MixedNode.scala:542:17, :551:17]
assign tlIn_d_bits_opcode = tlOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign tlIn_d_bits_param = tlOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign tlIn_d_bits_size = tlOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign tlIn_d_bits_source = tlOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign tlIn_d_bits_sink = tlOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17]
assign tlIn_d_bits_denied = tlOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17]
assign tlIn_d_bits_data = tlOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire [2:0] tlOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] tlOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [2:0] tlOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [4:0] tlOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [25:0] tlOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] tlOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] tlOut_a_bits_data; // @[MixedNode.scala:542:17]
wire tlOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
assign tlIn_d_bits_corrupt = tlOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
wire tlOut_a_valid; // @[MixedNode.scala:542:17]
wire tlOut_d_ready; // @[MixedNode.scala:542:17]
assign auto_tl_in_a_ready_0 = tlIn_a_ready; // @[MixedNode.scala:551:17]
assign tlOut_a_valid = tlIn_a_valid; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_opcode = tlIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_param = tlIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_size = tlIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_source = tlIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_address = tlIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_mask = tlIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_data = tlIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_a_bits_corrupt = tlIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17]
assign tlOut_d_ready = tlIn_d_ready; // @[MixedNode.scala:542:17, :551:17]
assign auto_tl_in_d_valid_0 = tlIn_d_valid; // @[MixedNode.scala:551:17]
assign auto_tl_in_d_bits_opcode_0 = tlIn_d_bits_opcode; // @[MixedNode.scala:551:17]
assign auto_tl_in_d_bits_param_0 = tlIn_d_bits_param; // @[MixedNode.scala:551:17]
assign auto_tl_in_d_bits_size_0 = tlIn_d_bits_size; // @[MixedNode.scala:551:17]
assign auto_tl_in_d_bits_source_0 = tlIn_d_bits_source; // @[MixedNode.scala:551:17]
assign auto_tl_in_d_bits_sink_0 = tlIn_d_bits_sink; // @[MixedNode.scala:551:17]
assign auto_tl_in_d_bits_denied_0 = tlIn_d_bits_denied; // @[MixedNode.scala:551:17]
assign auto_tl_in_d_bits_data_0 = tlIn_d_bits_data; // @[MixedNode.scala:551:17]
assign auto_tl_in_d_bits_corrupt_0 = tlIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
TLBuffer_a26d64s9k1z2u buffer ( // @[Buffer.scala:75:28]
.clock (clock),
.reset (reset),
.auto_in_a_ready (_buffer_auto_in_a_ready),
.auto_in_a_valid (_fragmenter_auto_anon_out_a_valid), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_opcode (_fragmenter_auto_anon_out_a_bits_opcode), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_param (_fragmenter_auto_anon_out_a_bits_param), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_size (_fragmenter_auto_anon_out_a_bits_size), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_source (_fragmenter_auto_anon_out_a_bits_source), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_address (_fragmenter_auto_anon_out_a_bits_address), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_mask (_fragmenter_auto_anon_out_a_bits_mask), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_data (_fragmenter_auto_anon_out_a_bits_data), // @[Fragmenter.scala:345:34]
.auto_in_a_bits_corrupt (_fragmenter_auto_anon_out_a_bits_corrupt), // @[Fragmenter.scala:345:34]
.auto_in_d_ready (_fragmenter_auto_anon_out_d_ready), // @[Fragmenter.scala:345:34]
.auto_in_d_valid (_buffer_auto_in_d_valid),
.auto_in_d_bits_opcode (_buffer_auto_in_d_bits_opcode),
.auto_in_d_bits_param (_buffer_auto_in_d_bits_param),
.auto_in_d_bits_size (_buffer_auto_in_d_bits_size),
.auto_in_d_bits_source (_buffer_auto_in_d_bits_source),
.auto_in_d_bits_sink (_buffer_auto_in_d_bits_sink),
.auto_in_d_bits_denied (_buffer_auto_in_d_bits_denied),
.auto_in_d_bits_data (_buffer_auto_in_d_bits_data),
.auto_in_d_bits_corrupt (_buffer_auto_in_d_bits_corrupt),
.auto_out_a_ready (auto_buffer_out_a_ready_0), // @[LazyModuleImp.scala:138:7]
.auto_out_a_valid (auto_buffer_out_a_valid_0),
.auto_out_a_bits_opcode (auto_buffer_out_a_bits_opcode_0),
.auto_out_a_bits_param (auto_buffer_out_a_bits_param_0),
.auto_out_a_bits_size (auto_buffer_out_a_bits_size_0),
.auto_out_a_bits_source (auto_buffer_out_a_bits_source_0),
.auto_out_a_bits_address (auto_buffer_out_a_bits_address_0),
.auto_out_a_bits_mask (auto_buffer_out_a_bits_mask_0),
.auto_out_a_bits_data (auto_buffer_out_a_bits_data_0),
.auto_out_a_bits_corrupt (auto_buffer_out_a_bits_corrupt_0),
.auto_out_d_ready (auto_buffer_out_d_ready_0),
.auto_out_d_valid (auto_buffer_out_d_valid_0), // @[LazyModuleImp.scala:138:7]
.auto_out_d_bits_opcode (auto_buffer_out_d_bits_opcode_0), // @[LazyModuleImp.scala:138:7]
.auto_out_d_bits_size (auto_buffer_out_d_bits_size_0), // @[LazyModuleImp.scala:138:7]
.auto_out_d_bits_source (auto_buffer_out_d_bits_source_0), // @[LazyModuleImp.scala:138:7]
.auto_out_d_bits_data (auto_buffer_out_d_bits_data_0) // @[LazyModuleImp.scala:138:7]
); // @[Buffer.scala:75:28]
TLFragmenter_LLCCtrl fragmenter ( // @[Fragmenter.scala:345:34]
.clock (clock),
.reset (reset),
.auto_anon_in_a_ready (tlOut_a_ready),
.auto_anon_in_a_valid (tlOut_a_valid), // @[MixedNode.scala:542:17]
.auto_anon_in_a_bits_opcode (tlOut_a_bits_opcode), // @[MixedNode.scala:542:17]
.auto_anon_in_a_bits_param (tlOut_a_bits_param), // @[MixedNode.scala:542:17]
.auto_anon_in_a_bits_size (tlOut_a_bits_size), // @[MixedNode.scala:542:17]
.auto_anon_in_a_bits_source (tlOut_a_bits_source), // @[MixedNode.scala:542:17]
.auto_anon_in_a_bits_address (tlOut_a_bits_address), // @[MixedNode.scala:542:17]
.auto_anon_in_a_bits_mask (tlOut_a_bits_mask), // @[MixedNode.scala:542:17]
.auto_anon_in_a_bits_data (tlOut_a_bits_data), // @[MixedNode.scala:542:17]
.auto_anon_in_a_bits_corrupt (tlOut_a_bits_corrupt), // @[MixedNode.scala:542:17]
.auto_anon_in_d_ready (tlOut_d_ready), // @[MixedNode.scala:542:17]
.auto_anon_in_d_valid (tlOut_d_valid),
.auto_anon_in_d_bits_opcode (tlOut_d_bits_opcode),
.auto_anon_in_d_bits_param (tlOut_d_bits_param),
.auto_anon_in_d_bits_size (tlOut_d_bits_size),
.auto_anon_in_d_bits_source (tlOut_d_bits_source),
.auto_anon_in_d_bits_sink (tlOut_d_bits_sink),
.auto_anon_in_d_bits_denied (tlOut_d_bits_denied),
.auto_anon_in_d_bits_data (tlOut_d_bits_data),
.auto_anon_in_d_bits_corrupt (tlOut_d_bits_corrupt),
.auto_anon_out_a_ready (_buffer_auto_in_a_ready), // @[Buffer.scala:75:28]
.auto_anon_out_a_valid (_fragmenter_auto_anon_out_a_valid),
.auto_anon_out_a_bits_opcode (_fragmenter_auto_anon_out_a_bits_opcode),
.auto_anon_out_a_bits_param (_fragmenter_auto_anon_out_a_bits_param),
.auto_anon_out_a_bits_size (_fragmenter_auto_anon_out_a_bits_size),
.auto_anon_out_a_bits_source (_fragmenter_auto_anon_out_a_bits_source),
.auto_anon_out_a_bits_address (_fragmenter_auto_anon_out_a_bits_address),
.auto_anon_out_a_bits_mask (_fragmenter_auto_anon_out_a_bits_mask),
.auto_anon_out_a_bits_data (_fragmenter_auto_anon_out_a_bits_data),
.auto_anon_out_a_bits_corrupt (_fragmenter_auto_anon_out_a_bits_corrupt),
.auto_anon_out_d_ready (_fragmenter_auto_anon_out_d_ready),
.auto_anon_out_d_valid (_buffer_auto_in_d_valid), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_opcode (_buffer_auto_in_d_bits_opcode), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_param (_buffer_auto_in_d_bits_param), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_size (_buffer_auto_in_d_bits_size), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_source (_buffer_auto_in_d_bits_source), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_sink (_buffer_auto_in_d_bits_sink), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_denied (_buffer_auto_in_d_bits_denied), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_data (_buffer_auto_in_d_bits_data), // @[Buffer.scala:75:28]
.auto_anon_out_d_bits_corrupt (_buffer_auto_in_d_bits_corrupt) // @[Buffer.scala:75:28]
); // @[Fragmenter.scala:345:34]
assign auto_buffer_out_a_valid = auto_buffer_out_a_valid_0; // @[LazyModuleImp.scala:138:7]
assign auto_buffer_out_a_bits_opcode = auto_buffer_out_a_bits_opcode_0; // @[LazyModuleImp.scala:138:7]
assign auto_buffer_out_a_bits_param = auto_buffer_out_a_bits_param_0; // @[LazyModuleImp.scala:138:7]
assign auto_buffer_out_a_bits_size = auto_buffer_out_a_bits_size_0; // @[LazyModuleImp.scala:138:7]
assign auto_buffer_out_a_bits_source = auto_buffer_out_a_bits_source_0; // @[LazyModuleImp.scala:138:7]
assign auto_buffer_out_a_bits_address = auto_buffer_out_a_bits_address_0; // @[LazyModuleImp.scala:138:7]
assign auto_buffer_out_a_bits_mask = auto_buffer_out_a_bits_mask_0; // @[LazyModuleImp.scala:138:7]
assign auto_buffer_out_a_bits_data = auto_buffer_out_a_bits_data_0; // @[LazyModuleImp.scala:138:7]
assign auto_buffer_out_a_bits_corrupt = auto_buffer_out_a_bits_corrupt_0; // @[LazyModuleImp.scala:138:7]
assign auto_buffer_out_d_ready = auto_buffer_out_d_ready_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_a_ready = auto_tl_in_a_ready_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_valid = auto_tl_in_d_valid_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_opcode = auto_tl_in_d_bits_opcode_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_param = auto_tl_in_d_bits_param_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_size = auto_tl_in_d_bits_size_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_source = auto_tl_in_d_bits_source_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_sink = auto_tl_in_d_bits_sink_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_denied = auto_tl_in_d_bits_denied_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_data = auto_tl_in_d_bits_data_0; // @[LazyModuleImp.scala:138:7]
assign auto_tl_in_d_bits_corrupt = auto_tl_in_d_bits_corrupt_0; // @[LazyModuleImp.scala:138:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
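// A convenience wrapper: TLMonitor(enable, node) either returns `node` untouched, or, when
// `enable` is true, rebinds it through an ephemeral node named "monitor" under EnableMonitors so
// that a TLMonitor instance is attached to the resulting edge when the graph is instantiated.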
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
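  // That is, for every client that could have issued `source`, `address` must fall inside one of
  // that client's declared visibility regions; clients whose sourceId range does not contain
  // `source` are ignored. For example, a client declaring visibility Seq(AddressSet(0x80000000L,
  // 0xffff)) would fail this check for any address outside that 64 KiB window.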
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
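  // Each opcode case above checks two things: that the diplomatic parameters permit the message at
  // all (the master `emits*` it and the slave `supports*Safe` it at this address and size), and
  // that the per-beat fields (alignment, param, mask, corrupt) are legal for that message type.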
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
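  // The same latch-and-compare pattern is used for the B, C and D channels below: the header
  // fields are captured on the first beat of a burst (fire && *_first) and every subsequent valid
  // beat must present identical header fields until the burst completes.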
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
    monAssert (IfThen(my_resp_pend, !my_a_first_beat),
      "Request message should not be sent with a source ID for which a response message " +
      "is already pending (not received until current cycle) for a prior request message " +
      "with the same source ID" + extra)
    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
      "Response message should be accepted with a source ID only if a request message with the " +
      "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
      "Response message should be sent with a source ID only if a request message with the " +
      "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
      "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
      "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
      "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
      "request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
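// Per-source bookkeeping: inflight_opcodes and inflight_sizes pack one small field per source
// ID, holding ((opcode << 1) | 1) and ((size << 1) | 1) respectively, so an all-zero field
// means "no request outstanding". The lookups below select a field by shifting, mask it out,
// and shift right by one to recover the original opcode/size.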
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
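// The per-source legality checks above all follow the same one-hot set/clear pattern. The
// standalone sketch below (illustrative only; the module and signal names are invented here
// and are not part of the original source) shows that pattern in isolation: mark a source
// busy when a request fires, clear it when the matching response fires, and assert that a
// busy source is never reused.
import chisel3._
import chisel3.util._
class SourceTrackerSketch(numSources: Int) extends Module {
  val io = IO(new Bundle {
    val req_fire = Input(Bool())
    val req_source = Input(UInt(log2Ceil(numSources).W))
    val resp_fire = Input(Bool())
    val resp_source = Input(UInt(log2Ceil(numSources).W))
  })
  // One bit per source ID: set on an accepted request, cleared on the matching response
  val inflight = RegInit(0.U(numSources.W))
  val set = Mux(io.req_fire, UIntToOH(io.req_source, numSources), 0.U)
  val clr = Mux(io.resp_fire, UIntToOH(io.resp_source, numSources), 0.U)
  when (io.req_fire) { assert(!inflight(io.req_source), "source ID re-used while still in flight") }
  when (io.resp_fire) { assert((inflight | set)(io.resp_source), "response for nothing in flight") }
  inflight := (inflight | set) & ~clr
}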
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
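/** Minimal usage sketch (illustrative only; the module name and plusarg name below are
  * invented for documentation and are not part of the original source): read a runtime-tunable
  * limit with PlusArg and flag a timeout once a free-running counter exceeds it, mirroring the
  * watchdog built inside the TLMonitor above.
  */
class PlusArgWatchdogSketch extends Module {
  val io = IO(new Bundle { val kick = Input(Bool()) })
  val limit = PlusArg("sketch_watchdog_limit", default = 0, docstring = "Cycles before giving up. Off if 0.")
  val cycles = RegInit(0.U(32.W))
  cycles := cycles + 1.U
  when (io.kick) { cycles := 0.U }
  assert(limit === 0.U || cycles < limit, "watchdog expired")
}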
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
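// A SyncReadMem read result is only valid for one cycle, so readAndHold uses holdUnless
// (a RegEnable) to keep presenting the most recently read value until the next enabled read.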
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
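// e.g. for n = 7 (not a power of two): 5.U.addWrap(4.U, 7) yields (5+4)-7 = 2 and
// 2.U.subWrap(4.U, 7) yields (2-4)+7 = 5; for power-of-two n only the low log2(n) bits are kept.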
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
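// e.g. leftOR("b00100".U) = b11100 (set bits smear toward the MSB), while
// rightOR("b00100".U) = b00111 (set bits smear toward the LSB).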
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
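// OptimizationBarrier returns a value that is logically identical to its input but routed
// through a trivial pass-through module, so that constant propagation and other cross-module
// optimizations cannot restructure logic across this boundary.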
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
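// e.g. groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) returns Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4)),
// with key order fixed by first appearance rather than by hash order.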
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
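// e.g. IdRange(4, 7) covers ids 4, 5 and 6: start ^ (end-1) = 4 ^ 6 = 2, so largestDeltaBit = 1
// and smallestCommonBit = 2, and the check becomes (x >> 2) === 1.U together with 0.U <= x(1,0) <= 2.U.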
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
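// e.g. AddressSet(0x0, 0xff).subtract(AddressSet(0x0, 0x0f)) covers the remainder 0x10-0xff as
// AddressSet(0x10, 0x0f), AddressSet(0x20, 0x1f), AddressSet(0x40, 0x3f) and AddressSet(0x80, 0x7f).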
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
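// e.g. enumerateMask(0x5) = Seq(0x0, 0x1, 0x4, 0x5) (every value covered by the mask), while
// enumerateBits(0x5) = Seq(0x1, 0x4) (each set bit in isolation).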
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
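// e.g. for a four-beat burst (beats1 = 3) the counter steps 0 -> 3 -> 2 -> 1 as beats fire:
// 'first' is true in the 0 state, 'last' in the 1 state, and count = beats1 & ~counter1
// reconstructs the beat index 0, 1, 2, 3.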
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
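  // Usage sketch (hypothetical caller, for illustration): the returned pair is typically
  // sampled to detect quiescence, e.g.
  //   val (in_flight, _) = edge.inFlight(tlBundle)
  //   val idle = in_flight === 0.U
  // Note that B/C/E traffic only contributes to the count when both sides support
  // Acquire/Probe (the `bce` condition above).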
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
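// Usage sketch (hypothetical client code, for illustration): inside a LazyModuleImp with
// `val (tl, edge) = node.out(0)`, an 8-byte read could be driven as
//   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   tl.a.valid := want_read && legal
//   tl.a.bits  := get
// where `legal` is the fast per-address legality check returned alongside the bundle,
// and `addr` / `want_read` are assumed signals of the surrounding module.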
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_46( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [5:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [5:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [5:0] source_1; // @[Monitor.scala:541:22]
reg denied; // @[Monitor.scala:543:22]
reg [57:0] inflight; // @[Monitor.scala:614:27]
reg [231:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [231:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire [63:0] _GEN_0 = {58'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [63:0] _GEN_3 = {58'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [57:0] inflight_1; // @[Monitor.scala:726:35]
reg [231:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |

Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
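// Illustrative sketch of the extension pattern described at the top of this file
// (hypothetical MyType; it mirrors the concrete instances in the object below):
//
//   implicit object MyTypeArithmetic extends Arithmetic[MyType] {
//     override implicit def cast(self: MyType) = new ArithmeticOps(self) {
//       override def +(t: MyType) = ...                 // however MyType defines addition
//       override def mac(m1: MyType, m2: MyType) = ...  // m1 * m2 + self
//       // ...every remaining abstract member above must also be overridden
//     }
//   }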
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
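      // Worked example: self = 11 (0b1011), u = 2 gives point_five = 1, zeros = 1,
      // ones_digit = 0, so r = 1 and the result is (11 >> 2) + 1 = 3 (11/4 = 2.75 rounds up).
      // For an exact tie such as self = 10 (0b1010), u = 2, zeros and ones_digit are both 0,
      // so r = 0 and the result stays at 2 (ties resolve toward the even truncated quotient).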
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
        // TODO this uses a floating-point sqrt unit, but we should use an integer sqrt instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
        // We translate our integer to floating-point form so that we can use the hardfloat sqrt
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
        // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
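      // For example, with u = 2 the constant built above encodes 2^((bias-2)-bias) = 0.25,
      // so the "right shift" is realised as a rounded floating-point multiply by 2^-u.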
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module MacUnit_227( // @[PE.scala:14:7]
input clock, // @[PE.scala:14:7]
input reset, // @[PE.scala:14:7]
input [7:0] io_in_a, // @[PE.scala:16:14]
input [7:0] io_in_b, // @[PE.scala:16:14]
input [19:0] io_in_c, // @[PE.scala:16:14]
output [19:0] io_out_d // @[PE.scala:16:14]
);
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
wire [19:0] io_out_d_0; // @[PE.scala:14:7]
wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
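// Worked example of the packed bookkeeping above: each source ID gets a 4-bit opcode slot and an
// a_size_bus_size-bit size slot, storing (value << 1) | 1 so that an all-zero slot means "nothing
// in flight". A Get (opcode 4) is stored as 0b1001; the lookup masks its slot with
// size_to_numfullbits(4.U) = 0b1111 and shifts right by 1, recovering 4.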
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
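// A minimal usage sketch of DecoupledHelper (the names `in`, `out`, and `busy` are illustrative
// placeholders, not taken from this repository): each ready/valid output excludes its own term,
// which is the referential-equality exclusion the require() message above refers to.
object DecoupledHelperExample {
  def connect[T <: Data](in: DecoupledIO[T], out: DecoupledIO[T], busy: Bool): Bool = {
    val helper = DecoupledHelper(in.valid, out.ready, !busy)
    out.valid := helper.fire(out.ready) // all terms except the consumer's own ready
    in.ready := helper.fire(in.valid) // all terms except the producer's own valid
    out.bits := in.bits
    helper.fire() // true exactly when the beat is transferred
  }
}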
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
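// A minimal usage sketch of MuxTLookup (the opcode encoding and the (latency, burst) pairs below
// are illustrative, not taken from this repository): returns the tuple associated with the
// matching key, or the default when no key matches.
object MuxTLookupExample {
  def latencyAndBurst(opcode: UInt): (UInt, Bool) =
    MuxTLookup(opcode, (1.U, false.B), Seq(
      0.U -> (4.U, true.B),
      1.U -> (2.U, false.B)))
}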
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
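// Worked examples: Str("ab") elaborates to 0x6162.U(16.W) (one byte per character, first character
// in the most-significant byte), and Str('A') is 0x41.U(8.W). The UInt/SInt overloads render a
// value as ASCII digits in the given radix, space-padded (or '-' for negative SInt values) on the left.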
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
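// Worked example: Majority(Seq(a, b, c)) reduces to (a && b) || (a && c) || (b && c), i.e. true
// whenever at least two of the three inputs are true.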
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
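// A minimal usage sketch (the plusarg name and the counter are illustrative): cap a free-running
// cycle counter at a limit supplied on the simulator command line, e.g. +max_core_cycles=1000000.
//   val cycles = RegInit(0.U(32.W))
//   cycles := cycles + 1.U
//   PlusArg.timeout("max_core_cycles", docstring = "Kill the simulation after this many cycles")(cycles)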
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
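// Worked examples for the extension methods above: with a 4-bit x = 0b1011.U, x.rotateRight(1) is
// 0b1101.U; with x = 5.U and y = 4.U, x.addWrap(y, 6) = 3.U (5 + 4 = 9, wrapped modulo 6);
// x.sextTo(8) replicates the sign bit into the new high bits, while x.padTo(8) zero-extends.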
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
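// Worked example: UIntToOH1(2.U, 4) = 0b0011 (a thermometer "one-hot minus one" code);
// OH1ToOH turns 0b0011 back into 0b0100, so OH1ToUInt recovers 2.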
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
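// Illustrative walkthrough (not part of the original source) of the hardware
// contains(x: UInt) above, for the hypothetical range IdRange(4, 6):
// start = 0b100 and end-1 = 0b101 differ only in bit 0, so largestDeltaBit = 0 and
// smallestCommonBit = 1. The check reduces to (x >> 1) === 0b10 with the low bit
// lying between (start & 1) = 0 and ((end-1) & 1) = 1, i.e. x is 4 or 5, exactly [4, 6).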
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
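// Illustrative sketch (not part of the original source), using hypothetical sizes:
// intersect keeps only the sizes both operands support, while mincover widens to a
// single power-of-two range covering both (and possibly sizes neither supports,
// which is why it is "not a union").
private object TransferSizesExample {
  val a = TransferSizes(4, 16)
  val b = TransferSizes(8, 64)
  val common = a.intersect(b) // TransferSizes(8, 16)
  val cover = a.mincover(b) // TransferSizes(4, 64)
}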
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
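// Illustrative sketch (not part of the original source), with hypothetical addresses:
// misaligned() decomposes an arbitrary [base, base+size) window into aligned
// base/mask sets, and unify() merges sets that differ in a single address bit.
private object AddressSetExample {
  // [0x1004, 0x1014) -> AddressSet(0x1004, 0x3), AddressSet(0x1008, 0x7), AddressSet(0x1010, 0x3)
  val pieces = AddressSet.misaligned(0x1004, 16)
  // two adjacent 256-byte regions -> Seq(AddressSet(0x0, 0x1ff))
  val merged = AddressSet.unify(Seq(AddressSet(0x0, 0xff), AddressSet(0x100, 0xff)))
}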
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
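// Illustrative sketch (not part of the original source): a BufferParams value is
// applied directly to a DecoupledIO to insert the described queue. Hypothetical use
// inside a module, where `deq` is some DecoupledIO source:
//   BufferParams.default(deq) // 2-entry Queue, registered (latency 1)
//   BufferParams.flow(deq)    // 1-entry Queue with combinational bypass (latency 0)
//   BufferParams.none(deq)    // depth 0: returns `deq` unchanged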
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
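// Illustrative sketch (not part of the original source), with hypothetical values:
// the implicit apply makes a plain Boolean override the original setting, while
// TriStateValue.unset leaves it untouched.
private object TriStateValueExample {
  val overridden = TriStateValue(false).update(orig = true) // false: explicitly set
  val kept = TriStateValue.unset.update(orig = true)        // true: original preserved
}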
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
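  // Illustrative walkthrough (not part of the original source), assuming a
  // hypothetical edge with beatBytes = 8: a PutFullData of size = 5 (32 bytes)
  // occupies numBeats = 4 data beats (numBeats1 = 3), while a Get of any size is a
  // single header beat on channel A: numBeats = 1 and numBeats1 = 0.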
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
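  // Illustrative walkthrough (not part of the original source): for a hypothetical
  // 4-beat message (beats1 = 3), successive fired beats report
  // first = 1,0,0,0; last = 0,0,0,1; done = last && fire; count = 0,1,2,3.
  // The counter loads beats1 on the first beat and then counts 3 -> 2 -> 1 back to 0.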
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
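  // Illustrative note (not part of the original source): e.g. a hypothetical
  // AcquireBlock with param NtoB only asks for a shared (B) copy, so needT is false,
  // whereas NtoT/BtoT acquires and all Put/Atomic opcodes require T (write) permission.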
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_11( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [26:0] _GEN = {23'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [8:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [3:0] size; // @[Monitor.scala:389:22]
reg [3:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
reg [8:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [3:0] size_1; // @[Monitor.scala:540:22]
reg [3:0] source_1; // @[Monitor.scala:541:22]
reg [2:0] sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [15:0] inflight; // @[Monitor.scala:614:27]
reg [63:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [127:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [8:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
reg [8:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25]
wire [15:0] _GEN_0 = {12'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [15:0] _GEN_3 = {12'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [15:0] inflight_1; // @[Monitor.scala:726:35]
reg [127:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [8:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25]
  reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
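  // Illustrative note (not part of the original source): visible() passes unless some
  // client owns the offending source ID and none of that client's visibility sets
  // contains the address. For example, a hypothetical client declaring
  // visibility = Seq(AddressSet(0x80000000L, 0xffff)) may only present addresses
  // inside that 64 KiB window on this edge.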
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
    //The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 // opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
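// e.g. size_to_numfullbits(3.U) yields "b111".U, a mask of three 1s; each source's
// slot in the packed inflight_opcodes/inflight_sizes registers below is extracted
// with such a mask.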
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
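// Decode sketch (illustrative; `s` is a hypothetical source ID): the opcode stored
// for source `s` can be recovered from the packed register the same way
// a_opcode_lookup does above:
//   val slot = (inflight_opcodes >> (s << log_a_opcode_bus_size.U)) &
//              size_to_numfullbits(1.U << log_a_opcode_bus_size.U)
//   val op = slot >> 1.U // low bit of the slot is the "entry valid" flag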
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 // opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
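// Usage sketch (illustrative; `enq`, `deq`, and `tokenFree` are hypothetical signals):
//   val helper = DecoupledHelper(enq.valid, deq.ready, tokenFree)
//   deq.valid := helper.fire(deq.ready) // AND of every term except deq.ready itself
//   enq.ready := helper.fire(enq.valid) // AND of every term except enq.valid itself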
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
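// Usage sketch (illustrative; `sel`, `len`, and `wrap` are hypothetical signals):
//   val (len, wrap) = MuxTLookup(sel, (0.U, false.B), Seq(
//     1.U -> (8.U, false.B),
//     2.U -> (16.U, true.B)))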
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
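// e.g. Str("ok") == 0x6f6b.U(16.W) (one byte per character) and Str('A') == 0x41.U(8.W)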
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; e.g. groupBy=2 takes 0010 => 01
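// Usage sketch (illustrative; `addr` and `lgSize` are hypothetical signals):
//   val mask = MaskGen(addr(2, 0), lgSize, beatBytes = 8) // 8-bit byte mask for an 8-byte beat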
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
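// Usage sketch (illustrative; the plusarg name "max_cycles" is hypothetical):
//   val max_cycles = PlusArg("max_cycles", docstring = "Stop after this many cycles (0 = never)")
//   // run the simulator with +max_cycles=1000 to override the default of 0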
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
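// Usage sketch (illustrative; `resp_data` and `resp_valid` are hypothetical signals):
//   val held = resp_data holdUnless resp_valid // passes resp_data when valid, otherwise holds the last valid value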
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
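// Worked examples for the helpers above (illustrative):
//   "b0011".U.rotateRight(1) // => "b1001".U
//   5.U(3.W).addWrap(6.U(3.W), 7) // => (5 + 6) % 7 = 4.U
//   "b0100".U.sextTo(6) // => "b000100".U (sign bit is 0, so zero-filled)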
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
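// e.g. groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2) == Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4)),
// preserving first-appearance order of the keys (unlike Seq.groupBy)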
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
* (B)ranch: the agent is on an outwards path from the point of serialization and holds a read-only copy.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
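// Example (illustrative): a ProbeAck that downgrades a dirty Trunk copy to Branch
// carries the shrink param TtoB, while a ProbeAck from an agent that already held
// Branch and keeps it carries the report param BtoB (no permission change).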
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
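// Hedged usage sketch (not in the original file): a trivial pass-through module built from the
// Record form of the port. `TLPassthroughSketch` and the use of emptyBundleParams are assumptions
// for illustration; with hasBCE = false only the A and D channels exist, so only they are wired.
class TLPassthroughSketch extends Module {
  val in  = IO(Flipped(new TLBundle(TLBundleParameters.emptyBundleParams)))
  val out = IO(new TLBundle(TLBundleParameters.emptyBundleParams))
  out.a <> in.a  // requests flow from the flipped (master-facing) side outward
  in.d  <> out.d // responses flow back
}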
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{
AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry,
IdRange, RegionType, TransferSizes
}
import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions}
import freechips.rocketchip.util.{
AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase,
CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct
}
import scala.math.max
//These transfer sizes describe requests issued from masters on the A channel that will be responded by slaves on the D channel
case class TLMasterToSlaveTransferSizes(
// Supports both Acquire+Release of the following two sizes:
acquireT: TransferSizes = TransferSizes.none,
acquireB: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none)
extends TLCommonTransferSizes {
def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .intersect(rhs.acquireT),
acquireB = acquireB .intersect(rhs.acquireB),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint))
def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .mincover(rhs.acquireT),
acquireB = acquireB .mincover(rhs.acquireB),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint))
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(acquireT, "T"),
str(acquireB, "B"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""acquireT = ${acquireT}
|acquireB = ${acquireB}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLMasterToSlaveTransferSizes {
def unknownEmits = TLMasterToSlaveTransferSizes(
acquireT = TransferSizes(1, 4096),
acquireB = TransferSizes(1, 4096),
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096))
def unknownSupports = TLMasterToSlaveTransferSizes()
}
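// Hypothetical sketch of how these size sets combine (object name and sizes are made up):
// `intersect` keeps only what both parties can handle, while `mincover` is the smallest set
// covering either.
object TLTransferSizesSketch {
  val onlyGets    = TLMasterToSlaveTransferSizes(get = TransferSizes(1, 64))
  val getsAndPuts = TLMasterToSlaveTransferSizes(get = TransferSizes(1, 16), putFull = TransferSizes(1, 16))
  val common = onlyGets.intersect(getsAndPuts) // get = 1..16 bytes, everything else none
  val either = onlyGets.mincover(getsAndPuts)  // get = 1..64 bytes, putFull = 1..16 bytes
}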
//These transfer sizes describe requests issued from slaves on the B channel that will be responded by masters on the C channel
case class TLSlaveToMasterTransferSizes(
probe: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none
) extends TLCommonTransferSizes {
def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .intersect(rhs.probe),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint)
)
def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .mincover(rhs.probe),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint)
)
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(probe, "P"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""probe = ${probe}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLSlaveToMasterTransferSizes {
def unknownEmits = TLSlaveToMasterTransferSizes(
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096),
probe = TransferSizes(1, 4096))
def unknownSupports = TLSlaveToMasterTransferSizes()
}
trait TLCommonTransferSizes {
def arithmetic: TransferSizes
def logical: TransferSizes
def get: TransferSizes
def putFull: TransferSizes
def putPartial: TransferSizes
def hint: TransferSizes
}
class TLSlaveParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
setName: Option[String],
val address: Seq[AddressSet],
val regionType: RegionType.T,
val executable: Boolean,
val fifoId: Option[Int],
val supports: TLMasterToSlaveTransferSizes,
val emits: TLSlaveToMasterTransferSizes,
// By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation)
val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData
// If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order
// Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo
val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData
val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck
// ReleaseAck may NEVER be denied
extends SimpleProduct
{
def sortedAddress = address.sorted
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters]
override def productPrefix = "TLSlaveParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 11
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => address
case 2 => resources
case 3 => regionType
case 4 => executable
case 5 => fifoId
case 6 => supports
case 7 => emits
case 8 => alwaysGrantsT
case 9 => mayDenyGet
case 10 => mayDenyPut
case _ => throw new IndexOutOfBoundsException(n.toString)
}
def supportsAcquireT: TransferSizes = supports.acquireT
def supportsAcquireB: TransferSizes = supports.acquireB
def supportsArithmetic: TransferSizes = supports.arithmetic
def supportsLogical: TransferSizes = supports.logical
def supportsGet: TransferSizes = supports.get
def supportsPutFull: TransferSizes = supports.putFull
def supportsPutPartial: TransferSizes = supports.putPartial
def supportsHint: TransferSizes = supports.hint
require (!address.isEmpty, "Address cannot be empty")
address.foreach { a => require (a.finite, "Address must be finite") }
address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)")
require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)")
require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)")
require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)")
require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)")
require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)")
require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT")
// Make sure that the regionType agrees with the capabilities
require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached
require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire
require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet
val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected")
val maxTransfer = List( // Largest supported transfer of all types
supportsAcquireT.max,
supportsAcquireB.max,
supportsArithmetic.max,
supportsLogical.max,
supportsGet.max,
supportsPutFull.max,
supportsPutPartial.max).max
val maxAddress = address.map(_.max).max
val minAlignment = address.map(_.alignment).min
// The device had better not support a transfer larger than its alignment
require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)")
def toResource: ResourceAddress = {
ResourceAddress(address, ResourcePermissions(
r = supportsAcquireB || supportsGet,
w = supportsAcquireT || supportsPutFull,
x = executable,
c = supportsAcquireB,
a = supportsArithmetic && supportsLogical))
}
def findTreeViolation() = nodePath.find {
case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false
case _: SinkNode[_, _, _, _, _] => false
case node => node.inputs.size != 1
}
def isTree = findTreeViolation() == None
def infoString = {
s"""Slave Name = ${name}
|Slave Address = ${address}
|supports = ${supports.infoString}
|
|""".stripMargin
}
def v1copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
new TLSlaveParameters(
setName = setName,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = emits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: Option[String] = setName,
address: Seq[AddressSet] = address,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
fifoId: Option[Int] = fifoId,
supports: TLMasterToSlaveTransferSizes = supports,
emits: TLSlaveToMasterTransferSizes = emits,
alwaysGrantsT: Boolean = alwaysGrantsT,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
@deprecated("Use v1copy instead of copy","")
def copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
v1copy(
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supportsAcquireT = supportsAcquireT,
supportsAcquireB = supportsAcquireB,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
}
object TLSlaveParameters {
def v1(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
{
new TLSlaveParameters(
setName = None,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLSlaveToMasterTransferSizes.unknownEmits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2(
address: Seq[AddressSet],
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Seq(),
name: Option[String] = None,
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
fifoId: Option[Int] = None,
supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports,
emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits,
alwaysGrantsT: Boolean = false,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
}
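// Hedged example, not part of the original file: describing a simple uncached 4 KiB scratchpad
// that supports 1-8 byte Gets and Puts. The base address, sizes, and object name are assumptions.
object TLSlaveParametersSketch {
  val sketchRam = TLSlaveParameters.v1(
    address            = Seq(AddressSet(0x80000000L, 0xfff)),
    regionType         = RegionType.UNCACHED,
    supportsGet        = TransferSizes(1, 8),
    supportsPutFull    = TransferSizes(1, 8),
    supportsPutPartial = TransferSizes(1, 8),
    fifoId             = Some(0))
}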
object TLManagerParameters {
@deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","")
def apply(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
TLSlaveParameters.v1(
address,
resources,
regionType,
executable,
nodePath,
supportsAcquireT,
supportsAcquireB,
supportsArithmetic,
supportsLogical,
supportsGet,
supportsPutFull,
supportsPutPartial,
supportsHint,
mayDenyGet,
mayDenyPut,
alwaysGrantsT,
fifoId,
)
}
case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int])
{
def members = Seq(a, b, c, d)
members.collect { case Some(beatBytes) =>
require (isPow2(beatBytes), "Data channel width must be a power of 2")
}
}
object TLChannelBeatBytes{
def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes(
Some(beatBytes),
Some(beatBytes),
Some(beatBytes),
Some(beatBytes))
def apply(): TLChannelBeatBytes = TLChannelBeatBytes(
None,
None,
None,
None)
}
class TLSlavePortParameters private(
val slaves: Seq[TLSlaveParameters],
val channelBytes: TLChannelBeatBytes,
val endSinkId: Int,
val minLatency: Int,
val responseFields: Seq[BundleFieldBase],
val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
def sortedSlaves = slaves.sortBy(_.sortedAddress.head)
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters]
override def productPrefix = "TLSlavePortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => slaves
case 1 => channelBytes
case 2 => endSinkId
case 3 => minLatency
case 4 => responseFields
case 5 => requestKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!slaves.isEmpty, "Slave ports must have slaves")
require (endSinkId >= 0, "Sink ids cannot be negative")
require (minLatency >= 0, "Minimum required latency cannot be negative")
// Using this API implies you cannot handle mixed-width busses
def beatBytes = {
channelBytes.members.foreach { width =>
require (width.isDefined && width == channelBytes.a)
}
channelBytes.a.get
}
// TODO this should be deprecated
def managers = slaves
def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = {
val relevant = slaves.filter(m => policy(m))
relevant.foreach { m =>
require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ")
}
}
// Bounds on required sizes
def maxAddress = slaves.map(_.maxAddress).max
def maxTransfer = slaves.map(_.maxTransfer).max
def mayDenyGet = slaves.exists(_.mayDenyGet)
def mayDenyPut = slaves.exists(_.mayDenyPut)
// Diplomatically determined operation sizes emitted by all outward Slaves
// as opposed to emits* which generate circuitry to check which specific addresses
val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _)
// Operation sizes emitted by at least one outward Slave
// as opposed to emits* which generate circuitry to check which specific addresses
val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _)
// Diplomatically determined operation sizes supported by all outward Slaves
// as opposed to supports* which generate circuitry to check which specific addresses
val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _)
val allSupportAcquireT = allSupportClaims.acquireT
val allSupportAcquireB = allSupportClaims.acquireB
val allSupportArithmetic = allSupportClaims.arithmetic
val allSupportLogical = allSupportClaims.logical
val allSupportGet = allSupportClaims.get
val allSupportPutFull = allSupportClaims.putFull
val allSupportPutPartial = allSupportClaims.putPartial
val allSupportHint = allSupportClaims.hint
// Operation sizes supported by at least one outward Slave
// as opposed to supports* which generate circuitry to check which specific addresses
val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _)
val anySupportAcquireT = !anySupportClaims.acquireT.none
val anySupportAcquireB = !anySupportClaims.acquireB.none
val anySupportArithmetic = !anySupportClaims.arithmetic.none
val anySupportLogical = !anySupportClaims.logical.none
val anySupportGet = !anySupportClaims.get.none
val anySupportPutFull = !anySupportClaims.putFull.none
val anySupportPutPartial = !anySupportClaims.putPartial.none
val anySupportHint = !anySupportClaims.hint.none
// Supporting Acquire means being routable for GrantAck
require ((endSinkId == 0) == !anySupportAcquireB)
// These return Option[TLSlaveParameters] for your convenience
def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address)))
// The safe version will check the entire address
def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _)))
// The fast version assumes the address is valid (you probably want fastProperty instead of this function)
def findFast(address: UInt) = {
val routingMask = AddressDecoder(slaves.map(_.address))
VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _)))
}
// Compute the simplest AddressSets that decide a key
def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = {
val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) =>
k -> vs.flatMap(_._2)
}
val reductionMask = AddressDecoder(groups.map(_._2))
groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) }
}
// Select a property
def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D =
Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) })
// Note: returns the actual fifoId + 1 or 0 if None
def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U)
def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B)
// Does this Port manage this ID/address?
def containsSafe(address: UInt) = findSafe(address).reduce(_ || _)
private def addressHelper(
// setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity
safe: Boolean,
// member filters out the sizes being checked based on the opcode being emitted or supported
member: TLSlaveParameters => TransferSizes,
address: UInt,
lgSize: UInt,
// range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity
range: Option[TransferSizes]): Bool = {
// trim reduces circuit complexity by intersecting checked sizes with the range argument
def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x)
// groupBy returns an unordered map, so groupByIntoSeq is used to get a deterministic Seq.
// `member` selects the kind of transfer being filtered on, and each slave's supported (or emitted)
// sizes are first trimmed to the caller-supplied range. Slaves are then grouped by that trimmed
// size, so the result maps every transfer size to all of the address sets that emit or support it;
// a single size may therefore be associated with several address ranges.
// `safe` is a trade-off between circuit cost and robustness: the safe version compares against all
// possible addresses, so it always gives the right answer even for an illegal address, while the
// fast version presumes the address is legal and uses a cheaper decoder that may answer
// incorrectly otherwise.
val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) =>
k -> vs.flatMap(_.address)
}
// safe produces a circuit that compares against all possible addresses,
// whereas fast presumes that the address is legal but uses an efficient address decoder
val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2))
// Simplified creates the most concise possible representation of each cases' address sets based on the mask.
val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) }
simplified.map { case (s, a) =>
// s is a transfer size: the operation's size must lie within s (or the caller's `range` pinned it to exactly s)
// We return an or-reduction of all the cases, checking whether any contains both the dynamic size and dynamic address on the wire.
((Some(s) == range).B || s.containsLg(lgSize)) &&
a.map(_.contains(address)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range)
def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range)
def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range)
def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range)
def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range)
def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range)
def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range)
def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range)
def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putPartial, address, lgSize, range)
def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range)
def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range)
def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range)
def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range)
def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range)
def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range)
def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range)
def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range)
def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption
def isTree = !slaves.exists(!_.isTree)
def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString
def v1copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
def v2copy(
slaves: Seq[TLSlaveParameters] = slaves,
channelBytes: TLChannelBeatBytes = channelBytes,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = slaves,
channelBytes = channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
v1copy(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
object TLSlavePortParameters {
def v1(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = TLChannelBeatBytes(beatBytes),
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
}
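// Hypothetical continuation of the sketch above: wrapping the single slave into a port with an
// assumed beatBytes of 8 and reading back a diplomatically-determined capability.
object TLSlavePortParametersSketch {
  val port = TLSlavePortParameters.v1(
    managers  = Seq(TLSlaveParametersSketch.sketchRam),
    beatBytes = 8)
  val portWideGet = port.allSupportGet // TransferSizes(1, 8) for this single-slave port
}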
object TLManagerPortParameters {
@deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","")
def apply(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
TLSlavePortParameters.v1(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
class TLMasterParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
val name: String,
val visibility: Seq[AddressSet],
val unusedRegionTypes: Set[RegionType.T],
val executesOnly: Boolean,
val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C.
val supports: TLSlaveToMasterTransferSizes,
val emits: TLMasterToSlaveTransferSizes,
val neverReleasesData: Boolean,
val sourceId: IdRange) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters]
override def productPrefix = "TLMasterParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 10
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => sourceId
case 2 => resources
case 3 => visibility
case 4 => unusedRegionTypes
case 5 => executesOnly
case 6 => requestFifo
case 7 => supports
case 8 => emits
case 9 => neverReleasesData
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!sourceId.isEmpty)
require (!visibility.isEmpty)
require (supports.putFull.contains(supports.putPartial))
// We only support these operations if we support Probe (ie: we're a cache)
require (supports.probe.contains(supports.arithmetic))
require (supports.probe.contains(supports.logical))
require (supports.probe.contains(supports.get))
require (supports.probe.contains(supports.putFull))
require (supports.probe.contains(supports.putPartial))
require (supports.probe.contains(supports.hint))
visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
val maxTransfer = List(
supports.probe.max,
supports.arithmetic.max,
supports.logical.max,
supports.get.max,
supports.putFull.max,
supports.putPartial.max).max
def infoString = {
s"""Master Name = ${name}
|visibility = ${visibility}
|emits = ${emits.infoString}
|sourceId = ${sourceId}
|
|""".stripMargin
}
def v1copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = this.resources,
name = name,
visibility = visibility,
unusedRegionTypes = this.unusedRegionTypes,
executesOnly = this.executesOnly,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = this.emits,
neverReleasesData = this.neverReleasesData,
sourceId = sourceId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: String = name,
visibility: Seq[AddressSet] = visibility,
unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes,
executesOnly: Boolean = executesOnly,
requestFifo: Boolean = requestFifo,
supports: TLSlaveToMasterTransferSizes = supports,
emits: TLMasterToSlaveTransferSizes = emits,
neverReleasesData: Boolean = neverReleasesData,
sourceId: IdRange = sourceId) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
@deprecated("Use v1copy instead of copy","")
def copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
v1copy(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
object TLMasterParameters {
def v1(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = Nil,
name = name,
visibility = visibility,
unusedRegionTypes = Set(),
executesOnly = false,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData = false,
sourceId = sourceId)
}
def v2(
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Nil,
name: String,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
unusedRegionTypes: Set[RegionType.T] = Set(),
executesOnly: Boolean = false,
requestFifo: Boolean = false,
supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports,
emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData: Boolean = false,
sourceId: IdRange = IdRange(0,1)) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
}
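// Hedged sketch (names are illustrative): a simple non-caching master that owns four source IDs
// and uses the default full-address visibility.
object TLMasterParametersSketch {
  val sketchClient = TLMasterParameters.v1(
    name     = "sketch-client",
    sourceId = IdRange(0, 4))
}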
object TLClientParameters {
@deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","")
def apply(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet.everything),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
TLMasterParameters.v1(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
class TLMasterPortParameters private(
val masters: Seq[TLMasterParameters],
val channelBytes: TLChannelBeatBytes,
val minLatency: Int,
val echoFields: Seq[BundleFieldBase],
val requestFields: Seq[BundleFieldBase],
val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters]
override def productPrefix = "TLMasterPortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => masters
case 1 => channelBytes
case 2 => minLatency
case 3 => echoFields
case 4 => requestFields
case 5 => responseKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!masters.isEmpty)
require (minLatency >= 0)
def clients = masters
// Require disjoint ranges for Ids
IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) =>
require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}")
}
// Bounds on required sizes
def endSourceId = masters.map(_.sourceId.end).max
def maxTransfer = masters.map(_.maxTransfer).max
// The unused sources < endSourceId
def unusedSources: Seq[Int] = {
val usedSources = masters.map(_.sourceId).sortBy(_.start)
((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) =>
end until start
}
}
// Diplomatically determined operation sizes emitted by all inward Masters
// as opposed to emits* which generate circuitry to check which specific addresses
val allEmitClaims = masters.map(_.emits).reduce( _ intersect _)
// Diplomatically determined operation sizes emitted by at least one inward Master
// as opposed to emits* which generate circuitry to check which specific addresses
val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _)
// Diplomatically determined operation sizes supported by all inward Masters
// as opposed to supports* which generate circuitry to check which specific addresses
val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _)
val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _)
val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _)
val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _)
val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _)
val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _)
val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _)
// Diplomatically determined operation sizes supported by at least one master
// as opposed to supports* which generate circuitry to check which specific addresses
val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _)
val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _)
val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _)
val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _)
val anySupportPutFull = masters.map(!_.supports.putFull.none) .reduce(_ || _)
val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _)
val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _)
// These return Option[TLMasterParameters] for your convenience
def find(id: Int) = masters.find(_.sourceId.contains(id))
// Synthesizable lookup methods
def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id)))
def contains(id: UInt) = find(id).reduce(_ || _)
def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B))
// Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters
private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = {
val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _)
// this if statement is a coarse generalization of the groupBy in sourceIdHelper2: it handles
// the special case where all masters fall into a single group.
if (allSame) member(masters(0)).containsLg(lgSize) else {
// Find the master associated with ID and returns whether that particular master is able to receive transaction of lgSize
Mux1H(find(id), masters.map(member(_).containsLg(lgSize)))
}
}
// Check for support of a given operation at a specific id
val supportsProbe = sourceIdHelper(_.supports.probe) _
val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _
val supportsLogical = sourceIdHelper(_.supports.logical) _
val supportsGet = sourceIdHelper(_.supports.get) _
val supportsPutFull = sourceIdHelper(_.supports.putFull) _
val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _
val supportsHint = sourceIdHelper(_.supports.hint) _
// TODO: Merge sourceIdHelper2 with sourceIdHelper
private def sourceIdHelper2(
member: TLMasterParameters => TransferSizes,
sourceId: UInt,
lgSize: UInt): Bool = {
// Because sourceIds are uniquely owned by each master, we use them to group the
// cases that have to be checked.
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) =>
k -> vs.map(_.sourceId)
}
emitCases.map { case (s, a) =>
(s.containsLg(lgSize)) &&
a.map(_.contains(sourceId)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
// Check for emit of a given operation at a specific id
def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize)
def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize)
def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize)
def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize)
def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize)
def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize)
def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize)
def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize)
def infoString = masters.map(_.infoString).mkString
def v1copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2copy(
masters: Seq[TLMasterParameters] = masters,
channelBytes: TLChannelBeatBytes = channelBytes,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
v1copy(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLClientPortParameters {
@deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","")
def apply(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
TLMasterPortParameters.v1(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLMasterPortParameters {
def v1(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = TLChannelBeatBytes(),
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2(
masters: Seq[TLMasterParameters],
channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(),
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
}
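// Hypothetical sketch: a one-entry client port built from the sketch master above, using the
// v1 constructor's defaults for latency and bundle fields.
object TLMasterPortParametersSketch {
  val port = TLMasterPortParameters.v1(clients = Seq(TLMasterParametersSketch.sketchClient))
}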
case class TLBundleParameters(
addressBits: Int,
dataBits: Int,
sourceBits: Int,
sinkBits: Int,
sizeBits: Int,
echoFields: Seq[BundleFieldBase],
requestFields: Seq[BundleFieldBase],
responseFields: Seq[BundleFieldBase],
hasBCE: Boolean)
{
// Chisel has issues with 0-width wires
require (addressBits >= 1)
require (dataBits >= 8)
require (sourceBits >= 1)
require (sinkBits >= 1)
require (sizeBits >= 1)
require (isPow2(dataBits))
echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") }
val addrLoBits = log2Up(dataBits/8)
// Used to uniquify bus IP names
def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u")
def union(x: TLBundleParameters) =
TLBundleParameters(
max(addressBits, x.addressBits),
max(dataBits, x.dataBits),
max(sourceBits, x.sourceBits),
max(sinkBits, x.sinkBits),
max(sizeBits, x.sizeBits),
echoFields = BundleField.union(echoFields ++ x.echoFields),
requestFields = BundleField.union(requestFields ++ x.requestFields),
responseFields = BundleField.union(responseFields ++ x.responseFields),
hasBCE || x.hasBCE)
}
object TLBundleParameters
{
val emptyBundleParams = TLBundleParameters(
addressBits = 1,
dataBits = 8,
sourceBits = 1,
sinkBits = 1,
sizeBits = 1,
echoFields = Nil,
requestFields = Nil,
responseFields = Nil,
hasBCE = false)
def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y))
def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) =
new TLBundleParameters(
addressBits = log2Up(slave.maxAddress + 1),
dataBits = slave.beatBytes * 8,
sourceBits = log2Up(master.endSourceId),
sinkBits = log2Up(slave.endSinkId),
sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1),
echoFields = master.echoFields,
requestFields = BundleField.accept(master.requestFields, slave.requestKeys),
responseFields = BundleField.accept(slave.responseFields, master.responseKeys),
hasBCE = master.anySupportProbe && slave.anySupportAcquireB)
}
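// Hedged sketch: the physical wire widths implied by the hypothetical master and slave ports
// defined in the sketches above (beatBytes = 8, a single 4 KiB slave at 0x8000_0000).
object TLBundleParametersSketch {
  val bundle = TLBundleParameters(TLMasterPortParametersSketch.port, TLSlavePortParametersSketch.port)
  // For these inputs: dataBits = 64, sourceBits = 2, hasBCE = false (no Probe/Acquire support).
}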
case class TLEdgeParameters(
master: TLMasterPortParameters,
slave: TLSlavePortParameters,
params: Parameters,
sourceInfo: SourceInfo) extends FormatEdge
{
// legacy names:
def manager = slave
def client = master
val maxTransfer = max(master.maxTransfer, slave.maxTransfer)
val maxLgSize = log2Ceil(maxTransfer)
// Sanity check the link...
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})")
def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims)
val bundle = TLBundleParameters(master, slave)
def formatEdge = master.infoString + "\n" + slave.infoString
}
case class TLCreditedDelay(
a: CreditedDelay,
b: CreditedDelay,
c: CreditedDelay,
d: CreditedDelay,
e: CreditedDelay)
{
def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay(
a = a + that.a,
b = b + that.b,
c = c + that.c,
d = d + that.d,
e = e + that.e)
override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})"
}
object TLCreditedDelay {
def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay)
}
case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val delay = client.delay + manager.delay
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters)
case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base))
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
// To be unified, devices must agree on all of these terms
case class ManagerUnificationKey(
resources: Seq[Resource],
regionType: RegionType.T,
executable: Boolean,
supportsAcquireT: TransferSizes,
supportsAcquireB: TransferSizes,
supportsArithmetic: TransferSizes,
supportsLogical: TransferSizes,
supportsGet: TransferSizes,
supportsPutFull: TransferSizes,
supportsPutPartial: TransferSizes,
supportsHint: TransferSizes)
object ManagerUnificationKey
{
def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey(
resources = x.resources,
regionType = x.regionType,
executable = x.executable,
supportsAcquireT = x.supportsAcquireT,
supportsAcquireB = x.supportsAcquireB,
supportsArithmetic = x.supportsArithmetic,
supportsLogical = x.supportsLogical,
supportsGet = x.supportsGet,
supportsPutFull = x.supportsPutFull,
supportsPutPartial = x.supportsPutPartial,
supportsHint = x.supportsHint)
}
object ManagerUnification
{
def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = {
slaves.groupBy(ManagerUnificationKey.apply).values.map { seq =>
val agree = seq.forall(_.fifoId == seq.head.fifoId)
seq(0).v1copy(
address = AddressSet.unify(seq.flatMap(_.address)),
fifoId = if (agree) seq(0).fifoId else None)
}.toList
}
}
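// Illustrative sketch, not part of the upstream file: the same groupBy-then-merge
// idiom on a plain tuple type, mirroring how ManagerUnification collapses slaves
// that share a ManagerUnificationKey into a single entry with a unified address set.
object UnificationIdiomSketch {
  def unify(xs: Seq[(String, Int)]): List[(String, Seq[Int])] =
    xs.groupBy(_._1).map { case (k, vs) => (k, vs.map(_._2)) }.toList
}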
case class TLBufferParams(
a: BufferParams = BufferParams.none,
b: BufferParams = BufferParams.none,
c: BufferParams = BufferParams.none,
d: BufferParams = BufferParams.none,
e: BufferParams = BufferParams.none
) extends DirectedBuffers[TLBufferParams] {
def copyIn(x: BufferParams) = this.copy(b = x, d = x)
def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x)
def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x)
}
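// Illustrative sketch, not part of the upstream file: requesting buffering on the
// master-to-slave channels (A, C, E) only, leaving B and D unbuffered. Assumes
// BufferParams.default from the diplomacy package is in scope.
object TLBufferParamsSketch {
  val requestChannelsOnly: TLBufferParams = TLBufferParams().copyOut(BufferParams.default)
}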
/** Pretty printing of TL source id maps */
class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] {
private val tlDigits = String.valueOf(tl.endSourceId-1).length()
protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s"
private val sorted = tl.masters.sortBy(_.sourceId)
val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c =>
TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo)
}
}
case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean)
extends IdMapEntry
{
val from = tlId
val to = tlId
val maxTransactionsInFlight = Some(tlId.size)
}
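// Illustrative sketch, not part of the upstream file: rendering the source-id
// layout of a master port; `pretty` comes from the IdMap base class and is the
// same call that TLEdge.prettySourceMapping makes in Edges.scala below.
object TLSourceIdMapSketch {
  def render(port: TLMasterPortParameters): String = (new TLSourceIdMap(port)).pretty
}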
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
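  // Worked example (illustrative comment, not upstream): for lgSize = 3 the mask is
  // 0x7, so isAligned(0x100.U, 3.U) holds while isAligned(0x104.U, 3.U) does not,
  // because an 8-byte access must have its three low address bits clear.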
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
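  // Worked example (illustrative comment, not upstream): with beatBytes = 8, a data
  // message of size 5 (32 bytes) gives numBeats = 4 and numBeats1 = 3, while any
  // size <= 3 (a single beat) or any message without data gives numBeats = 1 and
  // numBeats1 = 0.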
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
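  // Illustrative walkthrough (comment only, not upstream): for a 4-beat burst
  // (beats1 = 3) the counter loads 3 on the first accepted beat and then counts
  // 3 -> 2 -> 1; `first` is true on beat 0, `last` on the final beat (counter === 1,
  // or immediately when beats1 === 0), `done` is `last && fire`, and `count`
  // reconstructs the beat index 0, 1, 2, 3 from beats1 & ~counter1.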
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
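  // Examples (illustrative comment, not upstream): Get and PREFETCH_READ Hints never
  // need T permissions; Puts, atomics, and PREFETCH_WRITE Hints always do; Acquires
  // need T exactly when they request write permission (NtoT or BtoT), not for NtoB.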
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
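  // Typical use (illustrative comment, not upstream): the returned pair gates the
  // A channel, e.g.
  //   val (legal, a) = edge.AcquireBlock(sourceId, addr, lgSize, TLPermissions.NtoT)
  //   tl.a.valid := wantAcquire && legal
  //   tl.a.bits  := a
  // where sourceId, addr, lgSize and wantAcquire are hypothetical client-side signals.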
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
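// Illustrative sketch, not part of the upstream file: the usual pattern for driving
// channel A from one of the TLEdgeOut constructors above. The helper and its
// parameter names are hypothetical; only edge.Get and the (legal, bits) convention
// come from the code above.
object TLEdgeOutUsageSketch {
  def driveGet(edge: TLEdgeOut, a: DecoupledIO[TLBundleA],
               source: UInt, address: UInt, lgSize: UInt, want: Bool): Unit = {
    val (legal, bits) = edge.Get(source, address, lgSize)
    a.valid := want && legal // only raise valid for transfers the managers support
    a.bits := bits
  }
}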
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
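  // Example (illustrative comment, not upstream): myTranspose(Seq(Seq(1, 2), Seq(3, 4)))
  // yields Seq(Seq(1, 3), Seq(2, 4)); empty inner sequences are dropped first, so the
  // helper also tolerates ragged input.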
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
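// Illustrative sketch, not part of the upstream file: a manager-side reply built
// with the TLEdgeIn constructors above, acknowledging an A-channel Get with data
// on channel D. The helper and its parameter names are hypothetical.
object TLEdgeInUsageSketch {
  def ackGet(edge: TLEdgeIn, a: TLBundleA, d: DecoupledIO[TLBundleD],
             data: UInt, respond: Bool): Unit = {
    d.valid := respond
    d.bits := edge.AccessAck(a, data) // echoes a.source and a.size into the D beat
  }
}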
module TLMonitor_51( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [8:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [31:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [8:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [31:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [31:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_a_bits_source = 1'h0; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt = 1'h0; // @[Monitor.scala:36:7]
wire mask_sizeOH_shiftAmount = 1'h0; // @[OneHot.scala:64:49]
wire mask_sub_size = 1'h0; // @[Misc.scala:209:26]
wire _mask_sub_acc_T = 1'h0; // @[Misc.scala:215:38]
wire _mask_sub_acc_T_1 = 1'h0; // @[Misc.scala:215:38]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count = 1'h0; // @[Edges.scala:234:25]
wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27]
wire c_first_count = 1'h0; // @[Edges.scala:234:25]
wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21]
wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59]
wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14]
wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25]
wire c_set = 1'h0; // @[Monitor.scala:738:34]
wire c_set_wo_ready = 1'h0; // @[Monitor.scala:739:34]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_source = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_source = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire _source_ok_T = 1'h1; // @[Parameters.scala:46:9]
wire _source_ok_WIRE_0 = 1'h1; // @[Parameters.scala:1138:31]
wire mask_sub_sub_0_1 = 1'h1; // @[Misc.scala:206:21]
wire mask_sub_0_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_sub_1_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_size = 1'h1; // @[Misc.scala:209:26]
wire mask_acc = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_1 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_2 = 1'h1; // @[Misc.scala:215:29]
wire mask_acc_3 = 1'h1; // @[Misc.scala:215:29]
wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last = 1'h1; // @[Edges.scala:232:33]
wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_1 = 1'h1; // @[Edges.scala:232:33]
wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43]
wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33]
wire [1:0] is_aligned_mask = 2'h3; // @[package.scala:243:46]
wire [1:0] mask_lo = 2'h3; // @[Misc.scala:222:10]
wire [1:0] mask_hi = 2'h3; // @[Misc.scala:222:10]
wire [1:0] _a_first_beats1_decode_T_2 = 2'h3; // @[package.scala:243:46]
wire [1:0] _a_first_beats1_decode_T_5 = 2'h3; // @[package.scala:243:46]
wire [1:0] _c_first_beats1_decode_T_1 = 2'h3; // @[package.scala:243:76]
wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28]
wire [1:0] io_in_a_bits_size = 2'h2; // @[Monitor.scala:36:7]
wire [1:0] _mask_sizeOH_T = 2'h2; // @[Misc.scala:202:34]
wire [2:0] io_in_a_bits_param = 3'h0; // @[Monitor.scala:36:7]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [3:0] io_in_a_bits_mask = 4'hF; // @[Monitor.scala:36:7]
wire [3:0] mask = 4'hF; // @[Misc.scala:222:10]
wire [31:0] _c_first_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_first_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_first_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_wo_ready_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_wo_ready_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_interm_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_interm_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_interm_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_opcodes_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_opcodes_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_sizes_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_sizes_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _c_probe_ack_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _c_probe_ack_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [31:0] _same_cycle_resp_WIRE_4_bits_data = 32'h0; // @[Bundles.scala:265:74]
wire [31:0] _same_cycle_resp_WIRE_5_bits_data = 32'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_first_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_first_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_first_WIRE_2_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_first_WIRE_3_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_set_wo_ready_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_set_wo_ready_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_set_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_set_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_opcodes_set_interm_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_opcodes_set_interm_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_sizes_set_interm_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_sizes_set_interm_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_opcodes_set_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_opcodes_set_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_sizes_set_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_sizes_set_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_probe_ack_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_probe_ack_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _c_probe_ack_WIRE_2_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _c_probe_ack_WIRE_3_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _same_cycle_resp_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _same_cycle_resp_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _same_cycle_resp_WIRE_2_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _same_cycle_resp_WIRE_3_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [8:0] _same_cycle_resp_WIRE_4_bits_address = 9'h0; // @[Bundles.scala:265:74]
wire [8:0] _same_cycle_resp_WIRE_5_bits_address = 9'h0; // @[Bundles.scala:265:61]
wire [1:0] _is_aligned_mask_T_1 = 2'h0; // @[package.scala:243:76]
wire [1:0] _a_first_beats1_decode_T_1 = 2'h0; // @[package.scala:243:76]
wire [1:0] _a_first_beats1_decode_T_4 = 2'h0; // @[package.scala:243:76]
wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_first_beats1_decode_T_2 = 2'h0; // @[package.scala:243:46]
wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74]
wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [17:0] _c_sizes_set_T_1 = 18'h0; // @[Monitor.scala:768:52]
wire [3:0] _a_opcodes_set_T = 4'h0; // @[Monitor.scala:659:79]
wire [3:0] _a_sizes_set_T = 4'h0; // @[Monitor.scala:660:77]
wire [3:0] c_opcodes_set = 4'h0; // @[Monitor.scala:740:34]
wire [3:0] c_sizes_set = 4'h0; // @[Monitor.scala:741:34]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_opcodes_set_T = 4'h0; // @[Monitor.scala:767:79]
wire [3:0] _c_sizes_set_T = 4'h0; // @[Monitor.scala:768:77]
wire [18:0] _c_opcodes_set_T_1 = 19'h0; // @[Monitor.scala:767:54]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [1:0] _mask_sizeOH_T_1 = 2'h1; // @[OneHot.scala:65:12]
wire [1:0] _mask_sizeOH_T_2 = 2'h1; // @[OneHot.scala:65:27]
wire [1:0] mask_sizeOH = 2'h1; // @[Misc.scala:202:81]
wire [1:0] _a_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _a_set_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _c_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35]
wire [1:0] _c_set_T = 2'h1; // @[OneHot.scala:58:35]
wire [4:0] _c_first_beats1_decode_T = 5'h3; // @[package.scala:243:71]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] _a_sizes_set_interm_T_1 = 3'h5; // @[Monitor.scala:658:59]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] _a_sizes_set_interm_T = 3'h4; // @[Monitor.scala:658:51]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [4:0] _is_aligned_mask_T = 5'hC; // @[package.scala:243:71]
wire [4:0] _a_first_beats1_decode_T = 5'hC; // @[package.scala:243:71]
wire [4:0] _a_first_beats1_decode_T_3 = 5'hC; // @[package.scala:243:71]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [8:0] _is_aligned_T = {7'h0, io_in_a_bits_address_0[1:0]}; // @[Monitor.scala:36:7]
wire is_aligned = _is_aligned_T == 9'h0; // @[Edges.scala:21:{16,24}]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_1_2 = mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_eq; // @[Misc.scala:214:27, :215:38]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_eq_1; // @[Misc.scala:214:27, :215:38]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_eq_2; // @[Misc.scala:214:27, :215:38]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_eq_3; // @[Misc.scala:214:27, :215:38]
wire _source_ok_T_1 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_1; // @[Parameters.scala:1138:31]
wire _T_905 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_905; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_905; // @[Decoupled.scala:51:35]
wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
reg a_first_counter; // @[Edges.scala:229:27]
wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28]
wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [8:0] address; // @[Monitor.scala:391:22]
wire _T_978 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_978; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_978; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_978; // @[Decoupled.scala:51:35]
wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35]
wire [4:0] _GEN = 5'h3 << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [4:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [4:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [4:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN; // @[package.scala:243:71]
wire [1:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[1:0]; // @[package.scala:243:{71,76}]
wire [1:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
reg d_first_counter; // @[Edges.scala:229:27]
wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28]
wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [1:0] size_1; // @[Monitor.scala:540:22]
reg source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [1:0] inflight; // @[Monitor.scala:614:27]
reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [3:0] inflight_sizes; // @[Monitor.scala:618:33]
wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
reg a_first_counter_1; // @[Edges.scala:229:27]
wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35]
wire [1:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[1:0]; // @[package.scala:243:{71,76}]
wire [1:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
reg d_first_counter_1; // @[Edges.scala:229:27]
wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28]
wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21]
wire a_set; // @[Monitor.scala:626:34]
wire a_set_wo_ready; // @[Monitor.scala:627:34]
wire [3:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [3:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [3:0] _GEN_0 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [3:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_0; // @[Monitor.scala:637:69]
wire [3:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_0; // @[Monitor.scala:637:69, :641:65]
wire [3:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_0; // @[Monitor.scala:637:69, :680:101]
wire [3:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_0; // @[Monitor.scala:637:69, :681:99]
wire [3:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_0; // @[Monitor.scala:637:69, :749:69]
wire [3:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_0; // @[Monitor.scala:637:69, :750:67]
wire [3:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_0; // @[Monitor.scala:637:69, :790:101]
wire [3:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_0; // @[Monitor.scala:637:69, :791:99]
wire [3:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [15:0] _a_opcode_lookup_T_6 = {12'h0, _a_opcode_lookup_T_1}; // @[Monitor.scala:637:{44,97}]
wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [3:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [15:0] _a_size_lookup_T_6 = {12'h0, _a_size_lookup_T_1}; // @[Monitor.scala:637:97, :641:{40,91}]
wire [15:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[15:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _T_828 = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26]
assign a_set_wo_ready = _T_828; // @[Monitor.scala:627:34, :651:26]
wire _same_cycle_resp_T; // @[Monitor.scala:684:44]
assign _same_cycle_resp_T = _T_828; // @[Monitor.scala:651:26, :684:44]
assign a_set = _T_905 & a_first_1; // @[Decoupled.scala:51:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = a_set ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:626:34, :646:40, :655:70, :657:{28,61}]
assign a_sizes_set_interm = a_set ? 3'h5 : 3'h0; // @[Monitor.scala:626:34, :648:38, :655:70, :658:28]
wire [18:0] _a_opcodes_set_T_1 = {15'h0, a_opcodes_set_interm}; // @[Monitor.scala:646:40, :659:54]
assign a_opcodes_set = a_set ? _a_opcodes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:626:34, :630:33, :655:70, :659:{28,54}]
wire [17:0] _a_sizes_set_T_1 = {15'h0, a_sizes_set_interm}; // @[Monitor.scala:648:38, :659:54, :660:52]
assign a_sizes_set = a_set ? _a_sizes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:626:34, :632:31, :655:70, :660:{28,52}]
wire d_clr; // @[Monitor.scala:664:34]
wire d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [3:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [3:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_1 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_1; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_1; // @[Monitor.scala:673:46, :783:46]
wire _T_877 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [1:0] _GEN_2 = {1'h0, io_in_d_bits_source_0}; // @[OneHot.scala:58:35]
wire [1:0] _GEN_3 = 2'h1 << _GEN_2; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_3; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_3; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_3; // @[OneHot.scala:58:35]
wire [1:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_3; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_877 & ~d_release_ack & _d_clr_wo_ready_T[0]; // @[OneHot.scala:58:35]
wire _T_846 = _T_978 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_846 & _d_clr_T[0]; // @[OneHot.scala:58:35]
wire [30:0] _d_opcodes_clr_T_5 = 31'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_846 ? _d_opcodes_clr_T_5[3:0] : 4'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [30:0] _d_sizes_clr_T_5 = 31'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_846 ? _d_sizes_clr_T_5[3:0] : 4'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [1:0] _inflight_T = {inflight[1], inflight[0] | a_set}; // @[Monitor.scala:614:27, :626:34, :705:27]
wire _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [1:0] _inflight_T_2 = {1'h0, _inflight_T[0] & _inflight_T_1}; // @[Monitor.scala:705:{27,36,38}]
wire [3:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [3:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [3:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [3:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [3:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [3:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
reg [1:0] inflight_1; // @[Monitor.scala:726:35]
wire [1:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [3:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [3:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [3:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [3:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35]
wire [1:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[1:0]; // @[package.scala:243:{71,76}]
wire [1:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
reg d_first_counter_2; // @[Edges.scala:229:27]
wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25]
wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28]
wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28]
wire d_first_2 = ~d_first_counter_2; // @[Edges.scala:229:27, :231:25]
wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [3:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [15:0] _c_opcode_lookup_T_6 = {12'h0, _c_opcode_lookup_T_1}; // @[Monitor.scala:637:97, :749:{44,97}]
wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [3:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [15:0] _c_size_lookup_T_6 = {12'h0, _c_size_lookup_T_1}; // @[Monitor.scala:637:97, :750:{42,93}]
wire [15:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[15:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire d_clr_1; // @[Monitor.scala:774:34]
wire d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [3:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [3:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_949 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_949 & d_release_ack_1 & _d_clr_wo_ready_T_1[0]; // @[OneHot.scala:58:35]
wire _T_931 = _T_978 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_931 & _d_clr_T_1[0]; // @[OneHot.scala:58:35]
wire [30:0] _d_opcodes_clr_T_11 = 31'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_931 ? _d_opcodes_clr_T_11[3:0] : 4'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [30:0] _d_sizes_clr_T_11 = 31'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_931 ? _d_sizes_clr_T_11[3:0] : 4'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = ~io_in_d_bits_source_0; // @[Monitor.scala:36:7, :795:113]
wire _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [1:0] _inflight_T_5 = {1'h0, _inflight_T_3[0] & _inflight_T_4}; // @[Monitor.scala:814:{35,44,46}]
wire [3:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [3:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [3:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [3:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
val dataflow = UInt(1.W) // TODO make this an Enum
val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}
class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(inputType)
val in_c = Input(cType)
val out_d = Output(dType)
})
io.out_d := io.in_c.mac(io.in_a, io.in_b)
}
// TODO update documentation
/**
* A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
* @param width Data width of operands
*/
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
(implicit ev: Arithmetic[T]) extends Module { // Debugging variables
import ev._
val io = IO(new Bundle {
val in_a = Input(inputType)
val in_b = Input(outputType)
val in_d = Input(outputType)
val out_a = Output(inputType)
val out_b = Output(outputType)
val out_c = Output(outputType)
val in_control = Input(new PEControl(accType))
val out_control = Output(new PEControl(accType))
val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))
val in_last = Input(Bool())
val out_last = Output(Bool())
val in_valid = Input(Bool())
val out_valid = Output(Bool())
val bad_dataflow = Output(Bool())
})
val cType = if (df == Dataflow.WS) inputType else accType
// When creating PEs that support multiple dataflows, the
// elaboration/synthesis tools often fail to consolidate and de-duplicate
// MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
// module here which just performs a single MAC operation
val mac_unit = Module(new MacUnit(inputType,
if (df == Dataflow.WS) outputType else accType, outputType))
val a = io.in_a
val b = io.in_b
val d = io.in_d
val c1 = Reg(cType)
val c2 = Reg(cType)
val dataflow = io.in_control.dataflow
val prop = io.in_control.propagate
val shift = io.in_control.shift
val id = io.in_id
val last = io.in_last
val valid = io.in_valid
io.out_a := a
io.out_control.dataflow := dataflow
io.out_control.propagate := prop
io.out_control.shift := shift
io.out_id := id
io.out_last := last
io.out_valid := valid
mac_unit.io.in_a := a
val last_s = RegEnable(prop, valid)
val flip = last_s =/= prop
val shift_offset = Mux(flip, shift, 0.U)
// Which dataflow are we using?
val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)
// Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
val COMPUTE = 0.U(1.W)
val PROPAGATE = 1.U(1.W)
io.bad_dataflow := false.B
when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
c2 := mac_unit.io.out_d
c1 := d.withWidthOf(cType)
}.otherwise {
io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
io.out_b := b
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c1
c1 := mac_unit.io.out_d
c2 := d.withWidthOf(cType)
}
}.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
when(prop === PROPAGATE) {
io.out_c := c1
mac_unit.io.in_b := c2.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c1 := d
}.otherwise {
io.out_c := c2
mac_unit.io.in_b := c1.asTypeOf(inputType)
mac_unit.io.in_c := b
io.out_b := mac_unit.io.out_d
c2 := d
}
}.otherwise {
io.bad_dataflow := true.B
//assert(false.B, "unknown dataflow")
io.out_c := DontCare
io.out_b := DontCare
mac_unit.io.in_b := b.asTypeOf(inputType)
mac_unit.io.in_c := c2
}
when (!valid) {
c1 := c1
c2 := c2
mac_unit.io.in_b := DontCare
mac_unit.io.in_c := DontCare
}
}
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
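// Illustrative pointer (added note, not part of the original file): the UIntArithmetic,
// SIntArithmetic, FloatArithmetic, and DummySIntArithmetic objects defined below are complete
// examples of this pattern -- each supplies an implicit `cast` that wraps a value of that type
// in an ArithmeticOps implementation.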
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
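        // Added worked example (not in the original source): with u = 1, self = 7 (0b111) gives
        // point_five = 1 and ones_digit = 1, so r = 1 and the result is 3 + 1 = 4; self = 5 (0b101)
        // gives point_five = 1 but zeros = 0 and ones_digit = 0, so the exact-half case rounds down
        // to 2. This matches the round-to-nearest-even (rne) formula in the linked vxrm spec.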
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
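      // Added note (not in the original source): for an 8-bit target type t, sat = (1 << 7) - 1 = 127,
      // so any value above 127 saturates to 127 and the result is truncated to t's 8 low-order bits.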
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
        // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
        // Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
          // Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
          // Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
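        // Added note (not in the original source): MulAddRecFN computes a*b + c, so with b driven
        // by one_rec this reduces to t_rec_resized * 1 + self_rec, i.e. self + t.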
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
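        // Added worked example (not in the original source): for Float(8, 24), bias = 127, so u = 3
        // gives shift_exp = 124 and shift_fn encodes 2^-3 = 0.125; the multiply below then divides
        // self by 8.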
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
| module PE_491( // @[PE.scala:31:7]
input clock, // @[PE.scala:31:7]
input reset, // @[PE.scala:31:7]
input [7:0] io_in_a, // @[PE.scala:35:14]
input [19:0] io_in_b, // @[PE.scala:35:14]
input [19:0] io_in_d, // @[PE.scala:35:14]
output [7:0] io_out_a, // @[PE.scala:35:14]
output [19:0] io_out_b, // @[PE.scala:35:14]
output [19:0] io_out_c, // @[PE.scala:35:14]
input io_in_control_dataflow, // @[PE.scala:35:14]
input io_in_control_propagate, // @[PE.scala:35:14]
input [4:0] io_in_control_shift, // @[PE.scala:35:14]
output io_out_control_dataflow, // @[PE.scala:35:14]
output io_out_control_propagate, // @[PE.scala:35:14]
output [4:0] io_out_control_shift, // @[PE.scala:35:14]
input [2:0] io_in_id, // @[PE.scala:35:14]
output [2:0] io_out_id, // @[PE.scala:35:14]
input io_in_last, // @[PE.scala:35:14]
output io_out_last, // @[PE.scala:35:14]
input io_in_valid, // @[PE.scala:35:14]
output io_out_valid, // @[PE.scala:35:14]
output io_bad_dataflow // @[PE.scala:35:14]
);
wire [19:0] _mac_unit_io_out_d; // @[PE.scala:64:24]
wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7]
wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7]
wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7]
wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7]
wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7]
wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7]
wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7]
wire io_in_last_0 = io_in_last; // @[PE.scala:31:7]
wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7]
wire io_bad_dataflow_0 = 1'h0; // @[PE.scala:31:7]
wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7]
wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37]
wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37]
wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35]
wire [19:0] c1_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire [19:0] c2_lo_1 = io_in_d_0; // @[PE.scala:31:7]
wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7]
wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7]
wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7]
wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7]
wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7]
wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7]
wire [19:0] io_out_b_0; // @[PE.scala:31:7]
wire [19:0] io_out_c_0; // @[PE.scala:31:7]
reg [31:0] c1; // @[PE.scala:70:15]
wire [31:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15]
wire [31:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38]
reg [31:0] c2; // @[PE.scala:71:15]
wire [31:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15]
wire [31:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38]
reg last_s; // @[PE.scala:89:25]
wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21]
wire [4:0] shift_offset = flip ? io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25]
wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25]
wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32]
wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32]
assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32]
wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25]
wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53]
assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53]
wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66]
assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66]
wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15]
wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}]
wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25]
wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27]
wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27]
assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27]
wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_7 = _io_out_c_zeros_T_1 & _io_out_c_zeros_T_6; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_2 = {27'h0, shift_offset}; // @[PE.scala:91:25]
wire [31:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15]
wire [31:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T; // @[Arithmetic.scala:107:15]
assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_2 = {_io_out_c_T[31], _io_out_c_T} + {{31{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_3 = _io_out_c_T_2[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_5 = $signed(_io_out_c_T_4) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_6 = $signed(_io_out_c_T_4) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_7 = _io_out_c_T_6 ? 32'hFFF80000 : _io_out_c_T_4; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_8 = _io_out_c_T_5 ? 32'h7FFFF : _io_out_c_T_7; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_9 = _io_out_c_T_8[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37]
wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37]
wire c1_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire c2_sign = io_in_d_0[19]; // @[PE.scala:31:7]
wire [1:0] _GEN_4 = {2{c1_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c1_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_lo_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [1:0] c1_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c1_hi_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18]
wire [2:0] c1_lo_lo = {c1_lo_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_lo_hi = {c1_lo_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_lo = {c1_lo_hi, c1_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c1_hi_lo = {c1_hi_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c1_hi_hi = {c1_hi_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c1_hi = {c1_hi_hi, c1_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c1_T = {c1_hi, c1_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c1_T_1 = {_c1_T, c1_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c1_T_2 = _c1_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c1_WIRE = _c1_T_2; // @[Arithmetic.scala:118:61]
wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53]
wire [31:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15]
wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50]
wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}]
wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66]
wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}]
wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}]
wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81]
wire [31:0] _io_out_c_zeros_T_16 = _io_out_c_zeros_T_10 & _io_out_c_zeros_T_15; // @[Arithmetic.scala:102:{45,52,81}]
wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}]
wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}]
wire [31:0] _GEN_5 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15]
wire [31:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30]
assign _io_out_c_ones_digit_T_1 = _GEN_5; // @[Arithmetic.scala:103:30]
wire [31:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15]
assign _io_out_c_T_11 = _GEN_5; // @[Arithmetic.scala:103:30, :107:15]
wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30]
wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38]
wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}]
wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}]
wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33]
wire [32:0] _io_out_c_T_13 = {_io_out_c_T_11[31], _io_out_c_T_11} + {{31{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}]
wire [31:0] _io_out_c_T_14 = _io_out_c_T_13[31:0]; // @[Arithmetic.scala:107:28]
wire [31:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28]
wire _io_out_c_T_16 = $signed(_io_out_c_T_15) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33]
wire _io_out_c_T_17 = $signed(_io_out_c_T_15) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60]
wire [31:0] _io_out_c_T_18 = _io_out_c_T_17 ? 32'hFFF80000 : _io_out_c_T_15; // @[Mux.scala:126:16]
wire [31:0] _io_out_c_T_19 = _io_out_c_T_16 ? 32'h7FFFF : _io_out_c_T_18; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_20 = _io_out_c_T_19[19:0]; // @[Mux.scala:126:16]
wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}]
wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37]
wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37]
wire [1:0] _GEN_6 = {2{c2_sign}}; // @[Arithmetic.scala:117:26, :118:18]
wire [1:0] c2_lo_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_lo_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_lo_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_lo_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [1:0] c2_hi_hi_hi; // @[Arithmetic.scala:118:18]
assign c2_hi_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18]
wire [2:0] c2_lo_lo = {c2_lo_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_lo_hi = {c2_lo_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_lo = {c2_lo_hi, c2_lo_lo}; // @[Arithmetic.scala:118:18]
wire [2:0] c2_hi_lo = {c2_hi_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [2:0] c2_hi_hi = {c2_hi_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18]
wire [5:0] c2_hi = {c2_hi_hi, c2_hi_lo}; // @[Arithmetic.scala:118:18]
wire [11:0] _c2_T = {c2_hi, c2_lo}; // @[Arithmetic.scala:118:18]
wire [31:0] _c2_T_1 = {_c2_T, c2_lo_1}; // @[Arithmetic.scala:118:{14,18}]
wire [31:0] _c2_T_2 = _c2_T_1; // @[Arithmetic.scala:118:{14,61}]
wire [31:0] _c2_WIRE = _c2_T_2; // @[Arithmetic.scala:118:61]
wire [31:0] _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38]
wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5[7:0]; // @[PE.scala:121:38]
wire [31:0] _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38]
wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7[7:0]; // @[PE.scala:127:38]
assign io_out_c_0 = io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? c1[19:0] : c2[19:0]) : io_in_control_propagate_0 ? _io_out_c_T_10 : _io_out_c_T_21; // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :104:16, :111:16, :118:101, :119:30, :120:16, :126:16]
assign io_out_b_0 = io_in_control_dataflow_0 ? _mac_unit_io_out_d : io_in_b_0; // @[PE.scala:31:7, :64:24, :102:95, :103:30, :118:101]
wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35]
wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35]
wire [31:0] _GEN_7 = {{12{io_in_d_0[19]}}, io_in_d_0}; // @[PE.scala:31:7, :124:10]
wire [31:0] _GEN_8 = {{12{_mac_unit_io_out_d[19]}}, _mac_unit_io_out_d}; // @[PE.scala:64:24, :108:10]
always @(posedge clock) begin // @[PE.scala:31:7]
if (io_in_valid_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0) begin // @[PE.scala:31:7]
if (io_in_control_dataflow_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :70:15, :118:101, :119:30, :124:10]
c1 <= _GEN_7; // @[PE.scala:70:15, :124:10]
if (~io_in_control_dataflow_0 | io_in_control_propagate_0) begin // @[PE.scala:31:7, :71:15, :118:101, :119:30]
end
else // @[PE.scala:71:15, :118:101, :119:30]
c2 <= _GEN_7; // @[PE.scala:71:15, :124:10]
end
else begin // @[PE.scala:31:7]
c1 <= io_in_control_propagate_0 ? _c1_WIRE : _GEN_8; // @[PE.scala:31:7, :70:15, :103:30, :108:10, :109:10, :115:10]
c2 <= io_in_control_propagate_0 ? _GEN_8 : _c2_WIRE; // @[PE.scala:31:7, :71:15, :103:30, :108:10, :116:10]
end
last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25]
end
  end // always @(posedge)
MacUnit_235 mac_unit ( // @[PE.scala:64:24]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0), // @[PE.scala:31:7]
.io_in_b (io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3) : io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE : _mac_unit_io_in_b_WIRE_1), // @[PE.scala:31:7, :102:95, :103:30, :106:{24,37}, :113:{24,37}, :118:101, :119:30, :121:{24,38}, :127:{24,38}]
.io_in_c (io_in_control_dataflow_0 ? {{12{io_in_b_0[19]}}, io_in_b_0} : io_in_control_propagate_0 ? c2 : c1), // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :107:24, :114:24, :118:101, :122:24]
.io_out_d (_mac_unit_io_out_d)
); // @[PE.scala:64:24]
assign io_out_a = io_out_a_0; // @[PE.scala:31:7]
assign io_out_b = io_out_b_0; // @[PE.scala:31:7]
assign io_out_c = io_out_c_0; // @[PE.scala:31:7]
assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7]
assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7]
assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7]
assign io_out_id = io_out_id_0; // @[PE.scala:31:7]
assign io_out_last = io_out_last_0; // @[PE.scala:31:7]
assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[PE.scala:31:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
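// Usage sketch (illustrative only, not part of the original file): delay a signal by
// two cycles through named, resettable registers. `in` and `resetVal` are hypothetical
// values of the same Data type.
//   val q = ShiftRegInit(in, n = 2, init = resetVal, name = Some("dbg_pipe"))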
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
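// Usage sketch (illustrative only, not part of the original file): build a 3-deep,
// asynchronously reset pipeline for a timing-critical signal; `timingCriticalSig` is
// a hypothetical UInt.
//   val piped = AsyncResetShiftReg(timingCriticalSig, depth = 3, init = 0, name = Some("timing_pipe"))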
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
 * intended to be replaced by the integrator's metastability-hardened flop chains, or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
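// Usage sketch (illustrative only, not part of the original file): synchronize an
// asynchronous level into the local clock domain through a 3-deep, async-reset chain;
// `async_level` is a hypothetical Bool sourced from another clock domain.
//   val level_sync = AsyncResetSynchronizerShiftReg(async_level, sync = 3, init = 0)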
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
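// Usage sketch (illustrative only, not part of the original file): a plain 3-flop
// synchronizer with no reset pin; `raw_irq` is a hypothetical asynchronous input.
//   val irq_sync = SynchronizerShiftReg(raw_irq, sync = 3)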
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
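// Usage sketch (illustrative only, not part of the original file): capture a multi-bit
// bus in the destination clock domain whenever `dst_en` is asserted; `src_bits` and
// `dst_en` are hypothetical signals.
//   val crossed = ClockCrossingReg(src_bits, en = dst_en, doInit = false, name = Some("cdc_capture"))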
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_35( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_71 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Frontend.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.bundlebridge._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.tile.{CoreBundle, BaseTile}
import freechips.rocketchip.tilelink.{TLWidthWidget, TLEdgeOut}
import freechips.rocketchip.util.{ClockGate, ShiftQueue, property}
import freechips.rocketchip.util.UIntToAugmentedUInt
class FrontendReq(implicit p: Parameters) extends CoreBundle()(p) {
val pc = UInt(vaddrBitsExtended.W)
val speculative = Bool()
}
class FrontendExceptions extends Bundle {
val pf = new Bundle {
val inst = Bool()
}
val gf = new Bundle {
val inst = Bool()
}
val ae = new Bundle {
val inst = Bool()
}
}
class FrontendResp(implicit p: Parameters) extends CoreBundle()(p) {
val btb = new BTBResp
val pc = UInt(vaddrBitsExtended.W) // ID stage PC
val data = UInt((fetchWidth * coreInstBits).W)
val mask = Bits(fetchWidth.W)
val xcpt = new FrontendExceptions
val replay = Bool()
}
class FrontendPerfEvents extends Bundle {
val acquire = Bool()
val tlbMiss = Bool()
}
class FrontendIO(implicit p: Parameters) extends CoreBundle()(p) {
val might_request = Output(Bool())
val clock_enabled = Input(Bool())
val req = Valid(new FrontendReq)
val sfence = Valid(new SFenceReq)
val resp = Flipped(Decoupled(new FrontendResp))
val gpa = Flipped(Valid(UInt(vaddrBitsExtended.W)))
val gpa_is_pte = Input(Bool())
val btb_update = Valid(new BTBUpdate)
val bht_update = Valid(new BHTUpdate)
val ras_update = Valid(new RASUpdate)
val flush_icache = Output(Bool())
val npc = Input(UInt(vaddrBitsExtended.W))
val perf = Input(new FrontendPerfEvents())
val progress = Output(Bool())
}
class Frontend(val icacheParams: ICacheParams, tileId: Int)(implicit p: Parameters) extends LazyModule {
lazy val module = new FrontendModule(this)
val icache = LazyModule(new ICache(icacheParams, tileId))
val masterNode = icache.masterNode
val slaveNode = icache.slaveNode
val resetVectorSinkNode = BundleBridgeSink[UInt](Some(() => UInt(masterNode.edges.out.head.bundle.addressBits.W)))
}
class FrontendBundle(val outer: Frontend) extends CoreBundle()(outer.p) {
val cpu = Flipped(new FrontendIO())
val ptw = new TLBPTWIO()
val errors = new ICacheErrors
}
class FrontendModule(outer: Frontend) extends LazyModuleImp(outer)
with HasRocketCoreParameters
with HasL1ICacheParameters {
val io = IO(new FrontendBundle(outer))
val io_reset_vector = outer.resetVectorSinkNode.bundle
implicit val edge: TLEdgeOut = outer.masterNode.edges.out(0)
val icache = outer.icache.module
require(fetchWidth*coreInstBytes == outer.icacheParams.fetchBytes)
val fq = withReset(reset.asBool || io.cpu.req.valid) { Module(new ShiftQueue(new FrontendResp, 5, flow = true)) }
val clock_en_reg = Reg(Bool())
val clock_en = clock_en_reg || io.cpu.might_request
io.cpu.clock_enabled := clock_en
assert(!(io.cpu.req.valid || io.cpu.sfence.valid || io.cpu.flush_icache || io.cpu.bht_update.valid || io.cpu.btb_update.valid) || io.cpu.might_request)
val gated_clock =
if (!rocketParams.clockGate) clock
else ClockGate(clock, clock_en, "icache_clock_gate")
icache.clock := gated_clock
icache.io.clock_enabled := clock_en
withClock (gated_clock) { // entering gated-clock domain
val tlb = Module(new TLB(true, log2Ceil(fetchBytes), TLBConfig(nTLBSets, nTLBWays, outer.icacheParams.nTLBBasePageSectors, outer.icacheParams.nTLBSuperpages)))
val s1_valid = Reg(Bool())
val s2_valid = RegInit(false.B)
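    // Descriptive note (added): the fetch queue has room for this request if at least
    // three entries are free, or two are free with at most one fetch in flight in
    // s1/s2, or one is free with no fetch in flight.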
val s0_fq_has_space =
!fq.io.mask(fq.io.mask.getWidth-3) ||
(!fq.io.mask(fq.io.mask.getWidth-2) && (!s1_valid || !s2_valid)) ||
(!fq.io.mask(fq.io.mask.getWidth-1) && (!s1_valid && !s2_valid))
val s0_valid = io.cpu.req.valid || s0_fq_has_space
s1_valid := s0_valid
val s1_pc = Reg(UInt(vaddrBitsExtended.W))
val s1_speculative = Reg(Bool())
val s2_pc = RegInit(t = UInt(vaddrBitsExtended.W), alignPC(io_reset_vector))
val s2_btb_resp_valid = if (usingBTB) Reg(Bool()) else false.B
val s2_btb_resp_bits = Reg(new BTBResp)
val s2_btb_taken = s2_btb_resp_valid && s2_btb_resp_bits.taken
val s2_tlb_resp = Reg(tlb.io.resp.cloneType)
val s2_xcpt = s2_tlb_resp.ae.inst || s2_tlb_resp.pf.inst || s2_tlb_resp.gf.inst
val s2_speculative = RegInit(false.B)
val s2_partial_insn_valid = RegInit(false.B)
val s2_partial_insn = Reg(UInt(coreInstBits.W))
val wrong_path = RegInit(false.B)
val s1_base_pc = ~(~s1_pc | (fetchBytes - 1).U)
val ntpc = s1_base_pc + fetchBytes.U
val predicted_npc = WireDefault(ntpc)
val predicted_taken = WireDefault(false.B)
val s2_replay = Wire(Bool())
s2_replay := (s2_valid && !fq.io.enq.fire) || RegNext(s2_replay && !s0_valid, true.B)
val npc = Mux(s2_replay, s2_pc, predicted_npc)
s1_pc := io.cpu.npc
// consider RVC fetches across blocks to be non-speculative if the first
// part was non-speculative
val s0_speculative =
if (usingCompressed) s1_speculative || s2_valid && !s2_speculative || predicted_taken
else true.B
s1_speculative := Mux(io.cpu.req.valid, io.cpu.req.bits.speculative, Mux(s2_replay, s2_speculative, s0_speculative))
val s2_redirect = WireDefault(io.cpu.req.valid)
s2_valid := false.B
when (!s2_replay) {
s2_valid := !s2_redirect
s2_pc := s1_pc
s2_speculative := s1_speculative
s2_tlb_resp := tlb.io.resp
}
val recent_progress_counter_init = 3.U
val recent_progress_counter = RegInit(recent_progress_counter_init)
val recent_progress = recent_progress_counter > 0.U
when(io.ptw.req.fire && recent_progress) { recent_progress_counter := recent_progress_counter - 1.U }
when(io.cpu.progress) { recent_progress_counter := recent_progress_counter_init }
val s2_kill_speculative_tlb_refill = s2_speculative && !recent_progress
io.ptw <> tlb.io.ptw
tlb.io.req.valid := s1_valid && !s2_replay
tlb.io.req.bits.cmd := M_XRD // Frontend only reads
tlb.io.req.bits.vaddr := s1_pc
tlb.io.req.bits.passthrough := false.B
tlb.io.req.bits.size := log2Ceil(coreInstBytes*fetchWidth).U
tlb.io.req.bits.prv := io.ptw.status.prv
tlb.io.req.bits.v := io.ptw.status.v
tlb.io.sfence := io.cpu.sfence
tlb.io.kill := !s2_valid || s2_kill_speculative_tlb_refill
icache.io.req.valid := s0_valid
icache.io.req.bits.addr := io.cpu.npc
icache.io.invalidate := io.cpu.flush_icache
icache.io.s1_paddr := tlb.io.resp.paddr
icache.io.s2_vaddr := s2_pc
icache.io.s1_kill := s2_redirect || tlb.io.resp.miss || s2_replay
val s2_can_speculatively_refill = s2_tlb_resp.cacheable && !io.ptw.customCSRs.asInstanceOf[RocketCustomCSRs].disableSpeculativeICacheRefill
icache.io.s2_kill := s2_speculative && !s2_can_speculatively_refill || s2_xcpt
icache.io.s2_cacheable := s2_tlb_resp.cacheable
icache.io.s2_prefetch := s2_tlb_resp.prefetchable && !io.ptw.customCSRs.asInstanceOf[RocketCustomCSRs].disableICachePrefetch
fq.io.enq.valid := RegNext(s1_valid) && s2_valid && (icache.io.resp.valid || (s2_kill_speculative_tlb_refill && s2_tlb_resp.miss) || (!s2_tlb_resp.miss && icache.io.s2_kill))
fq.io.enq.bits.pc := s2_pc
io.cpu.npc := alignPC(Mux(io.cpu.req.valid, io.cpu.req.bits.pc, npc))
fq.io.enq.bits.data := icache.io.resp.bits.data
fq.io.enq.bits.mask := ((1 << fetchWidth)-1).U << s2_pc.extract(log2Ceil(fetchWidth)+log2Ceil(coreInstBytes)-1, log2Ceil(coreInstBytes))
fq.io.enq.bits.replay := (icache.io.resp.bits.replay || icache.io.s2_kill && !icache.io.resp.valid && !s2_xcpt) || (s2_kill_speculative_tlb_refill && s2_tlb_resp.miss)
fq.io.enq.bits.btb := s2_btb_resp_bits
fq.io.enq.bits.btb.taken := s2_btb_taken
fq.io.enq.bits.xcpt := s2_tlb_resp
assert(!(s2_speculative && io.ptw.customCSRs.asInstanceOf[RocketCustomCSRs].disableSpeculativeICacheRefill && !icache.io.s2_kill))
when (icache.io.resp.valid && icache.io.resp.bits.ae) { fq.io.enq.bits.xcpt.ae.inst := true.B }
if (usingBTB) {
val btb = Module(new BTB)
btb.io.flush := false.B
btb.io.req.valid := false.B
btb.io.req.bits.addr := s1_pc
btb.io.btb_update := io.cpu.btb_update
btb.io.bht_update := io.cpu.bht_update
btb.io.ras_update.valid := false.B
btb.io.ras_update.bits := DontCare
btb.io.bht_advance.valid := false.B
btb.io.bht_advance.bits := DontCare
when (!s2_replay) {
btb.io.req.valid := !s2_redirect
s2_btb_resp_valid := btb.io.resp.valid
s2_btb_resp_bits := btb.io.resp.bits
}
when (btb.io.resp.valid && btb.io.resp.bits.taken) {
predicted_npc := btb.io.resp.bits.target.sextTo(vaddrBitsExtended)
predicted_taken := true.B
}
val force_taken = io.ptw.customCSRs.bpmStatic
when (io.ptw.customCSRs.flushBTB) { btb.io.flush := true.B }
when (force_taken) { btb.io.bht_update.valid := false.B }
val s2_base_pc = ~(~s2_pc | (fetchBytes-1).U)
val taken_idx = Wire(UInt())
val after_idx = Wire(UInt())
val useRAS = WireDefault(false.B)
val updateBTB = WireDefault(false.B)
// If !prevTaken, ras_update / bht_update is always invalid.
taken_idx := DontCare
after_idx := DontCare
def scanInsns(idx: Int, prevValid: Bool, prevBits: UInt, prevTaken: Bool): Bool = {
def insnIsRVC(bits: UInt) = bits(1,0) =/= 3.U
val prevRVI = prevValid && !insnIsRVC(prevBits)
val valid = fq.io.enq.bits.mask(idx) && !prevRVI
val bits = fq.io.enq.bits.data(coreInstBits*(idx+1)-1, coreInstBits*idx)
val rvc = insnIsRVC(bits)
val rviBits = Cat(bits, prevBits)
val rviBranch = rviBits(6,0) === Instructions.BEQ.value.U.extract(6,0)
val rviJump = rviBits(6,0) === Instructions.JAL.value.U.extract(6,0)
val rviJALR = rviBits(6,0) === Instructions.JALR.value.U.extract(6,0)
val rviReturn = rviJALR && !rviBits(7) && BitPat("b00?01") === rviBits(19,15)
val rviCall = (rviJALR || rviJump) && rviBits(7)
val rvcBranch = bits === Instructions.C_BEQZ || bits === Instructions.C_BNEZ
val rvcJAL = (xLen == 32).B && bits === Instructions32.C_JAL
val rvcJump = bits === Instructions.C_J || rvcJAL
val rvcImm = Mux(bits(14), new RVCDecoder(bits, xLen, fLen).bImm.asSInt, new RVCDecoder(bits, xLen, fLen).jImm.asSInt)
val rvcJR = bits === Instructions.C_MV && bits(6,2) === 0.U
val rvcReturn = rvcJR && BitPat("b00?01") === bits(11,7)
val rvcJALR = bits === Instructions.C_ADD && bits(6,2) === 0.U
val rvcCall = rvcJAL || rvcJALR
val rviImm = Mux(rviBits(3), ImmGen(IMM_UJ, rviBits), ImmGen(IMM_SB, rviBits))
val predict_taken = s2_btb_resp_bits.bht.taken || force_taken
val taken =
prevRVI && (rviJump || rviJALR || rviBranch && predict_taken) ||
valid && (rvcJump || rvcJALR || rvcJR || rvcBranch && predict_taken)
val predictReturn = btb.io.ras_head.valid && (prevRVI && rviReturn || valid && rvcReturn)
val predictJump = prevRVI && rviJump || valid && rvcJump
val predictBranch = predict_taken && (prevRVI && rviBranch || valid && rvcBranch)
when (s2_valid && s2_btb_resp_valid && s2_btb_resp_bits.bridx === idx.U && valid && !rvc) {
// The BTB has predicted that the middle of an RVI instruction is
// a branch! Flush the BTB and the pipeline.
btb.io.flush := true.B
fq.io.enq.bits.replay := true.B
wrong_path := true.B
ccover(wrong_path, "BTB_NON_CFI_ON_WRONG_PATH", "BTB predicted a non-branch was taken while on the wrong path")
}
when (!prevTaken) {
taken_idx := idx.U
after_idx := (idx + 1).U
btb.io.ras_update.valid := fq.io.enq.fire && !wrong_path && (prevRVI && (rviCall || rviReturn) || valid && (rvcCall || rvcReturn))
btb.io.ras_update.bits.cfiType := Mux(Mux(prevRVI, rviReturn, rvcReturn), CFIType.ret,
Mux(Mux(prevRVI, rviCall, rvcCall), CFIType.call,
Mux(Mux(prevRVI, rviBranch, rvcBranch) && !force_taken, CFIType.branch,
CFIType.jump)))
when (!s2_btb_taken) {
when (fq.io.enq.fire && taken && !predictBranch && !predictJump && !predictReturn) {
wrong_path := true.B
}
when (s2_valid && predictReturn) {
useRAS := true.B
}
when (s2_valid && (predictBranch || predictJump)) {
val pc = s2_base_pc | (idx*coreInstBytes).U
val npc =
if (idx == 0) pc.asSInt + Mux(prevRVI, rviImm -& 2.S, rvcImm)
else Mux(prevRVI, pc - coreInstBytes.U, pc).asSInt + Mux(prevRVI, rviImm, rvcImm)
predicted_npc := npc.asUInt
}
}
when (prevRVI && rviBranch || valid && rvcBranch) {
btb.io.bht_advance.valid := fq.io.enq.fire && !wrong_path
btb.io.bht_advance.bits := s2_btb_resp_bits
}
when (!s2_btb_resp_valid && (predictBranch && s2_btb_resp_bits.bht.strongly_taken || predictJump || predictReturn)) {
updateBTB := true.B
}
}
if (idx == fetchWidth-1) {
when (fq.io.enq.fire) {
s2_partial_insn_valid := false.B
when (valid && !prevTaken && !rvc) {
s2_partial_insn_valid := true.B
s2_partial_insn := bits | 0x3.U
}
}
prevTaken || taken
} else {
scanInsns(idx + 1, valid, bits, prevTaken || taken)
}
}
when (!io.cpu.btb_update.valid) {
val fetch_bubble_likely = !fq.io.mask(1)
btb.io.btb_update.valid := fq.io.enq.fire && !wrong_path && fetch_bubble_likely && updateBTB
btb.io.btb_update.bits.prediction.entry := tileParams.btb.get.nEntries.U
btb.io.btb_update.bits.isValid := true.B
btb.io.btb_update.bits.cfiType := btb.io.ras_update.bits.cfiType
btb.io.btb_update.bits.br_pc := s2_base_pc | (taken_idx << log2Ceil(coreInstBytes))
btb.io.btb_update.bits.pc := s2_base_pc
}
btb.io.ras_update.bits.returnAddr := s2_base_pc + (after_idx << log2Ceil(coreInstBytes))
val taken = scanInsns(0, s2_partial_insn_valid, s2_partial_insn, false.B)
when (useRAS) {
predicted_npc := btb.io.ras_head.bits
}
when (fq.io.enq.fire && (s2_btb_taken || taken)) {
s2_partial_insn_valid := false.B
}
when (!s2_btb_taken) {
when (taken) {
fq.io.enq.bits.btb.bridx := taken_idx
fq.io.enq.bits.btb.taken := true.B
fq.io.enq.bits.btb.entry := tileParams.btb.get.nEntries.U
when (fq.io.enq.fire) { s2_redirect := true.B }
}
}
assert(!s2_partial_insn_valid || fq.io.enq.bits.mask(0))
when (s2_redirect) { s2_partial_insn_valid := false.B }
when (io.cpu.req.valid) { wrong_path := false.B }
}
io.cpu.resp <> fq.io.deq
// supply guest physical address to commit stage
val gpa_valid = Reg(Bool())
val gpa = Reg(UInt(vaddrBitsExtended.W))
val gpa_is_pte = Reg(Bool())
when (fq.io.enq.fire && s2_tlb_resp.gf.inst) {
when (!gpa_valid) {
gpa := s2_tlb_resp.gpa
gpa_is_pte := s2_tlb_resp.gpa_is_pte
}
gpa_valid := true.B
}
when (io.cpu.req.valid) {
gpa_valid := false.B
}
io.cpu.gpa.valid := gpa_valid
io.cpu.gpa.bits := gpa
io.cpu.gpa_is_pte := gpa_is_pte
// performance events
io.cpu.perf.acquire := icache.io.perf.acquire
io.cpu.perf.tlbMiss := io.ptw.req.fire
io.errors := icache.io.errors
// gate the clock
clock_en_reg := !rocketParams.clockGate.B ||
io.cpu.might_request || // chicken bit
icache.io.keep_clock_enabled || // I$ miss or ITIM access
s1_valid || s2_valid || // some fetch in flight
!tlb.io.req.ready || // handling TLB miss
!fq.io.mask(fq.io.mask.getWidth-1) // queue not full
} // leaving gated-clock domain
def alignPC(pc: UInt) = ~(~pc | (coreInstBytes - 1).U)
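  // Illustrative: clears the low-order address bits; e.g., if coreInstBytes were 4,
  // alignPC(0x1007) = ~(~0x1007 | 0x3) = 0x1004.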
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
property.cover(cond, s"FRONTEND_$label", "Rocket;;" + desc)
}
/** Mix-ins for constructing tiles that have an ICache-based pipeline frontend */
trait HasICacheFrontend extends CanHavePTW { this: BaseTile =>
val module: HasICacheFrontendModule
val frontend = LazyModule(new Frontend(tileParams.icache.get, tileId))
tlMasterXbar.node := TLWidthWidget(tileParams.icache.get.rowBits/8) := frontend.masterNode
connectTLSlave(frontend.slaveNode, tileParams.core.fetchBytes)
frontend.icache.hartIdSinkNodeOpt.foreach { _ := hartIdNexusNode }
frontend.icache.mmioAddressPrefixSinkNodeOpt.foreach { _ := mmioAddressPrefixNexusNode }
frontend.resetVectorSinkNode := resetVectorNexusNode
nPTWPorts += 1
  // This should be None when there is no ITIM address, i.e., when we don't
  // actually use the device that is instantiated in the frontend.
private val deviceOpt = if (tileParams.icache.get.itimAddr.isDefined) Some(frontend.icache.device) else None
}
trait HasICacheFrontendModule extends CanHavePTWModule {
val outer: HasICacheFrontend
ptwPorts += outer.frontend.module.io.ptw
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and the unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
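/** Illustrative sketch (not part of the original file): a raw-module imp that drives
  * `childClock`/`childReset` from explicit IO, as recommended above. The class and
  * port names are hypothetical.
  *
  *   class ExternallyClockedImp(wrapper: LazyModule) extends LazyRawModuleImp(wrapper) {
  *     val clockIn = IO(Input(Clock()))
  *     val resetIn = IO(Input(AsyncReset()))
  *     childClock := clockIn
  *     childReset := resetIn
  *   }
  */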
File RocketCore.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import chisel3.withClock
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property
import scala.collection.mutable.ArrayBuffer
case class RocketCoreParams(
xLen: Int = 64,
pgLevels: Int = 3, // sv39 default
bootFreqHz: BigInt = 0,
useVM: Boolean = true,
useUser: Boolean = false,
useSupervisor: Boolean = false,
useHypervisor: Boolean = false,
useDebug: Boolean = true,
useAtomics: Boolean = true,
useAtomicsOnlyForIO: Boolean = false,
useCompressed: Boolean = true,
useRVE: Boolean = false,
useConditionalZero: Boolean = false,
useZba: Boolean = false,
useZbb: Boolean = false,
useZbs: Boolean = false,
nLocalInterrupts: Int = 0,
useNMI: Boolean = false,
nBreakpoints: Int = 1,
useBPWatch: Boolean = false,
mcontextWidth: Int = 0,
scontextWidth: Int = 0,
nPMPs: Int = 8,
nPerfCounters: Int = 0,
haveBasicCounters: Boolean = true,
haveCFlush: Boolean = false,
misaWritable: Boolean = true,
nL2TLBEntries: Int = 0,
nL2TLBWays: Int = 1,
nPTECacheEntries: Int = 8,
mtvecInit: Option[BigInt] = Some(BigInt(0)),
mtvecWritable: Boolean = true,
fastLoadWord: Boolean = true,
fastLoadByte: Boolean = false,
branchPredictionModeCSR: Boolean = false,
clockGate: Boolean = false,
mvendorid: Int = 0, // 0 means non-commercial implementation
mimpid: Int = 0x20181004, // release date in BCD
mulDiv: Option[MulDivParams] = Some(MulDivParams()),
fpu: Option[FPUParams] = Some(FPUParams()),
debugROB: Option[DebugROBParams] = None, // if size < 1, SW ROB, else HW ROB
haveCease: Boolean = true, // non-standard CEASE instruction
haveSimTimeout: Boolean = true, // add plusarg for simulation timeout
vector: Option[RocketCoreVectorParams] = None
) extends CoreParams {
val lgPauseCycles = 5
val haveFSDirty = false
val pmpGranularity: Int = if (useHypervisor) 4096 else 4
val fetchWidth: Int = if (useCompressed) 2 else 1
// fetchWidth doubled, but coreInstBytes halved, for RVC:
val decodeWidth: Int = fetchWidth / (if (useCompressed) 2 else 1)
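  // e.g., useCompressed = true gives fetchWidth = 2 and decodeWidth = 2 / 2 = 1.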
val retireWidth: Int = 1
val instBits: Int = if (useCompressed) 16 else 32
val lrscCycles: Int = 80 // worst case is 14 mispredicted branches + slop
val traceHasWdata: Boolean = debugROB.isDefined // ooo wb, so no wdata in trace
override val useVector = vector.isDefined
override val vectorUseDCache = vector.map(_.useDCache).getOrElse(false)
override def vLen = vector.map(_.vLen).getOrElse(0)
override def eLen = vector.map(_.eLen).getOrElse(0)
override def vfLen = vector.map(_.vfLen).getOrElse(0)
override def vfh = vector.map(_.vfh).getOrElse(false)
override def vExts = vector.map(_.vExts).getOrElse(Nil)
override def vMemDataBits = vector.map(_.vMemDataBits).getOrElse(0)
override val customIsaExt = Option.when(haveCease)("xrocket") // CEASE instruction
override def minFLen: Int = fpu.map(_.minFLen).getOrElse(32)
override def customCSRs(implicit p: Parameters) = new RocketCustomCSRs
}
trait HasRocketCoreParameters extends HasCoreParameters {
lazy val rocketParams: RocketCoreParams = tileParams.core.asInstanceOf[RocketCoreParams]
val fastLoadWord = rocketParams.fastLoadWord
val fastLoadByte = rocketParams.fastLoadByte
val mulDivParams = rocketParams.mulDiv.getOrElse(MulDivParams()) // TODO ask andrew about this
require(!fastLoadByte || fastLoadWord)
require(!rocketParams.haveFSDirty, "rocket doesn't support setting fs dirty from outside, please disable haveFSDirty")
}
class RocketCustomCSRs(implicit p: Parameters) extends CustomCSRs with HasRocketCoreParameters {
override def bpmCSR = {
rocketParams.branchPredictionModeCSR.option(CustomCSR(bpmCSRId, BigInt(1), Some(BigInt(0))))
}
private def haveDCache = tileParams.dcache.get.scratch.isEmpty
override def chickenCSR = {
val mask = BigInt(
tileParams.dcache.get.clockGate.toInt << 0 |
rocketParams.clockGate.toInt << 1 |
rocketParams.clockGate.toInt << 2 |
1 << 3 | // disableSpeculativeICacheRefill
haveDCache.toInt << 9 | // suppressCorruptOnGrantData
tileParams.icache.get.prefetch.toInt << 17
)
Some(CustomCSR(chickenCSRId, mask, Some(mask)))
}
def disableICachePrefetch = getOrElse(chickenCSR, _.value(17), true.B)
def marchid = CustomCSR.constant(CSRs.marchid, BigInt(1))
def mvendorid = CustomCSR.constant(CSRs.mvendorid, BigInt(rocketParams.mvendorid))
// mimpid encodes a release version in the form of a BCD-encoded datestamp.
def mimpid = CustomCSR.constant(CSRs.mimpid, BigInt(rocketParams.mimpid))
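  // e.g., the default mimpid = 0x20181004 reads as the release date 2018-10-04.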
override def decls = super.decls :+ marchid :+ mvendorid :+ mimpid
}
class CoreInterrupts(val hasBeu: Boolean)(implicit p: Parameters) extends TileInterrupts()(p) {
val buserror = Option.when(hasBeu)(Bool())
}
trait HasRocketCoreIO extends HasRocketCoreParameters {
implicit val p: Parameters
def nTotalRoCCCSRs: Int
val io = IO(new CoreBundle()(p) {
val hartid = Input(UInt(hartIdLen.W))
val reset_vector = Input(UInt(resetVectorLen.W))
val interrupts = Input(new CoreInterrupts(tileParams.asInstanceOf[RocketTileParams].beuAddr.isDefined))
val imem = new FrontendIO
val dmem = new HellaCacheIO
val ptw = Flipped(new DatapathPTWIO())
val fpu = Flipped(new FPUCoreIO())
val rocc = Flipped(new RoCCCoreIO(nTotalRoCCCSRs))
val trace = Output(new TraceBundle)
val bpwatch = Output(Vec(coreParams.nBreakpoints, new BPWatch(coreParams.retireWidth)))
val cease = Output(Bool())
val wfi = Output(Bool())
val traceStall = Input(Bool())
val vector = if (usingVector) Some(Flipped(new VectorCoreIO)) else None
})
}
class Rocket(tile: RocketTile)(implicit p: Parameters) extends CoreModule()(p)
with HasRocketCoreParameters
with HasRocketCoreIO {
def nTotalRoCCCSRs = tile.roccCSRs.flatten.size
import ALU._
val clock_en_reg = RegInit(true.B)
val long_latency_stall = Reg(Bool())
val id_reg_pause = Reg(Bool())
val imem_might_request_reg = Reg(Bool())
val clock_en = WireDefault(true.B)
val gated_clock =
if (!rocketParams.clockGate) clock
else ClockGate(clock, clock_en, "rocket_clock_gate")
class RocketImpl { // entering gated-clock domain
// performance counters
def pipelineIDToWB[T <: Data](x: T): T =
RegEnable(RegEnable(RegEnable(x, !ctrl_killd), ex_pc_valid), mem_pc_valid)
val perfEvents = new EventSets(Seq(
new EventSet((mask, hits) => Mux(wb_xcpt, mask(0), wb_valid && pipelineIDToWB((mask & hits).orR)), Seq(
("exception", () => false.B),
("load", () => id_ctrl.mem && id_ctrl.mem_cmd === M_XRD && !id_ctrl.fp),
("store", () => id_ctrl.mem && id_ctrl.mem_cmd === M_XWR && !id_ctrl.fp),
("amo", () => usingAtomics.B && id_ctrl.mem && (isAMO(id_ctrl.mem_cmd) || id_ctrl.mem_cmd.isOneOf(M_XLR, M_XSC))),
("system", () => id_ctrl.csr =/= CSR.N),
("arith", () => id_ctrl.wxd && !(id_ctrl.jal || id_ctrl.jalr || id_ctrl.mem || id_ctrl.fp || id_ctrl.mul || id_ctrl.div || id_ctrl.csr =/= CSR.N)),
("branch", () => id_ctrl.branch),
("jal", () => id_ctrl.jal),
("jalr", () => id_ctrl.jalr))
++ (if (!usingMulDiv) Seq() else Seq(
("mul", () => if (pipelinedMul) id_ctrl.mul else id_ctrl.div && (id_ctrl.alu_fn & FN_DIV) =/= FN_DIV),
("div", () => if (pipelinedMul) id_ctrl.div else id_ctrl.div && (id_ctrl.alu_fn & FN_DIV) === FN_DIV)))
++ (if (!usingFPU) Seq() else Seq(
("fp load", () => id_ctrl.fp && io.fpu.dec.ldst && io.fpu.dec.wen),
("fp store", () => id_ctrl.fp && io.fpu.dec.ldst && !io.fpu.dec.wen),
("fp add", () => id_ctrl.fp && io.fpu.dec.fma && io.fpu.dec.swap23),
("fp mul", () => id_ctrl.fp && io.fpu.dec.fma && !io.fpu.dec.swap23 && !io.fpu.dec.ren3),
("fp mul-add", () => id_ctrl.fp && io.fpu.dec.fma && io.fpu.dec.ren3),
("fp div/sqrt", () => id_ctrl.fp && (io.fpu.dec.div || io.fpu.dec.sqrt)),
("fp other", () => id_ctrl.fp && !(io.fpu.dec.ldst || io.fpu.dec.fma || io.fpu.dec.div || io.fpu.dec.sqrt))))),
new EventSet((mask, hits) => (mask & hits).orR, Seq(
("load-use interlock", () => id_ex_hazard && ex_ctrl.mem || id_mem_hazard && mem_ctrl.mem || id_wb_hazard && wb_ctrl.mem),
("long-latency interlock", () => id_sboard_hazard),
("csr interlock", () => id_ex_hazard && ex_ctrl.csr =/= CSR.N || id_mem_hazard && mem_ctrl.csr =/= CSR.N || id_wb_hazard && wb_ctrl.csr =/= CSR.N),
("I$ blocked", () => icache_blocked),
("D$ blocked", () => id_ctrl.mem && dcache_blocked),
("branch misprediction", () => take_pc_mem && mem_direction_misprediction),
("control-flow target misprediction", () => take_pc_mem && mem_misprediction && mem_cfi && !mem_direction_misprediction && !icache_blocked),
("flush", () => wb_reg_flush_pipe),
("replay", () => replay_wb))
++ (if (!usingMulDiv) Seq() else Seq(
("mul/div interlock", () => id_ex_hazard && (ex_ctrl.mul || ex_ctrl.div) || id_mem_hazard && (mem_ctrl.mul || mem_ctrl.div) || id_wb_hazard && wb_ctrl.div)))
++ (if (!usingFPU) Seq() else Seq(
("fp interlock", () => id_ex_hazard && ex_ctrl.fp || id_mem_hazard && mem_ctrl.fp || id_wb_hazard && wb_ctrl.fp || id_ctrl.fp && id_stall_fpu)))),
new EventSet((mask, hits) => (mask & hits).orR, Seq(
("I$ miss", () => io.imem.perf.acquire),
("D$ miss", () => io.dmem.perf.acquire),
("D$ release", () => io.dmem.perf.release),
("ITLB miss", () => io.imem.perf.tlbMiss),
("DTLB miss", () => io.dmem.perf.tlbMiss),
("L2 TLB miss", () => io.ptw.perf.l2miss)))))
val pipelinedMul = usingMulDiv && mulDivParams.mulUnroll == xLen
val decode_table = {
(if (usingMulDiv) new MDecode(pipelinedMul) +: (xLen > 32).option(new M64Decode(pipelinedMul)).toSeq else Nil) ++:
(if (usingAtomics) new ADecode +: (xLen > 32).option(new A64Decode).toSeq else Nil) ++:
(if (fLen >= 32) new FDecode +: (xLen > 32).option(new F64Decode).toSeq else Nil) ++:
(if (fLen >= 64) new DDecode +: (xLen > 32).option(new D64Decode).toSeq else Nil) ++:
(if (minFLen == 16) new HDecode +: (xLen > 32).option(new H64Decode).toSeq ++: (fLen >= 64).option(new HDDecode).toSeq else Nil) ++:
(usingRoCC.option(new RoCCDecode)) ++:
(if (xLen == 32) new I32Decode else new I64Decode) +:
(usingVM.option(new SVMDecode)) ++:
(usingSupervisor.option(new SDecode)) ++:
(usingHypervisor.option(new HypervisorDecode)) ++:
((usingHypervisor && (xLen == 64)).option(new Hypervisor64Decode)) ++:
(usingDebug.option(new DebugDecode)) ++:
(usingNMI.option(new NMIDecode)) ++:
(usingConditionalZero.option(new ConditionalZeroDecode)) ++:
Seq(new FenceIDecode(tile.dcache.flushOnFenceI)) ++:
coreParams.haveCFlush.option(new CFlushDecode(tile.dcache.canSupportCFlushLine)) ++:
rocketParams.haveCease.option(new CeaseDecode) ++:
usingVector.option(new VCFGDecode) ++:
(if (coreParams.useZba) new ZbaDecode +: (xLen > 32).option(new Zba64Decode).toSeq else Nil) ++:
(if (coreParams.useZbb) Seq(new ZbbDecode, if (xLen == 32) new Zbb32Decode else new Zbb64Decode) else Nil) ++:
coreParams.useZbs.option(new ZbsDecode) ++:
Seq(new IDecode)
} flatMap(_.table)
val ex_ctrl = Reg(new IntCtrlSigs)
val mem_ctrl = Reg(new IntCtrlSigs)
val wb_ctrl = Reg(new IntCtrlSigs)
val ex_reg_xcpt_interrupt = Reg(Bool())
val ex_reg_valid = Reg(Bool())
val ex_reg_rvc = Reg(Bool())
val ex_reg_btb_resp = Reg(new BTBResp)
val ex_reg_xcpt = Reg(Bool())
val ex_reg_flush_pipe = Reg(Bool())
val ex_reg_load_use = Reg(Bool())
val ex_reg_cause = Reg(UInt())
val ex_reg_replay = Reg(Bool())
val ex_reg_pc = Reg(UInt())
val ex_reg_mem_size = Reg(UInt())
val ex_reg_hls = Reg(Bool())
val ex_reg_inst = Reg(Bits())
val ex_reg_raw_inst = Reg(UInt())
val ex_reg_wphit = Reg(Vec(nBreakpoints, Bool()))
val ex_reg_set_vconfig = Reg(Bool())
val mem_reg_xcpt_interrupt = Reg(Bool())
val mem_reg_valid = Reg(Bool())
val mem_reg_rvc = Reg(Bool())
val mem_reg_btb_resp = Reg(new BTBResp)
val mem_reg_xcpt = Reg(Bool())
val mem_reg_replay = Reg(Bool())
val mem_reg_flush_pipe = Reg(Bool())
val mem_reg_cause = Reg(UInt())
val mem_reg_slow_bypass = Reg(Bool())
val mem_reg_load = Reg(Bool())
val mem_reg_store = Reg(Bool())
val mem_reg_set_vconfig = Reg(Bool())
val mem_reg_sfence = Reg(Bool())
val mem_reg_pc = Reg(UInt())
val mem_reg_inst = Reg(Bits())
val mem_reg_mem_size = Reg(UInt())
val mem_reg_hls_or_dv = Reg(Bool())
val mem_reg_raw_inst = Reg(UInt())
val mem_reg_wdata = Reg(Bits())
val mem_reg_rs2 = Reg(Bits())
val mem_br_taken = Reg(Bool())
val take_pc_mem = Wire(Bool())
val mem_reg_wphit = Reg(Vec(nBreakpoints, Bool()))
val wb_reg_valid = Reg(Bool())
val wb_reg_xcpt = Reg(Bool())
val wb_reg_replay = Reg(Bool())
val wb_reg_flush_pipe = Reg(Bool())
val wb_reg_cause = Reg(UInt())
val wb_reg_set_vconfig = Reg(Bool())
val wb_reg_sfence = Reg(Bool())
val wb_reg_pc = Reg(UInt())
val wb_reg_mem_size = Reg(UInt())
val wb_reg_hls_or_dv = Reg(Bool())
val wb_reg_hfence_v = Reg(Bool())
val wb_reg_hfence_g = Reg(Bool())
val wb_reg_inst = Reg(Bits())
val wb_reg_raw_inst = Reg(UInt())
val wb_reg_wdata = Reg(Bits())
val wb_reg_rs2 = Reg(Bits())
val take_pc_wb = Wire(Bool())
val wb_reg_wphit = Reg(Vec(nBreakpoints, Bool()))
val take_pc_mem_wb = take_pc_wb || take_pc_mem
val take_pc = take_pc_mem_wb
// decode stage
val ibuf = Module(new IBuf)
val id_expanded_inst = ibuf.io.inst.map(_.bits.inst)
val id_raw_inst = ibuf.io.inst.map(_.bits.raw)
val id_inst = id_expanded_inst.map(_.bits)
ibuf.io.imem <> io.imem.resp
ibuf.io.kill := take_pc
require(decodeWidth == 1 /* TODO */ && retireWidth == decodeWidth)
require(!(coreParams.useRVE && coreParams.fpu.nonEmpty), "Can't select both RVE and floating-point")
require(!(coreParams.useRVE && coreParams.useHypervisor), "Can't select both RVE and Hypervisor")
val id_ctrl = Wire(new IntCtrlSigs).decode(id_inst(0), decode_table)
val lgNXRegs = if (coreParams.useRVE) 4 else 5
val regAddrMask = (1 << lgNXRegs) - 1
def decodeReg(x: UInt) = (x.extract(x.getWidth-1, lgNXRegs).asBool, x(lgNXRegs-1, 0))
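    // Illustrative: with RVE (lgNXRegs = 4), a 5-bit specifier x20 = 0b10100 splits
    // into (illegal = true, addr = 4), while x12 = 0b01100 gives (illegal = false, addr = 12).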
val (id_raddr3_illegal, id_raddr3) = decodeReg(id_expanded_inst(0).rs3)
val (id_raddr2_illegal, id_raddr2) = decodeReg(id_expanded_inst(0).rs2)
val (id_raddr1_illegal, id_raddr1) = decodeReg(id_expanded_inst(0).rs1)
val (id_waddr_illegal, id_waddr) = decodeReg(id_expanded_inst(0).rd)
val id_load_use = Wire(Bool())
val id_reg_fence = RegInit(false.B)
val id_ren = IndexedSeq(id_ctrl.rxs1, id_ctrl.rxs2)
val id_raddr = IndexedSeq(id_raddr1, id_raddr2)
val rf = new RegFile(regAddrMask, xLen)
val id_rs = id_raddr.map(rf.read _)
val ctrl_killd = Wire(Bool())
val id_npc = (ibuf.io.pc.asSInt + ImmGen(IMM_UJ, id_inst(0))).asUInt
val csr = Module(new CSRFile(perfEvents, coreParams.customCSRs.decls, tile.roccCSRs.flatten, tile.rocketParams.beuAddr.isDefined))
val id_csr_en = id_ctrl.csr.isOneOf(CSR.S, CSR.C, CSR.W)
val id_system_insn = id_ctrl.csr === CSR.I
val id_csr_ren = id_ctrl.csr.isOneOf(CSR.S, CSR.C) && id_expanded_inst(0).rs1 === 0.U
val id_csr = Mux(id_system_insn && id_ctrl.mem, CSR.N, Mux(id_csr_ren, CSR.R, id_ctrl.csr))
val id_csr_flush = id_system_insn || (id_csr_en && !id_csr_ren && csr.io.decode(0).write_flush)
val id_set_vconfig = Seq(Instructions.VSETVLI, Instructions.VSETIVLI, Instructions.VSETVL).map(_ === id_inst(0)).orR && usingVector.B
id_ctrl.vec := false.B
if (usingVector) {
val v_decode = rocketParams.vector.get.decoder(p)
v_decode.io.inst := id_inst(0)
v_decode.io.vconfig := csr.io.vector.get.vconfig
when (v_decode.io.legal) {
id_ctrl.legal := !csr.io.vector.get.vconfig.vtype.vill
id_ctrl.fp := v_decode.io.fp
id_ctrl.rocc := false.B
id_ctrl.branch := false.B
id_ctrl.jal := false.B
id_ctrl.jalr := false.B
id_ctrl.rxs2 := v_decode.io.read_rs2
id_ctrl.rxs1 := v_decode.io.read_rs1
id_ctrl.mem := false.B
id_ctrl.rfs1 := v_decode.io.read_frs1
id_ctrl.rfs2 := false.B
id_ctrl.rfs3 := false.B
id_ctrl.wfd := v_decode.io.write_frd
id_ctrl.mul := false.B
id_ctrl.div := false.B
id_ctrl.wxd := v_decode.io.write_rd
id_ctrl.csr := CSR.N
id_ctrl.fence_i := false.B
id_ctrl.fence := false.B
id_ctrl.amo := false.B
id_ctrl.dp := false.B
id_ctrl.vec := true.B
}
}
val id_illegal_insn = !id_ctrl.legal ||
(id_ctrl.mul || id_ctrl.div) && !csr.io.status.isa('m'-'a') ||
id_ctrl.amo && !csr.io.status.isa('a'-'a') ||
id_ctrl.fp && (csr.io.decode(0).fp_illegal || (io.fpu.illegal_rm && !id_ctrl.vec)) ||
(id_ctrl.vec) && (csr.io.decode(0).vector_illegal || csr.io.vector.map(_.vconfig.vtype.vill).getOrElse(false.B)) ||
id_ctrl.dp && !csr.io.status.isa('d'-'a') ||
ibuf.io.inst(0).bits.rvc && !csr.io.status.isa('c'-'a') ||
id_raddr2_illegal && id_ctrl.rxs2 ||
id_raddr1_illegal && id_ctrl.rxs1 ||
id_waddr_illegal && id_ctrl.wxd ||
id_ctrl.rocc && csr.io.decode(0).rocc_illegal ||
id_csr_en && (csr.io.decode(0).read_illegal || !id_csr_ren && csr.io.decode(0).write_illegal) ||
!ibuf.io.inst(0).bits.rvc && (id_system_insn && csr.io.decode(0).system_illegal)
val id_virtual_insn = id_ctrl.legal &&
((id_csr_en && !(!id_csr_ren && csr.io.decode(0).write_illegal) && csr.io.decode(0).virtual_access_illegal) ||
(!ibuf.io.inst(0).bits.rvc && id_system_insn && csr.io.decode(0).virtual_system_illegal))
// stall decode for fences (now, for AMO.rl; later, for AMO.aq and FENCE)
val id_amo_aq = id_inst(0)(26)
val id_amo_rl = id_inst(0)(25)
val id_fence_pred = id_inst(0)(27,24)
val id_fence_succ = id_inst(0)(23,20)
val id_fence_next = id_ctrl.fence || id_ctrl.amo && id_amo_aq
val id_mem_busy = !io.dmem.ordered || io.dmem.req.valid
when (!id_mem_busy) { id_reg_fence := false.B }
val id_rocc_busy = usingRoCC.B &&
(io.rocc.busy || ex_reg_valid && ex_ctrl.rocc ||
mem_reg_valid && mem_ctrl.rocc || wb_reg_valid && wb_ctrl.rocc)
val id_csr_rocc_write = tile.roccCSRs.flatten.map(_.id.U === id_inst(0)(31,20)).orR && id_csr_en && !id_csr_ren
val id_vec_busy = io.vector.map(v => v.backend_busy || v.trap_check_busy).getOrElse(false.B)
val id_do_fence = WireDefault(id_rocc_busy && (id_ctrl.fence || id_csr_rocc_write) ||
id_vec_busy && id_ctrl.fence ||
id_mem_busy && (id_ctrl.amo && id_amo_rl || id_ctrl.fence_i || id_reg_fence && (id_ctrl.mem || id_ctrl.rocc)))
val bpu = Module(new BreakpointUnit(nBreakpoints))
bpu.io.status := csr.io.status
bpu.io.bp := csr.io.bp
bpu.io.pc := ibuf.io.pc
bpu.io.ea := mem_reg_wdata
bpu.io.mcontext := csr.io.mcontext
bpu.io.scontext := csr.io.scontext
val id_xcpt0 = ibuf.io.inst(0).bits.xcpt0
val id_xcpt1 = ibuf.io.inst(0).bits.xcpt1
val (id_xcpt, id_cause) = checkExceptions(List(
(csr.io.interrupt, csr.io.interrupt_cause),
(bpu.io.debug_if, CSR.debugTriggerCause.U),
(bpu.io.xcpt_if, Causes.breakpoint.U),
(id_xcpt0.pf.inst, Causes.fetch_page_fault.U),
(id_xcpt0.gf.inst, Causes.fetch_guest_page_fault.U),
(id_xcpt0.ae.inst, Causes.fetch_access.U),
(id_xcpt1.pf.inst, Causes.fetch_page_fault.U),
(id_xcpt1.gf.inst, Causes.fetch_guest_page_fault.U),
(id_xcpt1.ae.inst, Causes.fetch_access.U),
(id_virtual_insn, Causes.virtual_instruction.U),
(id_illegal_insn, Causes.illegal_instruction.U)))
val idCoverCauses = List(
(CSR.debugTriggerCause, "DEBUG_TRIGGER"),
(Causes.breakpoint, "BREAKPOINT"),
(Causes.fetch_access, "FETCH_ACCESS"),
(Causes.illegal_instruction, "ILLEGAL_INSTRUCTION")
) ++ (if (usingVM) List(
(Causes.fetch_page_fault, "FETCH_PAGE_FAULT")
) else Nil)
coverExceptions(id_xcpt, id_cause, "DECODE", idCoverCauses)
val dcache_bypass_data =
if (fastLoadByte) io.dmem.resp.bits.data(xLen-1, 0)
else if (fastLoadWord) io.dmem.resp.bits.data_word_bypass(xLen-1, 0)
else wb_reg_wdata
// detect bypass opportunities
val ex_waddr = ex_reg_inst(11,7) & regAddrMask.U
val mem_waddr = mem_reg_inst(11,7) & regAddrMask.U
val wb_waddr = wb_reg_inst(11,7) & regAddrMask.U
val bypass_sources = IndexedSeq(
(true.B, 0.U, 0.U), // treat reading x0 as a bypass
(ex_reg_valid && ex_ctrl.wxd, ex_waddr, mem_reg_wdata),
(mem_reg_valid && mem_ctrl.wxd && !mem_ctrl.mem, mem_waddr, wb_reg_wdata),
(mem_reg_valid && mem_ctrl.wxd, mem_waddr, dcache_bypass_data))
val id_bypass_src = id_raddr.map(raddr => bypass_sources.map(s => s._1 && s._2 === raddr))
// execute stage
val bypass_mux = bypass_sources.map(_._3)
val ex_reg_rs_bypass = Reg(Vec(id_raddr.size, Bool()))
val ex_reg_rs_lsb = Reg(Vec(id_raddr.size, UInt(log2Ceil(bypass_sources.size).W)))
val ex_reg_rs_msb = Reg(Vec(id_raddr.size, UInt()))
val ex_rs = for (i <- 0 until id_raddr.size)
yield Mux(ex_reg_rs_bypass(i), bypass_mux(ex_reg_rs_lsb(i)), Cat(ex_reg_rs_msb(i), ex_reg_rs_lsb(i)))
val ex_imm = ImmGen(ex_ctrl.sel_imm, ex_reg_inst)
val ex_rs1shl = Mux(ex_reg_inst(3), ex_rs(0)(31,0), ex_rs(0)) << ex_reg_inst(14,13)
val ex_op1 = MuxLookup(ex_ctrl.sel_alu1, 0.S)(Seq(
A1_RS1 -> ex_rs(0).asSInt,
A1_PC -> ex_reg_pc.asSInt,
A1_RS1SHL -> (if (rocketParams.useZba) ex_rs1shl.asSInt else 0.S)
))
val ex_op2_oh = UIntToOH(Mux(ex_ctrl.sel_alu2(0), (ex_reg_inst >> 20).asUInt, ex_rs(1))(log2Ceil(xLen)-1,0)).asSInt
val ex_op2 = MuxLookup(ex_ctrl.sel_alu2, 0.S)(Seq(
A2_RS2 -> ex_rs(1).asSInt,
A2_IMM -> ex_imm,
A2_SIZE -> Mux(ex_reg_rvc, 2.S, 4.S),
) ++ (if (coreParams.useZbs) Seq(
A2_RS2OH -> ex_op2_oh,
A2_IMMOH -> ex_op2_oh,
) else Nil))
val (ex_new_vl, ex_new_vconfig) = if (usingVector) {
val ex_new_vtype = VType.fromUInt(MuxCase(ex_rs(1), Seq(
ex_reg_inst(31,30).andR -> ex_reg_inst(29,20),
!ex_reg_inst(31) -> ex_reg_inst(30,20))))
val ex_avl = Mux(ex_ctrl.rxs1,
Mux(ex_reg_inst(19,15) === 0.U,
Mux(ex_reg_inst(11,7) === 0.U, csr.io.vector.get.vconfig.vl, ex_new_vtype.vlMax),
ex_rs(0)
),
ex_reg_inst(19,15))
val ex_new_vl = ex_new_vtype.vl(ex_avl, csr.io.vector.get.vconfig.vl, false.B, false.B, false.B)
val ex_new_vconfig = Wire(new VConfig)
ex_new_vconfig.vtype := ex_new_vtype
ex_new_vconfig.vl := ex_new_vl
(Some(ex_new_vl), Some(ex_new_vconfig))
} else { (None, None) }
val alu = Module(new ALU)
alu.io.dw := ex_ctrl.alu_dw
alu.io.fn := ex_ctrl.alu_fn
alu.io.in2 := ex_op2.asUInt
alu.io.in1 := ex_op1.asUInt
// multiplier and divider
val div = Module(new MulDiv(if (pipelinedMul) mulDivParams.copy(mulUnroll = 0) else mulDivParams, width = xLen))
div.io.req.valid := ex_reg_valid && ex_ctrl.div
div.io.req.bits.dw := ex_ctrl.alu_dw
div.io.req.bits.fn := ex_ctrl.alu_fn
div.io.req.bits.in1 := ex_rs(0)
div.io.req.bits.in2 := ex_rs(1)
div.io.req.bits.tag := ex_waddr
val mul = pipelinedMul.option {
val m = Module(new PipelinedMultiplier(xLen, 2))
m.io.req.valid := ex_reg_valid && ex_ctrl.mul
m.io.req.bits := div.io.req.bits
m
}
ex_reg_valid := !ctrl_killd
ex_reg_replay := !take_pc && ibuf.io.inst(0).valid && ibuf.io.inst(0).bits.replay
ex_reg_xcpt := !ctrl_killd && id_xcpt
ex_reg_xcpt_interrupt := !take_pc && ibuf.io.inst(0).valid && csr.io.interrupt
when (!ctrl_killd) {
ex_ctrl := id_ctrl
ex_reg_rvc := ibuf.io.inst(0).bits.rvc
ex_ctrl.csr := id_csr
when (id_ctrl.fence && id_fence_succ === 0.U) { id_reg_pause := true.B }
when (id_fence_next) { id_reg_fence := true.B }
when (id_xcpt) { // pass PC down ALU writeback pipeline for badaddr
ex_ctrl.alu_fn := FN_ADD
ex_ctrl.alu_dw := DW_XPR
ex_ctrl.sel_alu1 := A1_RS1 // badaddr := instruction
ex_ctrl.sel_alu2 := A2_ZERO
when (id_xcpt1.asUInt.orR) { // badaddr := PC+2
ex_ctrl.sel_alu1 := A1_PC
ex_ctrl.sel_alu2 := A2_SIZE
ex_reg_rvc := true.B
}
when (bpu.io.xcpt_if || id_xcpt0.asUInt.orR) { // badaddr := PC
ex_ctrl.sel_alu1 := A1_PC
ex_ctrl.sel_alu2 := A2_ZERO
}
}
ex_reg_flush_pipe := id_ctrl.fence_i || id_csr_flush
ex_reg_load_use := id_load_use
ex_reg_hls := usingHypervisor.B && id_system_insn && id_ctrl.mem_cmd.isOneOf(M_XRD, M_XWR, M_HLVX)
ex_reg_mem_size := Mux(usingHypervisor.B && id_system_insn, id_inst(0)(27, 26), id_inst(0)(13, 12))
when (id_ctrl.mem_cmd.isOneOf(M_SFENCE, M_HFENCEV, M_HFENCEG, M_FLUSH_ALL)) {
ex_reg_mem_size := Cat(id_raddr2 =/= 0.U, id_raddr1 =/= 0.U)
}
when (id_ctrl.mem_cmd === M_SFENCE && csr.io.status.v) {
ex_ctrl.mem_cmd := M_HFENCEV
}
if (tile.dcache.flushOnFenceI) {
when (id_ctrl.fence_i) {
ex_reg_mem_size := 0.U
}
}
for (i <- 0 until id_raddr.size) {
val do_bypass = id_bypass_src(i).reduce(_||_)
val bypass_src = PriorityEncoder(id_bypass_src(i))
ex_reg_rs_bypass(i) := do_bypass
ex_reg_rs_lsb(i) := bypass_src
when (id_ren(i) && !do_bypass) {
ex_reg_rs_lsb(i) := id_rs(i)(log2Ceil(bypass_sources.size)-1, 0)
ex_reg_rs_msb(i) := id_rs(i) >> log2Ceil(bypass_sources.size)
}
}
when (id_illegal_insn || id_virtual_insn) {
val inst = Mux(ibuf.io.inst(0).bits.rvc, id_raw_inst(0)(15, 0), id_raw_inst(0))
ex_reg_rs_bypass(0) := false.B
ex_reg_rs_lsb(0) := inst(log2Ceil(bypass_sources.size)-1, 0)
ex_reg_rs_msb(0) := inst >> log2Ceil(bypass_sources.size)
}
}
when (!ctrl_killd || csr.io.interrupt || ibuf.io.inst(0).bits.replay) {
ex_reg_cause := id_cause
ex_reg_inst := id_inst(0)
ex_reg_raw_inst := id_raw_inst(0)
ex_reg_pc := ibuf.io.pc
ex_reg_btb_resp := ibuf.io.btb_resp
ex_reg_wphit := bpu.io.bpwatch.map { bpw => bpw.ivalid(0) }
ex_reg_set_vconfig := id_set_vconfig && !id_xcpt
}
// replay inst in ex stage?
val ex_pc_valid = ex_reg_valid || ex_reg_replay || ex_reg_xcpt_interrupt
val wb_dcache_miss = wb_ctrl.mem && !io.dmem.resp.valid
val replay_ex_structural = ex_ctrl.mem && !io.dmem.req.ready ||
ex_ctrl.div && !div.io.req.ready ||
ex_ctrl.vec && !io.vector.map(_.ex.ready).getOrElse(true.B)
val replay_ex_load_use = wb_dcache_miss && ex_reg_load_use
val replay_ex = ex_reg_replay || (ex_reg_valid && (replay_ex_structural || replay_ex_load_use))
val ctrl_killx = take_pc_mem_wb || replay_ex || !ex_reg_valid
// detect 2-cycle load-use delay for LB/LH/SC
val ex_slow_bypass = ex_ctrl.mem_cmd === M_XSC || ex_reg_mem_size < 2.U
val ex_sfence = usingVM.B && ex_ctrl.mem && (ex_ctrl.mem_cmd === M_SFENCE || ex_ctrl.mem_cmd === M_HFENCEV || ex_ctrl.mem_cmd === M_HFENCEG)
val (ex_xcpt, ex_cause) = checkExceptions(List(
(ex_reg_xcpt_interrupt || ex_reg_xcpt, ex_reg_cause)))
val exCoverCauses = idCoverCauses
coverExceptions(ex_xcpt, ex_cause, "EXECUTE", exCoverCauses)
// memory stage
val mem_pc_valid = mem_reg_valid || mem_reg_replay || mem_reg_xcpt_interrupt
val mem_br_target = mem_reg_pc.asSInt +
Mux(mem_ctrl.branch && mem_br_taken, ImmGen(IMM_SB, mem_reg_inst),
Mux(mem_ctrl.jal, ImmGen(IMM_UJ, mem_reg_inst),
Mux(mem_reg_rvc, 2.S, 4.S)))
val mem_npc = (Mux(mem_ctrl.jalr || mem_reg_sfence, encodeVirtualAddress(mem_reg_wdata, mem_reg_wdata).asSInt, mem_br_target) & (-2).S).asUInt
val mem_wrong_npc =
Mux(ex_pc_valid, mem_npc =/= ex_reg_pc,
Mux(ibuf.io.inst(0).valid || ibuf.io.imem.valid, mem_npc =/= ibuf.io.pc, true.B))
val mem_npc_misaligned = !csr.io.status.isa('c'-'a') && mem_npc(1) && !mem_reg_sfence
val mem_int_wdata = Mux(!mem_reg_xcpt && (mem_ctrl.jalr ^ mem_npc_misaligned), mem_br_target, mem_reg_wdata.asSInt).asUInt
val mem_cfi = mem_ctrl.branch || mem_ctrl.jalr || mem_ctrl.jal
val mem_cfi_taken = (mem_ctrl.branch && mem_br_taken) || mem_ctrl.jalr || mem_ctrl.jal
val mem_direction_misprediction = mem_ctrl.branch && mem_br_taken =/= (usingBTB.B && mem_reg_btb_resp.taken)
val mem_misprediction = if (usingBTB) mem_wrong_npc else mem_cfi_taken
take_pc_mem := mem_reg_valid && !mem_reg_xcpt && (mem_misprediction || mem_reg_sfence)
mem_reg_valid := !ctrl_killx
mem_reg_replay := !take_pc_mem_wb && replay_ex
mem_reg_xcpt := !ctrl_killx && ex_xcpt
mem_reg_xcpt_interrupt := !take_pc_mem_wb && ex_reg_xcpt_interrupt
// on pipeline flushes, cause mem_npc to hold the sequential npc, which
// will drive the W-stage npc mux
when (mem_reg_valid && mem_reg_flush_pipe) {
mem_reg_sfence := false.B
}.elsewhen (ex_pc_valid) {
mem_ctrl := ex_ctrl
mem_reg_rvc := ex_reg_rvc
mem_reg_load := ex_ctrl.mem && isRead(ex_ctrl.mem_cmd)
mem_reg_store := ex_ctrl.mem && isWrite(ex_ctrl.mem_cmd)
mem_reg_sfence := ex_sfence
mem_reg_btb_resp := ex_reg_btb_resp
mem_reg_flush_pipe := ex_reg_flush_pipe
mem_reg_slow_bypass := ex_slow_bypass
mem_reg_wphit := ex_reg_wphit
mem_reg_set_vconfig := ex_reg_set_vconfig
mem_reg_cause := ex_cause
mem_reg_inst := ex_reg_inst
mem_reg_raw_inst := ex_reg_raw_inst
mem_reg_mem_size := ex_reg_mem_size
mem_reg_hls_or_dv := io.dmem.req.bits.dv
mem_reg_pc := ex_reg_pc
// IDecode ensured they are 1H
mem_reg_wdata := Mux(ex_reg_set_vconfig, ex_new_vl.getOrElse(alu.io.out), alu.io.out)
mem_br_taken := alu.io.cmp_out
when (ex_ctrl.rxs2 && (ex_ctrl.mem || ex_ctrl.rocc || ex_sfence)) {
val size = Mux(ex_ctrl.rocc, log2Ceil(xLen/8).U, ex_reg_mem_size)
mem_reg_rs2 := new StoreGen(size, 0.U, ex_rs(1), coreDataBytes).data
}
if (usingVector) { when (ex_reg_set_vconfig) {
mem_reg_rs2 := ex_new_vconfig.get.asUInt
} }
when (ex_ctrl.jalr && csr.io.status.debug) {
// flush I$ on D-mode JALR to effect uncached fetch without D$ flush
mem_ctrl.fence_i := true.B
mem_reg_flush_pipe := true.B
}
}
val mem_breakpoint = (mem_reg_load && bpu.io.xcpt_ld) || (mem_reg_store && bpu.io.xcpt_st)
val mem_debug_breakpoint = (mem_reg_load && bpu.io.debug_ld) || (mem_reg_store && bpu.io.debug_st)
val (mem_ldst_xcpt, mem_ldst_cause) = checkExceptions(List(
(mem_debug_breakpoint, CSR.debugTriggerCause.U),
(mem_breakpoint, Causes.breakpoint.U)))
val (mem_xcpt, mem_cause) = checkExceptions(List(
(mem_reg_xcpt_interrupt || mem_reg_xcpt, mem_reg_cause),
(mem_reg_valid && mem_npc_misaligned, Causes.misaligned_fetch.U),
(mem_reg_valid && mem_ldst_xcpt, mem_ldst_cause)))
val memCoverCauses = (exCoverCauses ++ List(
(CSR.debugTriggerCause, "DEBUG_TRIGGER"),
(Causes.breakpoint, "BREAKPOINT"),
(Causes.misaligned_fetch, "MISALIGNED_FETCH")
)).distinct
coverExceptions(mem_xcpt, mem_cause, "MEMORY", memCoverCauses)
val dcache_kill_mem = mem_reg_valid && mem_ctrl.wxd && io.dmem.replay_next // structural hazard on writeback port
val fpu_kill_mem = mem_reg_valid && mem_ctrl.fp && io.fpu.nack_mem
val vec_kill_mem = mem_reg_valid && mem_ctrl.mem && io.vector.map(_.mem.block_mem).getOrElse(false.B)
val vec_kill_all = mem_reg_valid && io.vector.map(_.mem.block_all).getOrElse(false.B)
val replay_mem = dcache_kill_mem || mem_reg_replay || fpu_kill_mem || vec_kill_mem || vec_kill_all
val killm_common = dcache_kill_mem || take_pc_wb || mem_reg_xcpt || !mem_reg_valid
div.io.kill := killm_common && RegNext(div.io.req.fire)
val ctrl_killm = killm_common || mem_xcpt || fpu_kill_mem || vec_kill_mem
// writeback stage
wb_reg_valid := !ctrl_killm
wb_reg_replay := replay_mem && !take_pc_wb
wb_reg_xcpt := mem_xcpt && !take_pc_wb && !io.vector.map(_.mem.block_all).getOrElse(false.B)
wb_reg_flush_pipe := !ctrl_killm && mem_reg_flush_pipe
when (mem_pc_valid) {
wb_ctrl := mem_ctrl
wb_reg_sfence := mem_reg_sfence
wb_reg_wdata := Mux(!mem_reg_xcpt && mem_ctrl.fp && mem_ctrl.wxd, io.fpu.toint_data, mem_int_wdata)
when (mem_ctrl.rocc || mem_reg_sfence || mem_reg_set_vconfig) {
wb_reg_rs2 := mem_reg_rs2
}
wb_reg_cause := mem_cause
wb_reg_inst := mem_reg_inst
wb_reg_raw_inst := mem_reg_raw_inst
wb_reg_mem_size := mem_reg_mem_size
wb_reg_hls_or_dv := mem_reg_hls_or_dv
wb_reg_hfence_v := mem_ctrl.mem_cmd === M_HFENCEV
wb_reg_hfence_g := mem_ctrl.mem_cmd === M_HFENCEG
wb_reg_pc := mem_reg_pc
wb_reg_wphit := mem_reg_wphit | bpu.io.bpwatch.map { bpw => (bpw.rvalid(0) && mem_reg_load) || (bpw.wvalid(0) && mem_reg_store) }
wb_reg_set_vconfig := mem_reg_set_vconfig
}
val (wb_xcpt, wb_cause) = checkExceptions(List(
(wb_reg_xcpt, wb_reg_cause),
(wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.pf.st, Causes.store_page_fault.U),
(wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.pf.ld, Causes.load_page_fault.U),
(wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.gf.st, Causes.store_guest_page_fault.U),
(wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.gf.ld, Causes.load_guest_page_fault.U),
(wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.ae.st, Causes.store_access.U),
(wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.ae.ld, Causes.load_access.U),
(wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.ma.st, Causes.misaligned_store.U),
(wb_reg_valid && wb_ctrl.mem && io.dmem.s2_xcpt.ma.ld, Causes.misaligned_load.U)
))
val wbCoverCauses = List(
(Causes.misaligned_store, "MISALIGNED_STORE"),
(Causes.misaligned_load, "MISALIGNED_LOAD"),
(Causes.store_access, "STORE_ACCESS"),
(Causes.load_access, "LOAD_ACCESS")
) ++ (if(usingVM) List(
(Causes.store_page_fault, "STORE_PAGE_FAULT"),
(Causes.load_page_fault, "LOAD_PAGE_FAULT")
) else Nil) ++ (if (usingHypervisor) List(
(Causes.store_guest_page_fault, "STORE_GUEST_PAGE_FAULT"),
(Causes.load_guest_page_fault, "LOAD_GUEST_PAGE_FAULT"),
) else Nil)
coverExceptions(wb_xcpt, wb_cause, "WRITEBACK", wbCoverCauses)
val wb_pc_valid = wb_reg_valid || wb_reg_replay || wb_reg_xcpt
val wb_wxd = wb_reg_valid && wb_ctrl.wxd
val wb_set_sboard = wb_ctrl.div || wb_dcache_miss || wb_ctrl.rocc || wb_ctrl.vec
val replay_wb_common = io.dmem.s2_nack || wb_reg_replay
val replay_wb_rocc = wb_reg_valid && wb_ctrl.rocc && !io.rocc.cmd.ready
val replay_wb_csr: Bool = wb_reg_valid && csr.io.rw_stall
val replay_wb_vec = wb_reg_valid && io.vector.map(_.wb.replay).getOrElse(false.B)
val replay_wb = replay_wb_common || replay_wb_rocc || replay_wb_csr || replay_wb_vec
take_pc_wb := replay_wb || wb_xcpt || csr.io.eret || wb_reg_flush_pipe
// writeback arbitration
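// The D$ response tag is laid out as Cat(waddr, fp) (see ex_dcache_tag below): bit 0
// selects the integer vs. FP register file, and bits 5:1 carry the destination register.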
val dmem_resp_xpu = !io.dmem.resp.bits.tag(0).asBool
val dmem_resp_fpu = io.dmem.resp.bits.tag(0).asBool
val dmem_resp_waddr = io.dmem.resp.bits.tag(5, 1)
val dmem_resp_valid = io.dmem.resp.valid && io.dmem.resp.bits.has_data
val dmem_resp_replay = dmem_resp_valid && io.dmem.resp.bits.replay
class LLWB extends Bundle {
val data = UInt(xLen.W)
val tag = UInt(5.W)
}
val ll_arb = Module(new Arbiter(new LLWB, 3)) // div, rocc, vec
ll_arb.io.in.foreach(_.valid := false.B)
ll_arb.io.in.foreach(_.bits := DontCare)
val ll_wdata = WireInit(ll_arb.io.out.bits.data)
val ll_waddr = WireInit(ll_arb.io.out.bits.tag)
val ll_wen = WireInit(ll_arb.io.out.fire)
ll_arb.io.out.ready := !wb_wxd
div.io.resp.ready := ll_arb.io.in(0).ready
ll_arb.io.in(0).valid := div.io.resp.valid
ll_arb.io.in(0).bits.data := div.io.resp.bits.data
ll_arb.io.in(0).bits.tag := div.io.resp.bits.tag
if (usingRoCC) {
io.rocc.resp.ready := ll_arb.io.in(1).ready
ll_arb.io.in(1).valid := io.rocc.resp.valid
ll_arb.io.in(1).bits.data := io.rocc.resp.bits.data
ll_arb.io.in(1).bits.tag := io.rocc.resp.bits.rd
} else {
// tie off RoCC
io.rocc.resp.ready := false.B
io.rocc.mem.req.ready := false.B
}
io.vector.map { v =>
v.resp.ready := Mux(v.resp.bits.fp, !(dmem_resp_valid && dmem_resp_fpu), ll_arb.io.in(2).ready)
ll_arb.io.in(2).valid := v.resp.valid && !v.resp.bits.fp
ll_arb.io.in(2).bits.data := v.resp.bits.data
ll_arb.io.in(2).bits.tag := v.resp.bits.rd
}
// DontCare the RoCC mem port, since not every RoCC accelerator accesses memory
io.rocc.mem := DontCare
when (dmem_resp_replay && dmem_resp_xpu) {
ll_arb.io.out.ready := false.B
ll_waddr := dmem_resp_waddr
ll_wen := true.B
}
val wb_valid = wb_reg_valid && !replay_wb && !wb_xcpt
val wb_wen = wb_valid && wb_ctrl.wxd
val rf_wen = wb_wen || ll_wen
val rf_waddr = Mux(ll_wen, ll_waddr, wb_waddr)
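// Writeback data priority: a returning D$ load beats a long-latency (div/RoCC/vector)
// writeback, which beats a CSR read, which beats the pipelined multiplier, with
// wb_reg_wdata (the value carried down the pipeline) as the default.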
val rf_wdata = Mux(dmem_resp_valid && dmem_resp_xpu, io.dmem.resp.bits.data(xLen-1, 0),
Mux(ll_wen, ll_wdata,
Mux(wb_ctrl.csr =/= CSR.N, csr.io.rw.rdata,
Mux(wb_ctrl.mul, mul.map(_.io.resp.bits.data).getOrElse(wb_reg_wdata),
wb_reg_wdata))))
when (rf_wen) { rf.write(rf_waddr, rf_wdata) }
// hook up control/status regfile
csr.io.ungated_clock := clock
csr.io.decode(0).inst := id_inst(0)
csr.io.exception := wb_xcpt
csr.io.cause := wb_cause
csr.io.retire := wb_valid
csr.io.inst(0) := (if (usingCompressed) Cat(Mux(wb_reg_raw_inst(1, 0).andR, wb_reg_inst >> 16, 0.U), wb_reg_raw_inst(15, 0)) else wb_reg_inst)
csr.io.interrupts := io.interrupts
csr.io.hartid := io.hartid
io.fpu.fcsr_rm := csr.io.fcsr_rm
val vector_fcsr_flags = io.vector.map(_.set_fflags.bits).getOrElse(0.U(5.W))
val vector_fcsr_flags_valid = io.vector.map(_.set_fflags.valid).getOrElse(false.B)
csr.io.fcsr_flags.valid := io.fpu.fcsr_flags.valid | vector_fcsr_flags_valid
csr.io.fcsr_flags.bits := (io.fpu.fcsr_flags.bits & Fill(5, io.fpu.fcsr_flags.valid)) | (vector_fcsr_flags & Fill(5, vector_fcsr_flags_valid))
io.fpu.time := csr.io.time(31,0)
io.fpu.hartid := io.hartid
csr.io.rocc_interrupt := io.rocc.interrupt
csr.io.pc := wb_reg_pc
val tval_dmem_addr = !wb_reg_xcpt
val tval_any_addr = tval_dmem_addr ||
wb_reg_cause.isOneOf(Causes.breakpoint.U, Causes.fetch_access.U, Causes.fetch_page_fault.U, Causes.fetch_guest_page_fault.U)
val tval_inst = wb_reg_cause === Causes.illegal_instruction.U
val tval_valid = wb_xcpt && (tval_any_addr || tval_inst)
csr.io.gva := wb_xcpt && (tval_any_addr && csr.io.status.v || tval_dmem_addr && wb_reg_hls_or_dv)
csr.io.tval := Mux(tval_valid, encodeVirtualAddress(wb_reg_wdata, wb_reg_wdata), 0.U)
val (htval, mhtinst_read_pseudo) = {
val htval_valid_imem = wb_reg_xcpt && wb_reg_cause === Causes.fetch_guest_page_fault.U
val htval_imem = Mux(htval_valid_imem, io.imem.gpa.bits, 0.U)
assert(!htval_valid_imem || io.imem.gpa.valid)
val htval_valid_dmem = wb_xcpt && tval_dmem_addr && io.dmem.s2_xcpt.gf.asUInt.orR && !io.dmem.s2_xcpt.pf.asUInt.orR
val htval_dmem = Mux(htval_valid_dmem, io.dmem.s2_gpa, 0.U)
val htval = (htval_dmem | htval_imem) >> hypervisorExtraAddrBits
// read pseudoinstruction if a guest-page fault is caused by an implicit memory access for VS-stage address translation
val mhtinst_read_pseudo = (io.imem.gpa_is_pte && htval_valid_imem) || (io.dmem.s2_gpa_is_pte && htval_valid_dmem)
(htval, mhtinst_read_pseudo)
}
csr.io.vector.foreach { v =>
v.set_vconfig.valid := wb_reg_set_vconfig && wb_reg_valid
v.set_vconfig.bits := wb_reg_rs2.asTypeOf(new VConfig)
v.set_vs_dirty := wb_valid && wb_ctrl.vec
v.set_vstart.valid := wb_valid && wb_reg_set_vconfig
v.set_vstart.bits := 0.U
}
io.vector.foreach { v =>
when (v.wb.retire || v.wb.xcpt || wb_ctrl.vec) {
csr.io.pc := v.wb.pc
csr.io.retire := v.wb.retire
csr.io.inst(0) := v.wb.inst
when (v.wb.xcpt && !wb_reg_xcpt) {
wb_xcpt := true.B
wb_cause := v.wb.cause
csr.io.tval := v.wb.tval
}
}
v.wb.store_pending := io.dmem.store_pending
v.wb.vxrm := csr.io.vector.get.vxrm
v.wb.frm := csr.io.fcsr_rm
csr.io.vector.get.set_vxsat := v.set_vxsat
when (v.set_vconfig.valid) {
csr.io.vector.get.set_vconfig.valid := true.B
csr.io.vector.get.set_vconfig.bits := v.set_vconfig.bits
}
when (v.set_vstart.valid) {
csr.io.vector.get.set_vstart.valid := true.B
csr.io.vector.get.set_vstart.bits := v.set_vstart.bits
}
}
csr.io.htval := htval
csr.io.mhtinst_read_pseudo := mhtinst_read_pseudo
io.ptw.ptbr := csr.io.ptbr
io.ptw.hgatp := csr.io.hgatp
io.ptw.vsatp := csr.io.vsatp
(io.ptw.customCSRs.csrs zip csr.io.customCSRs).map { case (lhs, rhs) => lhs <> rhs }
io.ptw.status := csr.io.status
io.ptw.hstatus := csr.io.hstatus
io.ptw.gstatus := csr.io.gstatus
io.ptw.pmp := csr.io.pmp
csr.io.rw.addr := wb_reg_inst(31,20)
csr.io.rw.cmd := CSR.maskCmd(wb_reg_valid, wb_ctrl.csr)
csr.io.rw.wdata := wb_reg_wdata
io.rocc.csrs <> csr.io.roccCSRs
io.trace.time := csr.io.time
io.trace.insns := csr.io.trace
if (rocketParams.debugROB.isDefined) {
val sz = rocketParams.debugROB.get.size
if (sz < 1) { // use unsynthesizable ROB
val csr_trace_with_wdata = WireInit(csr.io.trace(0))
csr_trace_with_wdata.wdata.get := rf_wdata
val should_wb = WireInit((wb_ctrl.wfd || (wb_ctrl.wxd && wb_waddr =/= 0.U)) && !csr.io.trace(0).exception)
val has_wb = WireInit(wb_ctrl.wxd && wb_wen && !wb_set_sboard)
val wb_addr = WireInit(wb_waddr + Mux(wb_ctrl.wfd, 32.U, 0.U))
io.vector.foreach { v => when (v.wb.retire) {
should_wb := v.wb.rob_should_wb
has_wb := false.B
wb_addr := Cat(v.wb.rob_should_wb_fp, csr_trace_with_wdata.insn(11,7))
}}
DebugROB.pushTrace(clock, reset,
io.hartid, csr_trace_with_wdata,
should_wb, has_wb, wb_addr)
io.trace.insns(0) := DebugROB.popTrace(clock, reset, io.hartid)
DebugROB.pushWb(clock, reset, io.hartid, ll_wen, rf_waddr, rf_wdata)
} else { // synthesizable ROB (no FPRs)
require(!usingVector, "Synthesizable ROB does not support vector implementations")
val csr_trace_with_wdata = WireInit(csr.io.trace(0))
csr_trace_with_wdata.wdata.get := rf_wdata
val debug_rob = Module(new HardDebugROB(sz, 32))
debug_rob.io.i_insn := csr_trace_with_wdata
debug_rob.io.should_wb := (wb_ctrl.wfd || (wb_ctrl.wxd && wb_waddr =/= 0.U)) &&
!csr.io.trace(0).exception
debug_rob.io.has_wb := wb_ctrl.wxd && wb_wen && !wb_set_sboard
debug_rob.io.tag := wb_waddr + Mux(wb_ctrl.wfd, 32.U, 0.U)
debug_rob.io.wb_val := ll_wen
debug_rob.io.wb_tag := rf_waddr
debug_rob.io.wb_data := rf_wdata
io.trace.insns(0) := debug_rob.io.o_insn
}
} else {
io.trace.insns := csr.io.trace
}
for (((iobpw, wphit), bp) <- io.bpwatch zip wb_reg_wphit zip csr.io.bp) {
iobpw.valid(0) := wphit
iobpw.action := bp.control.action
// tie off bpwatch valids
iobpw.rvalid.foreach(_ := false.B)
iobpw.wvalid.foreach(_ := false.B)
iobpw.ivalid.foreach(_ := false.B)
}
val hazard_targets = Seq((id_ctrl.rxs1 && id_raddr1 =/= 0.U, id_raddr1),
(id_ctrl.rxs2 && id_raddr2 =/= 0.U, id_raddr2),
(id_ctrl.wxd && id_waddr =/= 0.U, id_waddr))
val fp_hazard_targets = Seq((io.fpu.dec.ren1, id_raddr1),
(io.fpu.dec.ren2, id_raddr2),
(io.fpu.dec.ren3, id_raddr3),
(io.fpu.dec.wen, id_waddr))
val sboard = new Scoreboard(32, true)
sboard.clear(ll_wen, ll_waddr)
def id_sboard_clear_bypass(r: UInt) = {
// ll_waddr arrives late when D$ has ECC, so reshuffle the hazard check
if (!tileParams.dcache.get.dataECC.isDefined) ll_wen && ll_waddr === r
else div.io.resp.fire && div.io.resp.bits.tag === r || dmem_resp_replay && dmem_resp_xpu && dmem_resp_waddr === r
}
val id_sboard_hazard = checkHazards(hazard_targets, rd => sboard.read(rd) && !id_sboard_clear_bypass(rd))
sboard.set(wb_set_sboard && wb_wen, wb_waddr)
// stall for RAW/WAW hazards on CSRs, loads, AMOs, and mul/div in execute stage.
val ex_cannot_bypass = ex_ctrl.csr =/= CSR.N || ex_ctrl.jalr || ex_ctrl.mem || ex_ctrl.mul || ex_ctrl.div || ex_ctrl.fp || ex_ctrl.rocc || ex_ctrl.vec
val data_hazard_ex = ex_ctrl.wxd && checkHazards(hazard_targets, _ === ex_waddr)
val fp_data_hazard_ex = id_ctrl.fp && ex_ctrl.wfd && checkHazards(fp_hazard_targets, _ === ex_waddr)
val id_ex_hazard = ex_reg_valid && (data_hazard_ex && ex_cannot_bypass || fp_data_hazard_ex)
// stall for RAW/WAW hazards on CSRs, LB/LH, and mul/div in memory stage.
val mem_mem_cmd_bh =
if (fastLoadWord) (!fastLoadByte).B && mem_reg_slow_bypass
else true.B
val mem_cannot_bypass = mem_ctrl.csr =/= CSR.N || mem_ctrl.mem && mem_mem_cmd_bh || mem_ctrl.mul || mem_ctrl.div || mem_ctrl.fp || mem_ctrl.rocc || mem_ctrl.vec
val data_hazard_mem = mem_ctrl.wxd && checkHazards(hazard_targets, _ === mem_waddr)
val fp_data_hazard_mem = id_ctrl.fp && mem_ctrl.wfd && checkHazards(fp_hazard_targets, _ === mem_waddr)
val id_mem_hazard = mem_reg_valid && (data_hazard_mem && mem_cannot_bypass || fp_data_hazard_mem)
id_load_use := mem_reg_valid && data_hazard_mem && mem_ctrl.mem
val id_vconfig_hazard = id_ctrl.vec && (
(ex_reg_valid && ex_reg_set_vconfig) ||
(mem_reg_valid && mem_reg_set_vconfig) ||
(wb_reg_valid && wb_reg_set_vconfig))
// stall for RAW/WAW hazards on load/AMO misses and mul/div in writeback.
val data_hazard_wb = wb_ctrl.wxd && checkHazards(hazard_targets, _ === wb_waddr)
val fp_data_hazard_wb = id_ctrl.fp && wb_ctrl.wfd && checkHazards(fp_hazard_targets, _ === wb_waddr)
val id_wb_hazard = wb_reg_valid && (data_hazard_wb && wb_set_sboard || fp_data_hazard_wb)
val id_stall_fpu = if (usingFPU) {
val fp_sboard = new Scoreboard(32)
fp_sboard.set(((wb_dcache_miss || wb_ctrl.vec) && wb_ctrl.wfd || io.fpu.sboard_set) && wb_valid, wb_waddr)
val v_ll = io.vector.map(v => v.resp.fire && v.resp.bits.fp).getOrElse(false.B)
fp_sboard.clear((dmem_resp_replay && dmem_resp_fpu) || v_ll, io.fpu.ll_resp_tag)
fp_sboard.clear(io.fpu.sboard_clr, io.fpu.sboard_clra)
checkHazards(fp_hazard_targets, fp_sboard.read _)
} else false.B
val dcache_blocked = {
// speculate that a blocked D$ will unblock the cycle after a Grant
val blocked = Reg(Bool())
blocked := !io.dmem.req.ready && io.dmem.clock_enabled && !io.dmem.perf.grant && (blocked || io.dmem.req.valid || io.dmem.s2_nack)
blocked && !io.dmem.perf.grant
}
val rocc_blocked = Reg(Bool())
rocc_blocked := !wb_xcpt && !io.rocc.cmd.ready && (io.rocc.cmd.valid || rocc_blocked)
val ctrl_stalld =
id_ex_hazard || id_mem_hazard || id_wb_hazard || id_sboard_hazard ||
id_vconfig_hazard ||
csr.io.singleStep && (ex_reg_valid || mem_reg_valid || wb_reg_valid) ||
id_csr_en && csr.io.decode(0).fp_csr && !io.fpu.fcsr_rdy ||
id_csr_en && csr.io.decode(0).vector_csr && id_vec_busy ||
id_ctrl.fp && id_stall_fpu ||
id_ctrl.mem && dcache_blocked || // reduce activity during D$ misses
id_ctrl.rocc && rocc_blocked || // reduce activity while RoCC is busy
id_ctrl.div && (!(div.io.req.ready || (div.io.resp.valid && !wb_wxd)) || div.io.req.valid) || // reduce odds of replay
!clock_en ||
id_do_fence ||
csr.io.csr_stall ||
id_reg_pause ||
io.traceStall
ctrl_killd := !ibuf.io.inst(0).valid || ibuf.io.inst(0).bits.replay || take_pc_mem_wb || ctrl_stalld || csr.io.interrupt
io.imem.req.valid := take_pc
io.imem.req.bits.speculative := !take_pc_wb
io.imem.req.bits.pc :=
Mux(wb_xcpt || csr.io.eret, csr.io.evec, // exception or [m|s]ret
Mux(replay_wb, wb_reg_pc, // replay
mem_npc)) // flush or branch misprediction
io.imem.flush_icache := wb_reg_valid && wb_ctrl.fence_i && !io.dmem.s2_nack
io.imem.might_request := {
imem_might_request_reg := ex_pc_valid || mem_pc_valid || io.ptw.customCSRs.disableICacheClockGate || io.vector.map(_.trap_check_busy).getOrElse(false.B)
imem_might_request_reg
}
io.imem.progress := RegNext(wb_reg_valid && !replay_wb_common)
io.imem.sfence.valid := wb_reg_valid && wb_reg_sfence
io.imem.sfence.bits.rs1 := wb_reg_mem_size(0)
io.imem.sfence.bits.rs2 := wb_reg_mem_size(1)
io.imem.sfence.bits.addr := wb_reg_wdata
io.imem.sfence.bits.asid := wb_reg_rs2
io.imem.sfence.bits.hv := wb_reg_hfence_v
io.imem.sfence.bits.hg := wb_reg_hfence_g
io.ptw.sfence := io.imem.sfence
ibuf.io.inst(0).ready := !ctrl_stalld
io.imem.btb_update.valid := mem_reg_valid && !take_pc_wb && mem_wrong_npc && (!mem_cfi || mem_cfi_taken)
io.imem.btb_update.bits.isValid := mem_cfi
io.imem.btb_update.bits.cfiType :=
Mux((mem_ctrl.jal || mem_ctrl.jalr) && mem_waddr(0), CFIType.call,
Mux(mem_ctrl.jalr && (mem_reg_inst(19,15) & regAddrMask.U) === BitPat("b00?01"), CFIType.ret,
Mux(mem_ctrl.jal || mem_ctrl.jalr, CFIType.jump,
CFIType.branch)))
io.imem.btb_update.bits.target := io.imem.req.bits.pc
io.imem.btb_update.bits.br_pc := (if (usingCompressed) mem_reg_pc + Mux(mem_reg_rvc, 0.U, 2.U) else mem_reg_pc)
io.imem.btb_update.bits.pc := ~(~io.imem.btb_update.bits.br_pc | (coreInstBytes*fetchWidth-1).U)
io.imem.btb_update.bits.prediction := mem_reg_btb_resp
io.imem.btb_update.bits.taken := DontCare
io.imem.bht_update.valid := mem_reg_valid && !take_pc_wb
io.imem.bht_update.bits.pc := io.imem.btb_update.bits.pc
io.imem.bht_update.bits.taken := mem_br_taken
io.imem.bht_update.bits.mispredict := mem_wrong_npc
io.imem.bht_update.bits.branch := mem_ctrl.branch
io.imem.bht_update.bits.prediction := mem_reg_btb_resp.bht
// Connect RAS in Frontend
io.imem.ras_update := DontCare
io.fpu.valid := !ctrl_killd && id_ctrl.fp
io.fpu.killx := ctrl_killx
io.fpu.killm := killm_common
io.fpu.inst := id_inst(0)
io.fpu.fromint_data := ex_rs(0)
io.fpu.ll_resp_val := dmem_resp_valid && dmem_resp_fpu
io.fpu.ll_resp_data := (if (minFLen == 32) io.dmem.resp.bits.data_word_bypass else io.dmem.resp.bits.data)
io.fpu.ll_resp_type := io.dmem.resp.bits.size
io.fpu.ll_resp_tag := dmem_resp_waddr
io.fpu.keep_clock_enabled := io.ptw.customCSRs.disableCoreClockGate
io.fpu.v_sew := csr.io.vector.map(_.vconfig.vtype.vsew).getOrElse(0.U)
io.vector.map { v =>
when (!(dmem_resp_valid && dmem_resp_fpu)) {
io.fpu.ll_resp_val := v.resp.valid && v.resp.bits.fp
io.fpu.ll_resp_data := v.resp.bits.data
io.fpu.ll_resp_type := v.resp.bits.size
io.fpu.ll_resp_tag := v.resp.bits.rd
}
}
io.vector.foreach { v =>
v.ex.valid := ex_reg_valid && (ex_ctrl.vec || rocketParams.vector.get.issueVConfig.B && ex_reg_set_vconfig) && !ctrl_killx
v.ex.inst := ex_reg_inst
v.ex.vconfig := csr.io.vector.get.vconfig
v.ex.vstart := Mux(mem_reg_valid && mem_ctrl.vec || wb_reg_valid && wb_ctrl.vec, 0.U, csr.io.vector.get.vstart)
v.ex.rs1 := ex_rs(0)
v.ex.rs2 := ex_rs(1)
v.ex.pc := ex_reg_pc
v.mem.frs1 := io.fpu.store_data
v.killm := killm_common
v.status := csr.io.status
}
io.dmem.req.valid := ex_reg_valid && ex_ctrl.mem
val ex_dcache_tag = Cat(ex_waddr, ex_ctrl.fp)
require(coreParams.dcacheReqTagBits >= ex_dcache_tag.getWidth)
io.dmem.req.bits.tag := ex_dcache_tag
io.dmem.req.bits.cmd := ex_ctrl.mem_cmd
io.dmem.req.bits.size := ex_reg_mem_size
io.dmem.req.bits.signed := !Mux(ex_reg_hls, ex_reg_inst(20), ex_reg_inst(14))
io.dmem.req.bits.phys := false.B
io.dmem.req.bits.addr := encodeVirtualAddress(ex_rs(0), alu.io.adder_out)
io.dmem.req.bits.idx.foreach(_ := io.dmem.req.bits.addr)
io.dmem.req.bits.dprv := Mux(ex_reg_hls, csr.io.hstatus.spvp, csr.io.status.dprv)
io.dmem.req.bits.dv := ex_reg_hls || csr.io.status.dv
io.dmem.req.bits.no_resp := !isRead(ex_ctrl.mem_cmd) || (!ex_ctrl.fp && ex_waddr === 0.U)
io.dmem.req.bits.no_alloc := DontCare
io.dmem.req.bits.no_xcpt := DontCare
io.dmem.req.bits.data := DontCare
io.dmem.req.bits.mask := DontCare
io.dmem.s1_data.data := (if (fLen == 0) mem_reg_rs2 else Mux(mem_ctrl.fp, Fill(coreDataBits / fLen, io.fpu.store_data), mem_reg_rs2))
io.dmem.s1_data.mask := DontCare
io.dmem.s1_kill := killm_common || mem_ldst_xcpt || fpu_kill_mem || vec_kill_mem
io.dmem.s2_kill := false.B
// don't let D$ go to sleep if we're probably going to use it soon
io.dmem.keep_clock_enabled := ibuf.io.inst(0).valid && id_ctrl.mem && !csr.io.csr_stall
io.rocc.cmd.valid := wb_reg_valid && wb_ctrl.rocc && !replay_wb_common
io.rocc.exception := wb_xcpt && csr.io.status.xs.orR
io.rocc.cmd.bits.status := csr.io.status
io.rocc.cmd.bits.inst := wb_reg_inst.asTypeOf(new RoCCInstruction())
io.rocc.cmd.bits.rs1 := wb_reg_wdata
io.rocc.cmd.bits.rs2 := wb_reg_rs2
// gate the clock
val unpause = csr.io.time(rocketParams.lgPauseCycles-1, 0) === 0.U || csr.io.inhibit_cycle || io.dmem.perf.release || take_pc
when (unpause) { id_reg_pause := false.B }
io.cease := csr.io.status.cease && !clock_en_reg
io.wfi := csr.io.status.wfi
if (rocketParams.clockGate) {
long_latency_stall := csr.io.csr_stall || io.dmem.perf.blocked || id_reg_pause && !unpause
clock_en := clock_en_reg || ex_pc_valid || (!long_latency_stall && io.imem.resp.valid)
clock_en_reg :=
ex_pc_valid || mem_pc_valid || wb_pc_valid || // instruction in flight
io.ptw.customCSRs.disableCoreClockGate || // chicken bit
!div.io.req.ready || // mul/div in flight
usingFPU.B && !io.fpu.fcsr_rdy || // long-latency FPU in flight
io.dmem.replay_next || // long-latency load replaying
(!long_latency_stall && (ibuf.io.inst(0).valid || io.imem.resp.valid)) // instruction pending
assert(!(ex_pc_valid || mem_pc_valid || wb_pc_valid) || clock_en)
}
// evaluate performance counters
val icache_blocked = !(io.imem.resp.valid || RegNext(io.imem.resp.valid))
csr.io.counters foreach { c => c.inc := RegNext(perfEvents.evaluate(c.eventSel)) }
val coreMonitorBundle = Wire(new CoreMonitorBundle(xLen, fLen))
coreMonitorBundle.clock := clock
coreMonitorBundle.reset := reset
coreMonitorBundle.hartid := io.hartid
coreMonitorBundle.timer := csr.io.time(31,0)
coreMonitorBundle.valid := csr.io.trace(0).valid && !csr.io.trace(0).exception
coreMonitorBundle.pc := csr.io.trace(0).iaddr(vaddrBitsExtended-1, 0).sextTo(xLen)
coreMonitorBundle.wrenx := wb_wen && !wb_set_sboard
coreMonitorBundle.wrenf := false.B
coreMonitorBundle.wrdst := wb_waddr
coreMonitorBundle.wrdata := rf_wdata
coreMonitorBundle.rd0src := wb_reg_inst(19,15)
coreMonitorBundle.rd0val := RegNext(RegNext(ex_rs(0)))
coreMonitorBundle.rd1src := wb_reg_inst(24,20)
coreMonitorBundle.rd1val := RegNext(RegNext(ex_rs(1)))
coreMonitorBundle.inst := csr.io.trace(0).insn
coreMonitorBundle.excpt := csr.io.trace(0).exception
coreMonitorBundle.priv_mode := csr.io.trace(0).priv
if (enableCommitLog) {
val t = csr.io.trace(0)
val rd = wb_waddr
val wfd = wb_ctrl.wfd
val wxd = wb_ctrl.wxd
val has_data = wb_wen && !wb_set_sboard
when (t.valid && !t.exception) {
when (wfd) {
printf ("%d 0x%x (0x%x) f%d p%d 0xXXXXXXXXXXXXXXXX\n", t.priv, t.iaddr, t.insn, rd, rd+32.U)
}
.elsewhen (wxd && rd =/= 0.U && has_data) {
printf ("%d 0x%x (0x%x) x%d 0x%x\n", t.priv, t.iaddr, t.insn, rd, rf_wdata)
}
.elsewhen (wxd && rd =/= 0.U && !has_data) {
printf ("%d 0x%x (0x%x) x%d p%d 0xXXXXXXXXXXXXXXXX\n", t.priv, t.iaddr, t.insn, rd, rd)
}
.otherwise {
printf ("%d 0x%x (0x%x)\n", t.priv, t.iaddr, t.insn)
}
}
when (ll_wen && rf_waddr =/= 0.U) {
printf ("x%d p%d 0x%x\n", rf_waddr, rf_waddr, rf_wdata)
}
}
else {
when (csr.io.trace(0).valid) {
printf("C%d: %d [%d] pc=[%x] W[r%d=%x][%d] R[r%d=%x] R[r%d=%x] inst=[%x] DASM(%x)\n",
io.hartid, coreMonitorBundle.timer, coreMonitorBundle.valid,
coreMonitorBundle.pc,
Mux(wb_ctrl.wxd || wb_ctrl.wfd, coreMonitorBundle.wrdst, 0.U),
Mux(coreMonitorBundle.wrenx, coreMonitorBundle.wrdata, 0.U),
coreMonitorBundle.wrenx,
Mux(wb_ctrl.rxs1 || wb_ctrl.rfs1, coreMonitorBundle.rd0src, 0.U),
Mux(wb_ctrl.rxs1 || wb_ctrl.rfs1, coreMonitorBundle.rd0val, 0.U),
Mux(wb_ctrl.rxs2 || wb_ctrl.rfs2, coreMonitorBundle.rd1src, 0.U),
Mux(wb_ctrl.rxs2 || wb_ctrl.rfs2, coreMonitorBundle.rd1val, 0.U),
coreMonitorBundle.inst, coreMonitorBundle.inst)
}
}
// CoreMonitorBundle for late latency writes
val xrfWriteBundle = Wire(new CoreMonitorBundle(xLen, fLen))
xrfWriteBundle.clock := clock
xrfWriteBundle.reset := reset
xrfWriteBundle.hartid := io.hartid
xrfWriteBundle.timer := csr.io.time(31,0)
xrfWriteBundle.valid := false.B
xrfWriteBundle.pc := 0.U
xrfWriteBundle.wrdst := rf_waddr
xrfWriteBundle.wrenx := rf_wen && !(csr.io.trace(0).valid && wb_wen && (wb_waddr === rf_waddr))
xrfWriteBundle.wrenf := false.B
xrfWriteBundle.wrdata := rf_wdata
xrfWriteBundle.rd0src := 0.U
xrfWriteBundle.rd0val := 0.U
xrfWriteBundle.rd1src := 0.U
xrfWriteBundle.rd1val := 0.U
xrfWriteBundle.inst := 0.U
xrfWriteBundle.excpt := false.B
xrfWriteBundle.priv_mode := csr.io.trace(0).priv
if (rocketParams.haveSimTimeout) PlusArg.timeout(
name = "max_core_cycles",
docstring = "Kill the emulation after INT rdtime cycles. Off if 0."
)(csr.io.time)
} // leaving gated-clock domain
val rocketImpl = withClock (gated_clock) { new RocketImpl }
def checkExceptions(x: Seq[(Bool, UInt)]) =
(WireInit(x.map(_._1).reduce(_||_)), WireInit(PriorityMux(x)))
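// checkExceptions: the list is ordered by priority; PriorityMux returns the cause of the
// first asserted entry, so earlier entries take precedence.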
def coverExceptions(exceptionValid: Bool, cause: UInt, labelPrefix: String, coverCausesLabels: Seq[(Int, String)]): Unit = {
for ((coverCause, label) <- coverCausesLabels) {
property.cover(exceptionValid && (cause === coverCause.U), s"${labelPrefix}_${label}")
}
}
def checkHazards(targets: Seq[(Bool, UInt)], cond: UInt => Bool) =
targets.map(h => h._1 && cond(h._2)).reduce(_||_)
def encodeVirtualAddress(a0: UInt, ea: UInt) = if (vaddrBitsExtended == vaddrBits) ea else {
// efficient means to compress 64-bit VA into vaddrBits+1 bits
// (VA is bad if VA(vaddrBits) != VA(vaddrBits-1))
val b = vaddrBitsExtended-1
val a = (a0 >> b).asSInt
val msb = Mux(a === 0.S || a === -1.S, ea(b), !ea(b-1))
Cat(msb, ea(b-1, 0))
}
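// Worked example (a sketch, assuming Sv39-style parameters where vaddrBits = 39 and
// vaddrBitsExtended = 40): b = 39 and a = a0 >> 39. If the base address a0 is canonical
// (bits 63:39 all equal), the extra bit is simply ea(39); otherwise msb is forced to
// !ea(38), so the compressed address violates the VA(39) == VA(38) rule above and is
// guaranteed to be treated as a bad virtual address downstream.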
class Scoreboard(n: Int, zero: Boolean = false)
{
def set(en: Bool, addr: UInt): Unit = update(en, _next | mask(en, addr))
def clear(en: Bool, addr: UInt): Unit = update(en, _next & ~mask(en, addr))
def read(addr: UInt): Bool = r(addr)
def readBypassed(addr: UInt): Bool = _next(addr)
private val _r = RegInit(0.U(n.W))
private val r = if (zero) (_r >> 1 << 1) else _r
private var _next = r
private var ens = false.B
private def mask(en: Bool, addr: UInt) = Mux(en, 1.U << addr, 0.U)
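// Elaboration-time trick: _next and ens are Scala vars, so successive set()/clear() calls
// in the same cycle accumulate into one value. Each update() re-emits the
// when(ens) { _r := _next } connection, and Chisel's last-connect semantics make the final
// emitted assignment (carrying all accumulated updates) the one that takes effect.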
private def update(en: Bool, update: UInt) = {
_next = update
ens = ens || en
when (ens) { _r := _next }
}
}
}
class RegFile(n: Int, w: Int, zero: Boolean = false) {
val rf = Mem(n, UInt(w.W))
private def access(addr: UInt) = rf(~addr(log2Up(n)-1,0))
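// Note: addresses are bitwise-inverted before indexing, so high-numbered registers land in
// the low entries, presumably so that an n-entry memory can omit a dedicated slot for x0
// (write() below never writes x0, and reads of x0 are forced to 0 when `zero` is set).
// write() also bypasses newly written data to reads of the same address in the same cycle.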
private val reads = ArrayBuffer[(UInt,UInt)]()
private var canRead = true
def read(addr: UInt) = {
require(canRead)
reads += addr -> Wire(UInt())
reads.last._2 := Mux(zero.B && addr === 0.U, 0.U, access(addr))
reads.last._2
}
def write(addr: UInt, data: UInt) = {
canRead = false
when (addr =/= 0.U) {
access(addr) := data
for ((raddr, rdata) <- reads)
when (addr === raddr) { rdata := data }
}
}
}
object ImmGen {
def apply(sel: UInt, inst: UInt) = {
val sign = Mux(sel === IMM_Z, 0.S, inst(31).asSInt)
val b30_20 = Mux(sel === IMM_U, inst(30,20).asSInt, sign)
val b19_12 = Mux(sel =/= IMM_U && sel =/= IMM_UJ, sign, inst(19,12).asSInt)
val b11 = Mux(sel === IMM_U || sel === IMM_Z, 0.S,
Mux(sel === IMM_UJ, inst(20).asSInt,
Mux(sel === IMM_SB, inst(7).asSInt, sign)))
val b10_5 = Mux(sel === IMM_U || sel === IMM_Z, 0.U, inst(30,25))
val b4_1 = Mux(sel === IMM_U, 0.U,
Mux(sel === IMM_S || sel === IMM_SB, inst(11,8),
Mux(sel === IMM_Z, inst(19,16), inst(24,21))))
val b0 = Mux(sel === IMM_S, inst(7),
Mux(sel === IMM_I, inst(20),
Mux(sel === IMM_Z, inst(15), 0.U)))
Cat(sign, b30_20, b19_12, b11, b10_5, b4_1, b0).asSInt
}
}
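// Worked example (illustrative, not part of the original file): for an I-type instruction
// such as `addi x1, x2, -5`, sel = IMM_I and inst(31,20) = 0xffb. sign = inst(31) = 1, so
// b30_20 and b19_12 are sign-filled, b11 = sign, b10_5 = inst(30,25) = 0b111111,
// b4_1 = inst(24,21) = 0b1101, and b0 = inst(20) = 1; the 32-bit concatenation is
// 0xfffffffb, i.e. -5 as an SInt.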
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]];
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] whose inward and outward nodes are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
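// An illustrative sketch (an assumption, not the actual diplomacy implementation) of how a
// 1:1 adapter-style node could implement resolveStar: at most one side may use a star, and
// the starred side inherits whatever count the known side provides, e.g.
//   protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int) = {
//     require(iStar + oStar <= 1, "adapter-style nodes can star at most one side")
//     if (iStar > 0) (oKnown, 0) else if (oStar > 0) (0, iKnown) else (0, 0)
//   }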
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
* connections which need to be resolved in some way to determine how many actual edges they correspond to. We also
* need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
* edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Resolving the stars is delegated to the node subclass, which implements the algorithm in resolveStar.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
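// Worked example (illustrative): a node with two outward bindings, one BIND_ONCE and one
// BIND_STAR that resolves to oStar = 2, yields oSum = Seq(1, 2).scanLeft(0)(_ + _) =
// Seq(0, 1, 3), so oPortMapping = Seq((0, 1), (1, 3)): the first binding owns edge 0 and
// the starred binding owns edges 1 and 2.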
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the Node's [[iPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query the port index range for this binding in the node on the other side.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need to access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: DontCare unconnected forwarded diplomatic signals for compatibility reasons.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
   * Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
   * [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
   * Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
   * [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
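  /* Illustrative usage sketch (not part of the original source): how `in`/`out` are
   * typically consumed from within a [[LazyModuleImp]] once elaboration has begun.
   * The names below (`ExampleImp`, `ExampleLazyModule`, `node`) are hypothetical, and
   * what you do with `edge` depends on the concrete edge type.
   *
   * {{{
   * class ExampleImp(outer: ExampleLazyModule) extends LazyModuleImp(outer) {
   *   val (bundle, edge) = outer.node.in.head // negotiated hardware Bundle + inward edge parameters
   *   // drive or monitor `bundle` here; `edge` carries the negotiated parameters
   * }
   * }}}
   */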
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
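    // Added for clarity: `x := y` records an inward binding on x and an outward binding on y.
    // A BIND_STAR recorded on one side appears as BIND_QUERY on the other, so that during
    // cardinality resolution the starred side's port count can be derived from its neighbour.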
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
  /** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
  /** Metadata for printing the node graph. */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File RVC.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile._
import freechips.rocketchip.util._
class ExpandedInstruction extends Bundle {
val bits = UInt(32.W)
val rd = UInt(5.W)
val rs1 = UInt(5.W)
val rs2 = UInt(5.W)
val rs3 = UInt(5.W)
}
class RVCDecoder(x: UInt, xLen: Int, fLen: Int, useAddiForMv: Boolean = false) {
def inst(bits: UInt, rd: UInt = x(11,7), rs1: UInt = x(19,15), rs2: UInt = x(24,20), rs3: UInt = x(31,27)) = {
val res = Wire(new ExpandedInstruction)
res.bits := bits
res.rd := rd
res.rs1 := rs1
res.rs2 := rs2
res.rs3 := rs3
res
}
def rs1p = Cat(1.U(2.W), x(9,7))
def rs2p = Cat(1.U(2.W), x(4,2))
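  // Added for clarity: rs1p/rs2p prepend 0b01 to the 3-bit compressed register fields
  // (x(9,7) and x(4,2)), so they always address the x8-x15 register subset used by the
  // compressed "prime" register encodings.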
def rs2 = x(6,2)
def rd = x(11,7)
def addi4spnImm = Cat(x(10,7), x(12,11), x(5), x(6), 0.U(2.W))
def lwImm = Cat(x(5), x(12,10), x(6), 0.U(2.W))
def ldImm = Cat(x(6,5), x(12,10), 0.U(3.W))
def lwspImm = Cat(x(3,2), x(12), x(6,4), 0.U(2.W))
def ldspImm = Cat(x(4,2), x(12), x(6,5), 0.U(3.W))
def swspImm = Cat(x(8,7), x(12,9), 0.U(2.W))
def sdspImm = Cat(x(9,7), x(12,10), 0.U(3.W))
def luiImm = Cat(Fill(15, x(12)), x(6,2), 0.U(12.W))
def addi16spImm = Cat(Fill(3, x(12)), x(4,3), x(5), x(2), x(6), 0.U(4.W))
def addiImm = Cat(Fill(7, x(12)), x(6,2))
def jImm = Cat(Fill(10, x(12)), x(8), x(10,9), x(6), x(7), x(2), x(11), x(5,3), 0.U(1.W))
def bImm = Cat(Fill(5, x(12)), x(6,5), x(2), x(11,10), x(4,3), 0.U(1.W))
def shamt = Cat(x(12), x(6,2))
def x0 = 0.U(5.W)
def ra = 1.U(5.W)
def sp = 2.U(5.W)
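  // Added for clarity: q0/q1/q2 below each build the eight expanded instructions for one
  // compressed quadrant (selected by x(1,0)); `decode` then picks a single entry using
  // Cat(x(1,0), x(15,13)), i.e. quadrant plus funct3.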
def q0 = {
def addi4spn = {
val opc = Mux(x(12,5).orR, 0x13.U(7.W), 0x1F.U(7.W))
inst(Cat(addi4spnImm, sp, 0.U(3.W), rs2p, opc), rs2p, sp, rs2p)
}
def ld = inst(Cat(ldImm, rs1p, 3.U(3.W), rs2p, 0x03.U(7.W)), rs2p, rs1p, rs2p)
def lw = inst(Cat(lwImm, rs1p, 2.U(3.W), rs2p, 0x03.U(7.W)), rs2p, rs1p, rs2p)
def fld = inst(Cat(ldImm, rs1p, 3.U(3.W), rs2p, 0x07.U(7.W)), rs2p, rs1p, rs2p)
def flw = {
if (xLen == 32) inst(Cat(lwImm, rs1p, 2.U(3.W), rs2p, 0x07.U(7.W)), rs2p, rs1p, rs2p)
else ld
}
def unimp = inst(Cat(lwImm >> 5, rs2p, rs1p, 2.U(3.W), lwImm(4,0), 0x3F.U(7.W)), rs2p, rs1p, rs2p)
def sd = inst(Cat(ldImm >> 5, rs2p, rs1p, 3.U(3.W), ldImm(4,0), 0x23.U(7.W)), rs2p, rs1p, rs2p)
def sw = inst(Cat(lwImm >> 5, rs2p, rs1p, 2.U(3.W), lwImm(4,0), 0x23.U(7.W)), rs2p, rs1p, rs2p)
def fsd = inst(Cat(ldImm >> 5, rs2p, rs1p, 3.U(3.W), ldImm(4,0), 0x27.U(7.W)), rs2p, rs1p, rs2p)
def fsw = {
if (xLen == 32) inst(Cat(lwImm >> 5, rs2p, rs1p, 2.U(3.W), lwImm(4,0), 0x27.U(7.W)), rs2p, rs1p, rs2p)
else sd
}
Seq(addi4spn, fld, lw, flw, unimp, fsd, sw, fsw)
}
def q1 = {
def addi = inst(Cat(addiImm, rd, 0.U(3.W), rd, 0x13.U(7.W)), rd, rd, rs2p)
def addiw = {
val opc = Mux(rd.orR, 0x1B.U(7.W), 0x1F.U(7.W))
inst(Cat(addiImm, rd, 0.U(3.W), rd, opc), rd, rd, rs2p)
}
def jal = {
if (xLen == 32) inst(Cat(jImm(20), jImm(10,1), jImm(11), jImm(19,12), ra, 0x6F.U(7.W)), ra, rd, rs2p)
else addiw
}
def li = inst(Cat(addiImm, x0, 0.U(3.W), rd, 0x13.U(7.W)), rd, x0, rs2p)
def addi16sp = {
val opc = Mux(addiImm.orR, 0x13.U(7.W), 0x1F.U(7.W))
inst(Cat(addi16spImm, rd, 0.U(3.W), rd, opc), rd, rd, rs2p)
}
def lui = {
val opc = Mux(addiImm.orR, 0x37.U(7.W), 0x3F.U(7.W))
val me = inst(Cat(luiImm(31,12), rd, opc), rd, rd, rs2p)
Mux(rd === x0 || rd === sp, addi16sp, me)
}
def j = inst(Cat(jImm(20), jImm(10,1), jImm(11), jImm(19,12), x0, 0x6F.U(7.W)), x0, rs1p, rs2p)
def beqz = inst(Cat(bImm(12), bImm(10,5), x0, rs1p, 0.U(3.W), bImm(4,1), bImm(11), 0x63.U(7.W)), rs1p, rs1p, x0)
def bnez = inst(Cat(bImm(12), bImm(10,5), x0, rs1p, 1.U(3.W), bImm(4,1), bImm(11), 0x63.U(7.W)), x0, rs1p, x0)
def arith = {
def srli = Cat(shamt, rs1p, 5.U(3.W), rs1p, 0x13.U(7.W))
def srai = srli | (1 << 30).U
def andi = Cat(addiImm, rs1p, 7.U(3.W), rs1p, 0x13.U(7.W))
def rtype = {
val funct = Seq(0.U, 4.U, 6.U, 7.U, 0.U, 0.U, 2.U, 3.U)(Cat(x(12), x(6,5)))
val sub = Mux(x(6,5) === 0.U, (1 << 30).U, 0.U)
val opc = Mux(x(12), 0x3B.U(7.W), 0x33.U(7.W))
Cat(rs2p, rs1p, funct, rs1p, opc) | sub
}
inst(Seq(srli, srai, andi, rtype)(x(11,10)), rs1p, rs1p, rs2p)
}
Seq(addi, jal, li, lui, arith, j, beqz, bnez)
}
def q2 = {
val load_opc = Mux(rd.orR, 0x03.U(7.W), 0x1F.U(7.W))
def slli = inst(Cat(shamt, rd, 1.U(3.W), rd, 0x13.U(7.W)), rd, rd, rs2)
def ldsp = inst(Cat(ldspImm, sp, 3.U(3.W), rd, load_opc), rd, sp, rs2)
def lwsp = inst(Cat(lwspImm, sp, 2.U(3.W), rd, load_opc), rd, sp, rs2)
def fldsp = inst(Cat(ldspImm, sp, 3.U(3.W), rd, 0x07.U(7.W)), rd, sp, rs2)
def flwsp = {
if (xLen == 32) inst(Cat(lwspImm, sp, 2.U(3.W), rd, 0x07.U(7.W)), rd, sp, rs2)
else ldsp
}
def sdsp = inst(Cat(sdspImm >> 5, rs2, sp, 3.U(3.W), sdspImm(4,0), 0x23.U(7.W)), rd, sp, rs2)
def swsp = inst(Cat(swspImm >> 5, rs2, sp, 2.U(3.W), swspImm(4,0), 0x23.U(7.W)), rd, sp, rs2)
def fsdsp = inst(Cat(sdspImm >> 5, rs2, sp, 3.U(3.W), sdspImm(4,0), 0x27.U(7.W)), rd, sp, rs2)
def fswsp = {
if (xLen == 32) inst(Cat(swspImm >> 5, rs2, sp, 2.U(3.W), swspImm(4,0), 0x27.U(7.W)), rd, sp, rs2)
else sdsp
}
def jalr = {
val mv = {
if (useAddiForMv) inst(Cat(rs2, 0.U(3.W), rd, 0x13.U(7.W)), rd, rs2, x0)
else inst(Cat(rs2, x0, 0.U(3.W), rd, 0x33.U(7.W)), rd, x0, rs2)
}
val add = inst(Cat(rs2, rd, 0.U(3.W), rd, 0x33.U(7.W)), rd, rd, rs2)
val jr = Cat(rs2, rd, 0.U(3.W), x0, 0x67.U(7.W))
val reserved = Cat(jr >> 7, 0x1F.U(7.W))
val jr_reserved = inst(Mux(rd.orR, jr, reserved), x0, rd, rs2)
val jr_mv = Mux(rs2.orR, mv, jr_reserved)
val jalr = Cat(rs2, rd, 0.U(3.W), ra, 0x67.U(7.W))
val ebreak = Cat(jr >> 7, 0x73.U(7.W)) | (1 << 20).U
val jalr_ebreak = inst(Mux(rd.orR, jalr, ebreak), ra, rd, rs2)
val jalr_add = Mux(rs2.orR, add, jalr_ebreak)
Mux(x(12), jalr_add, jr_mv)
}
Seq(slli, fldsp, lwsp, flwsp, jalr, fsdsp, swsp, fswsp)
}
def q3 = Seq.fill(8)(passthrough)
def passthrough = inst(x)
def decode = {
val s = q0 ++ q1 ++ q2 ++ q3
s(Cat(x(1,0), x(15,13)))
}
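  // Added for clarity: the *_ill sequences below mirror the decode tables above and flag
  // which compressed encodings are illegal for the configured xLen/fLen; `ill` indexes
  // them with the same Cat(x(1,0), x(15,13)) selector as `decode`.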
def q0_ill = {
def allz = !(x(12, 2).orR)
def fld = if (fLen >= 64) false.B else true.B
def flw32 = if (xLen == 64 || fLen >= 32) false.B else true.B
def fsd = if (fLen >= 64) false.B else true.B
def fsw32 = if (xLen == 64 || fLen >= 32) false.B else true.B
Seq(allz, fld, false.B, flw32, true.B, fsd, false.B, fsw32)
}
def q1_ill = {
def rd0 = if (xLen == 32) false.B else rd === 0.U
def immz = !(x(12) | x(6, 2).orR)
def arith_res = x(12, 10).andR && (if (xLen == 32) true.B else x(6) === 1.U)
Seq(false.B, rd0, false.B, immz, arith_res, false.B, false.B, false.B)
}
def q2_ill = {
def fldsp = if (fLen >= 64) false.B else true.B
def rd0 = rd === 0.U
def flwsp = if (xLen == 64) rd0 else if (fLen >= 32) false.B else true.B
    def jr_res = !(x(12, 2).orR)
def fsdsp = if (fLen >= 64) false.B else true.B
def fswsp32 = if (xLen == 64) false.B else if (fLen >= 32) false.B else true.B
Seq(false.B, fldsp, rd0, flwsp, jr_res, fsdsp, false.B, fswsp32)
}
def q3_ill = Seq.fill(8)(false.B)
def ill = {
val s = q0_ill ++ q1_ill ++ q2_ill ++ q3_ill
s(Cat(x(1,0), x(15,13)))
}
}
class RVCExpander(useAddiForMv: Boolean = false)(implicit val p: Parameters) extends Module with HasCoreParameters {
val io = IO(new Bundle {
val in = Input(UInt(32.W))
val out = Output(new ExpandedInstruction)
val rvc = Output(Bool())
val ill = Output(Bool())
})
if (usingCompressed) {
io.rvc := io.in(1,0) =/= 3.U
val decoder = new RVCDecoder(io.in, xLen, fLen, useAddiForMv)
io.out := decoder.decode
io.ill := decoder.ill
} else {
io.rvc := false.B
io.out := new RVCDecoder(io.in, xLen, fLen, useAddiForMv).passthrough
io.ill := false.B // only used for RVC
}
}
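// Illustrative sketch (not part of the original file): a minimal wrapper showing how
// RVCExpander is typically instantiated and wired up. The wrapper name and its ports
// are hypothetical; only RVCExpander's io (in/out/rvc/ill) comes from the class above.
class RVCExpanderExample(implicit p: Parameters) extends Module {
  val io = IO(new Bundle {
    val raw = Input(UInt(32.W)) // possibly-compressed instruction bits
    val expanded = Output(UInt(32.W)) // expanded 32-bit instruction
    val compressed = Output(Bool()) // the input was a 16-bit RVC encoding
    val illegal = Output(Bool()) // the RVC encoding is illegal for this configuration
  })
  val exp = Module(new RVCExpander())
  exp.io.in := io.raw
  io.expanded := exp.io.out.bits
  io.compressed := exp.io.rvc
  io.illegal := exp.io.ill
}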
| module Frontend_6( // @[Frontend.scala:82:7]
input clock, // @[Frontend.scala:82:7]
input reset, // @[Frontend.scala:82:7]
input auto_icache_master_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_icache_master_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_icache_master_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
input auto_icache_master_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_icache_master_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_icache_master_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_icache_master_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_icache_master_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_icache_master_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_icache_master_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_icache_master_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input io_cpu_might_request, // @[Frontend.scala:85:14]
input io_cpu_req_valid, // @[Frontend.scala:85:14]
input [39:0] io_cpu_req_bits_pc, // @[Frontend.scala:85:14]
input io_cpu_req_bits_speculative, // @[Frontend.scala:85:14]
input io_cpu_sfence_valid, // @[Frontend.scala:85:14]
input io_cpu_sfence_bits_rs1, // @[Frontend.scala:85:14]
input io_cpu_sfence_bits_rs2, // @[Frontend.scala:85:14]
input [38:0] io_cpu_sfence_bits_addr, // @[Frontend.scala:85:14]
input io_cpu_sfence_bits_asid, // @[Frontend.scala:85:14]
input io_cpu_sfence_bits_hv, // @[Frontend.scala:85:14]
input io_cpu_sfence_bits_hg, // @[Frontend.scala:85:14]
input io_cpu_resp_ready, // @[Frontend.scala:85:14]
output io_cpu_resp_valid, // @[Frontend.scala:85:14]
output [1:0] io_cpu_resp_bits_btb_cfiType, // @[Frontend.scala:85:14]
output io_cpu_resp_bits_btb_taken, // @[Frontend.scala:85:14]
output [1:0] io_cpu_resp_bits_btb_mask, // @[Frontend.scala:85:14]
output io_cpu_resp_bits_btb_bridx, // @[Frontend.scala:85:14]
output [38:0] io_cpu_resp_bits_btb_target, // @[Frontend.scala:85:14]
output [4:0] io_cpu_resp_bits_btb_entry, // @[Frontend.scala:85:14]
output [7:0] io_cpu_resp_bits_btb_bht_history, // @[Frontend.scala:85:14]
output io_cpu_resp_bits_btb_bht_value, // @[Frontend.scala:85:14]
output [39:0] io_cpu_resp_bits_pc, // @[Frontend.scala:85:14]
output [31:0] io_cpu_resp_bits_data, // @[Frontend.scala:85:14]
output [1:0] io_cpu_resp_bits_mask, // @[Frontend.scala:85:14]
output io_cpu_resp_bits_xcpt_pf_inst, // @[Frontend.scala:85:14]
output io_cpu_resp_bits_xcpt_gf_inst, // @[Frontend.scala:85:14]
output io_cpu_resp_bits_xcpt_ae_inst, // @[Frontend.scala:85:14]
output io_cpu_resp_bits_replay, // @[Frontend.scala:85:14]
output io_cpu_gpa_valid, // @[Frontend.scala:85:14]
output [39:0] io_cpu_gpa_bits, // @[Frontend.scala:85:14]
output io_cpu_gpa_is_pte, // @[Frontend.scala:85:14]
input io_cpu_btb_update_valid, // @[Frontend.scala:85:14]
input [1:0] io_cpu_btb_update_bits_prediction_cfiType, // @[Frontend.scala:85:14]
input io_cpu_btb_update_bits_prediction_taken, // @[Frontend.scala:85:14]
input [1:0] io_cpu_btb_update_bits_prediction_mask, // @[Frontend.scala:85:14]
input io_cpu_btb_update_bits_prediction_bridx, // @[Frontend.scala:85:14]
input [38:0] io_cpu_btb_update_bits_prediction_target, // @[Frontend.scala:85:14]
input [4:0] io_cpu_btb_update_bits_prediction_entry, // @[Frontend.scala:85:14]
input [7:0] io_cpu_btb_update_bits_prediction_bht_history, // @[Frontend.scala:85:14]
input io_cpu_btb_update_bits_prediction_bht_value, // @[Frontend.scala:85:14]
input [38:0] io_cpu_btb_update_bits_pc, // @[Frontend.scala:85:14]
input [38:0] io_cpu_btb_update_bits_target, // @[Frontend.scala:85:14]
input io_cpu_btb_update_bits_isValid, // @[Frontend.scala:85:14]
input [38:0] io_cpu_btb_update_bits_br_pc, // @[Frontend.scala:85:14]
input [1:0] io_cpu_btb_update_bits_cfiType, // @[Frontend.scala:85:14]
input io_cpu_bht_update_valid, // @[Frontend.scala:85:14]
input [7:0] io_cpu_bht_update_bits_prediction_history, // @[Frontend.scala:85:14]
input io_cpu_bht_update_bits_prediction_value, // @[Frontend.scala:85:14]
input [38:0] io_cpu_bht_update_bits_pc, // @[Frontend.scala:85:14]
input io_cpu_bht_update_bits_branch, // @[Frontend.scala:85:14]
input io_cpu_bht_update_bits_taken, // @[Frontend.scala:85:14]
input io_cpu_bht_update_bits_mispredict, // @[Frontend.scala:85:14]
input io_cpu_flush_icache, // @[Frontend.scala:85:14]
output [39:0] io_cpu_npc, // @[Frontend.scala:85:14]
output io_cpu_perf_acquire, // @[Frontend.scala:85:14]
output io_cpu_perf_tlbMiss, // @[Frontend.scala:85:14]
input io_cpu_progress, // @[Frontend.scala:85:14]
input io_ptw_req_ready, // @[Frontend.scala:85:14]
output io_ptw_req_valid, // @[Frontend.scala:85:14]
output io_ptw_req_bits_valid, // @[Frontend.scala:85:14]
output [26:0] io_ptw_req_bits_bits_addr, // @[Frontend.scala:85:14]
output io_ptw_req_bits_bits_need_gpa, // @[Frontend.scala:85:14]
input io_ptw_resp_valid, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_ae_ptw, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_ae_final, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_pf, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_gf, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_hr, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_hw, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_hx, // @[Frontend.scala:85:14]
input [9:0] io_ptw_resp_bits_pte_reserved_for_future, // @[Frontend.scala:85:14]
input [43:0] io_ptw_resp_bits_pte_ppn, // @[Frontend.scala:85:14]
input [1:0] io_ptw_resp_bits_pte_reserved_for_software, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_pte_d, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_pte_a, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_pte_g, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_pte_u, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_pte_x, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_pte_w, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_pte_r, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_pte_v, // @[Frontend.scala:85:14]
input [1:0] io_ptw_resp_bits_level, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_homogeneous, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_gpa_valid, // @[Frontend.scala:85:14]
input [38:0] io_ptw_resp_bits_gpa_bits, // @[Frontend.scala:85:14]
input io_ptw_resp_bits_gpa_is_pte, // @[Frontend.scala:85:14]
input [3:0] io_ptw_ptbr_mode, // @[Frontend.scala:85:14]
input [43:0] io_ptw_ptbr_ppn, // @[Frontend.scala:85:14]
input io_ptw_status_debug, // @[Frontend.scala:85:14]
input io_ptw_status_cease, // @[Frontend.scala:85:14]
input io_ptw_status_wfi, // @[Frontend.scala:85:14]
input [31:0] io_ptw_status_isa, // @[Frontend.scala:85:14]
input [1:0] io_ptw_status_dprv, // @[Frontend.scala:85:14]
input io_ptw_status_dv, // @[Frontend.scala:85:14]
input [1:0] io_ptw_status_prv, // @[Frontend.scala:85:14]
input io_ptw_status_v, // @[Frontend.scala:85:14]
input io_ptw_status_sd, // @[Frontend.scala:85:14]
input io_ptw_status_mpv, // @[Frontend.scala:85:14]
input io_ptw_status_gva, // @[Frontend.scala:85:14]
input io_ptw_status_tsr, // @[Frontend.scala:85:14]
input io_ptw_status_tw, // @[Frontend.scala:85:14]
input io_ptw_status_tvm, // @[Frontend.scala:85:14]
input io_ptw_status_mxr, // @[Frontend.scala:85:14]
input io_ptw_status_sum, // @[Frontend.scala:85:14]
input io_ptw_status_mprv, // @[Frontend.scala:85:14]
input [1:0] io_ptw_status_fs, // @[Frontend.scala:85:14]
input [1:0] io_ptw_status_mpp, // @[Frontend.scala:85:14]
input io_ptw_status_spp, // @[Frontend.scala:85:14]
input io_ptw_status_mpie, // @[Frontend.scala:85:14]
input io_ptw_status_spie, // @[Frontend.scala:85:14]
input io_ptw_status_mie, // @[Frontend.scala:85:14]
input io_ptw_status_sie, // @[Frontend.scala:85:14]
input io_ptw_hstatus_spvp, // @[Frontend.scala:85:14]
input io_ptw_hstatus_spv, // @[Frontend.scala:85:14]
input io_ptw_hstatus_gva, // @[Frontend.scala:85:14]
input io_ptw_gstatus_debug, // @[Frontend.scala:85:14]
input io_ptw_gstatus_cease, // @[Frontend.scala:85:14]
input io_ptw_gstatus_wfi, // @[Frontend.scala:85:14]
input [31:0] io_ptw_gstatus_isa, // @[Frontend.scala:85:14]
input [1:0] io_ptw_gstatus_dprv, // @[Frontend.scala:85:14]
input io_ptw_gstatus_dv, // @[Frontend.scala:85:14]
input [1:0] io_ptw_gstatus_prv, // @[Frontend.scala:85:14]
input io_ptw_gstatus_v, // @[Frontend.scala:85:14]
input io_ptw_gstatus_sd, // @[Frontend.scala:85:14]
input [22:0] io_ptw_gstatus_zero2, // @[Frontend.scala:85:14]
input io_ptw_gstatus_mpv, // @[Frontend.scala:85:14]
input io_ptw_gstatus_gva, // @[Frontend.scala:85:14]
input io_ptw_gstatus_mbe, // @[Frontend.scala:85:14]
input io_ptw_gstatus_sbe, // @[Frontend.scala:85:14]
input [1:0] io_ptw_gstatus_sxl, // @[Frontend.scala:85:14]
input [7:0] io_ptw_gstatus_zero1, // @[Frontend.scala:85:14]
input io_ptw_gstatus_tsr, // @[Frontend.scala:85:14]
input io_ptw_gstatus_tw, // @[Frontend.scala:85:14]
input io_ptw_gstatus_tvm, // @[Frontend.scala:85:14]
input io_ptw_gstatus_mxr, // @[Frontend.scala:85:14]
input io_ptw_gstatus_sum, // @[Frontend.scala:85:14]
input io_ptw_gstatus_mprv, // @[Frontend.scala:85:14]
input [1:0] io_ptw_gstatus_fs, // @[Frontend.scala:85:14]
input [1:0] io_ptw_gstatus_mpp, // @[Frontend.scala:85:14]
input [1:0] io_ptw_gstatus_vs, // @[Frontend.scala:85:14]
input io_ptw_gstatus_spp, // @[Frontend.scala:85:14]
input io_ptw_gstatus_mpie, // @[Frontend.scala:85:14]
input io_ptw_gstatus_ube, // @[Frontend.scala:85:14]
input io_ptw_gstatus_spie, // @[Frontend.scala:85:14]
input io_ptw_gstatus_upie, // @[Frontend.scala:85:14]
input io_ptw_gstatus_mie, // @[Frontend.scala:85:14]
input io_ptw_gstatus_hie, // @[Frontend.scala:85:14]
input io_ptw_gstatus_sie, // @[Frontend.scala:85:14]
input io_ptw_gstatus_uie, // @[Frontend.scala:85:14]
input io_ptw_pmp_0_cfg_l, // @[Frontend.scala:85:14]
input [1:0] io_ptw_pmp_0_cfg_a, // @[Frontend.scala:85:14]
input io_ptw_pmp_0_cfg_x, // @[Frontend.scala:85:14]
input io_ptw_pmp_0_cfg_w, // @[Frontend.scala:85:14]
input io_ptw_pmp_0_cfg_r, // @[Frontend.scala:85:14]
input [29:0] io_ptw_pmp_0_addr, // @[Frontend.scala:85:14]
input [31:0] io_ptw_pmp_0_mask, // @[Frontend.scala:85:14]
input io_ptw_pmp_1_cfg_l, // @[Frontend.scala:85:14]
input [1:0] io_ptw_pmp_1_cfg_a, // @[Frontend.scala:85:14]
input io_ptw_pmp_1_cfg_x, // @[Frontend.scala:85:14]
input io_ptw_pmp_1_cfg_w, // @[Frontend.scala:85:14]
input io_ptw_pmp_1_cfg_r, // @[Frontend.scala:85:14]
input [29:0] io_ptw_pmp_1_addr, // @[Frontend.scala:85:14]
input [31:0] io_ptw_pmp_1_mask, // @[Frontend.scala:85:14]
input io_ptw_pmp_2_cfg_l, // @[Frontend.scala:85:14]
input [1:0] io_ptw_pmp_2_cfg_a, // @[Frontend.scala:85:14]
input io_ptw_pmp_2_cfg_x, // @[Frontend.scala:85:14]
input io_ptw_pmp_2_cfg_w, // @[Frontend.scala:85:14]
input io_ptw_pmp_2_cfg_r, // @[Frontend.scala:85:14]
input [29:0] io_ptw_pmp_2_addr, // @[Frontend.scala:85:14]
input [31:0] io_ptw_pmp_2_mask, // @[Frontend.scala:85:14]
input io_ptw_pmp_3_cfg_l, // @[Frontend.scala:85:14]
input [1:0] io_ptw_pmp_3_cfg_a, // @[Frontend.scala:85:14]
input io_ptw_pmp_3_cfg_x, // @[Frontend.scala:85:14]
input io_ptw_pmp_3_cfg_w, // @[Frontend.scala:85:14]
input io_ptw_pmp_3_cfg_r, // @[Frontend.scala:85:14]
input [29:0] io_ptw_pmp_3_addr, // @[Frontend.scala:85:14]
input [31:0] io_ptw_pmp_3_mask, // @[Frontend.scala:85:14]
input io_ptw_pmp_4_cfg_l, // @[Frontend.scala:85:14]
input [1:0] io_ptw_pmp_4_cfg_a, // @[Frontend.scala:85:14]
input io_ptw_pmp_4_cfg_x, // @[Frontend.scala:85:14]
input io_ptw_pmp_4_cfg_w, // @[Frontend.scala:85:14]
input io_ptw_pmp_4_cfg_r, // @[Frontend.scala:85:14]
input [29:0] io_ptw_pmp_4_addr, // @[Frontend.scala:85:14]
input [31:0] io_ptw_pmp_4_mask, // @[Frontend.scala:85:14]
input io_ptw_pmp_5_cfg_l, // @[Frontend.scala:85:14]
input [1:0] io_ptw_pmp_5_cfg_a, // @[Frontend.scala:85:14]
input io_ptw_pmp_5_cfg_x, // @[Frontend.scala:85:14]
input io_ptw_pmp_5_cfg_w, // @[Frontend.scala:85:14]
input io_ptw_pmp_5_cfg_r, // @[Frontend.scala:85:14]
input [29:0] io_ptw_pmp_5_addr, // @[Frontend.scala:85:14]
input [31:0] io_ptw_pmp_5_mask, // @[Frontend.scala:85:14]
input io_ptw_pmp_6_cfg_l, // @[Frontend.scala:85:14]
input [1:0] io_ptw_pmp_6_cfg_a, // @[Frontend.scala:85:14]
input io_ptw_pmp_6_cfg_x, // @[Frontend.scala:85:14]
input io_ptw_pmp_6_cfg_w, // @[Frontend.scala:85:14]
input io_ptw_pmp_6_cfg_r, // @[Frontend.scala:85:14]
input [29:0] io_ptw_pmp_6_addr, // @[Frontend.scala:85:14]
input [31:0] io_ptw_pmp_6_mask, // @[Frontend.scala:85:14]
input io_ptw_pmp_7_cfg_l, // @[Frontend.scala:85:14]
input [1:0] io_ptw_pmp_7_cfg_a, // @[Frontend.scala:85:14]
input io_ptw_pmp_7_cfg_x, // @[Frontend.scala:85:14]
input io_ptw_pmp_7_cfg_w, // @[Frontend.scala:85:14]
input io_ptw_pmp_7_cfg_r, // @[Frontend.scala:85:14]
input [29:0] io_ptw_pmp_7_addr, // @[Frontend.scala:85:14]
input [31:0] io_ptw_pmp_7_mask, // @[Frontend.scala:85:14]
input io_ptw_customCSRs_csrs_0_ren, // @[Frontend.scala:85:14]
input io_ptw_customCSRs_csrs_0_wen, // @[Frontend.scala:85:14]
input [63:0] io_ptw_customCSRs_csrs_0_wdata, // @[Frontend.scala:85:14]
input [63:0] io_ptw_customCSRs_csrs_0_value, // @[Frontend.scala:85:14]
input io_ptw_customCSRs_csrs_1_ren, // @[Frontend.scala:85:14]
input io_ptw_customCSRs_csrs_1_wen, // @[Frontend.scala:85:14]
input [63:0] io_ptw_customCSRs_csrs_1_wdata, // @[Frontend.scala:85:14]
input [63:0] io_ptw_customCSRs_csrs_1_value, // @[Frontend.scala:85:14]
input io_ptw_customCSRs_csrs_2_ren, // @[Frontend.scala:85:14]
input io_ptw_customCSRs_csrs_2_wen, // @[Frontend.scala:85:14]
input [63:0] io_ptw_customCSRs_csrs_2_wdata, // @[Frontend.scala:85:14]
input [63:0] io_ptw_customCSRs_csrs_2_value, // @[Frontend.scala:85:14]
input io_ptw_customCSRs_csrs_3_ren, // @[Frontend.scala:85:14]
input io_ptw_customCSRs_csrs_3_wen, // @[Frontend.scala:85:14]
input [63:0] io_ptw_customCSRs_csrs_3_wdata, // @[Frontend.scala:85:14]
input [63:0] io_ptw_customCSRs_csrs_3_value // @[Frontend.scala:85:14]
);
wire [1:0] btb_io_ras_update_bits_cfiType; // @[Frontend.scala:270:25, :274:40]
wire _btb_io_resp_valid; // @[Frontend.scala:198:21]
wire [1:0] _btb_io_resp_bits_cfiType; // @[Frontend.scala:198:21]
wire _btb_io_resp_bits_taken; // @[Frontend.scala:198:21]
wire [1:0] _btb_io_resp_bits_mask; // @[Frontend.scala:198:21]
wire _btb_io_resp_bits_bridx; // @[Frontend.scala:198:21]
wire [38:0] _btb_io_resp_bits_target; // @[Frontend.scala:198:21]
wire [4:0] _btb_io_resp_bits_entry; // @[Frontend.scala:198:21]
wire [7:0] _btb_io_resp_bits_bht_history; // @[Frontend.scala:198:21]
wire _btb_io_resp_bits_bht_value; // @[Frontend.scala:198:21]
wire _btb_io_ras_head_valid; // @[Frontend.scala:198:21]
wire [38:0] _btb_io_ras_head_bits; // @[Frontend.scala:198:21]
wire _tlb_io_req_ready; // @[Frontend.scala:105:19]
wire _tlb_io_resp_miss; // @[Frontend.scala:105:19]
wire [31:0] _tlb_io_resp_paddr; // @[Frontend.scala:105:19]
wire [39:0] _tlb_io_resp_gpa; // @[Frontend.scala:105:19]
wire _tlb_io_resp_pf_ld; // @[Frontend.scala:105:19]
wire _tlb_io_resp_pf_inst; // @[Frontend.scala:105:19]
wire _tlb_io_resp_ae_ld; // @[Frontend.scala:105:19]
wire _tlb_io_resp_ae_inst; // @[Frontend.scala:105:19]
wire _tlb_io_resp_ma_ld; // @[Frontend.scala:105:19]
wire _tlb_io_resp_cacheable; // @[Frontend.scala:105:19]
wire _tlb_io_resp_prefetchable; // @[Frontend.scala:105:19]
wire _fq_io_enq_ready; // @[Frontend.scala:91:64]
wire [4:0] _fq_io_mask; // @[Frontend.scala:91:64]
wire _icache_io_resp_valid; // @[Frontend.scala:70:26]
wire [31:0] _icache_io_resp_bits_data; // @[Frontend.scala:70:26]
wire _icache_io_resp_bits_ae; // @[Frontend.scala:70:26]
wire auto_icache_master_out_a_ready_0 = auto_icache_master_out_a_ready; // @[Frontend.scala:82:7]
wire auto_icache_master_out_d_valid_0 = auto_icache_master_out_d_valid; // @[Frontend.scala:82:7]
wire [2:0] auto_icache_master_out_d_bits_opcode_0 = auto_icache_master_out_d_bits_opcode; // @[Frontend.scala:82:7]
wire [1:0] auto_icache_master_out_d_bits_param_0 = auto_icache_master_out_d_bits_param; // @[Frontend.scala:82:7]
wire [3:0] auto_icache_master_out_d_bits_size_0 = auto_icache_master_out_d_bits_size; // @[Frontend.scala:82:7]
wire [2:0] auto_icache_master_out_d_bits_sink_0 = auto_icache_master_out_d_bits_sink; // @[Frontend.scala:82:7]
wire auto_icache_master_out_d_bits_denied_0 = auto_icache_master_out_d_bits_denied; // @[Frontend.scala:82:7]
wire [63:0] auto_icache_master_out_d_bits_data_0 = auto_icache_master_out_d_bits_data; // @[Frontend.scala:82:7]
wire auto_icache_master_out_d_bits_corrupt_0 = auto_icache_master_out_d_bits_corrupt; // @[Frontend.scala:82:7]
wire io_cpu_might_request_0 = io_cpu_might_request; // @[Frontend.scala:82:7]
wire io_cpu_req_valid_0 = io_cpu_req_valid; // @[Frontend.scala:82:7]
wire [39:0] io_cpu_req_bits_pc_0 = io_cpu_req_bits_pc; // @[Frontend.scala:82:7]
wire io_cpu_req_bits_speculative_0 = io_cpu_req_bits_speculative; // @[Frontend.scala:82:7]
wire io_cpu_sfence_valid_0 = io_cpu_sfence_valid; // @[Frontend.scala:82:7]
wire io_cpu_sfence_bits_rs1_0 = io_cpu_sfence_bits_rs1; // @[Frontend.scala:82:7]
wire io_cpu_sfence_bits_rs2_0 = io_cpu_sfence_bits_rs2; // @[Frontend.scala:82:7]
wire [38:0] io_cpu_sfence_bits_addr_0 = io_cpu_sfence_bits_addr; // @[Frontend.scala:82:7]
wire io_cpu_sfence_bits_asid_0 = io_cpu_sfence_bits_asid; // @[Frontend.scala:82:7]
wire io_cpu_sfence_bits_hv_0 = io_cpu_sfence_bits_hv; // @[Frontend.scala:82:7]
wire io_cpu_sfence_bits_hg_0 = io_cpu_sfence_bits_hg; // @[Frontend.scala:82:7]
wire io_cpu_resp_ready_0 = io_cpu_resp_ready; // @[Frontend.scala:82:7]
wire io_cpu_btb_update_valid_0 = io_cpu_btb_update_valid; // @[Frontend.scala:82:7]
wire [1:0] io_cpu_btb_update_bits_prediction_cfiType_0 = io_cpu_btb_update_bits_prediction_cfiType; // @[Frontend.scala:82:7]
wire io_cpu_btb_update_bits_prediction_taken_0 = io_cpu_btb_update_bits_prediction_taken; // @[Frontend.scala:82:7]
wire [1:0] io_cpu_btb_update_bits_prediction_mask_0 = io_cpu_btb_update_bits_prediction_mask; // @[Frontend.scala:82:7]
wire io_cpu_btb_update_bits_prediction_bridx_0 = io_cpu_btb_update_bits_prediction_bridx; // @[Frontend.scala:82:7]
wire [38:0] io_cpu_btb_update_bits_prediction_target_0 = io_cpu_btb_update_bits_prediction_target; // @[Frontend.scala:82:7]
wire [4:0] io_cpu_btb_update_bits_prediction_entry_0 = io_cpu_btb_update_bits_prediction_entry; // @[Frontend.scala:82:7]
wire [7:0] io_cpu_btb_update_bits_prediction_bht_history_0 = io_cpu_btb_update_bits_prediction_bht_history; // @[Frontend.scala:82:7]
wire io_cpu_btb_update_bits_prediction_bht_value_0 = io_cpu_btb_update_bits_prediction_bht_value; // @[Frontend.scala:82:7]
wire [38:0] io_cpu_btb_update_bits_pc_0 = io_cpu_btb_update_bits_pc; // @[Frontend.scala:82:7]
wire [38:0] io_cpu_btb_update_bits_target_0 = io_cpu_btb_update_bits_target; // @[Frontend.scala:82:7]
wire io_cpu_btb_update_bits_isValid_0 = io_cpu_btb_update_bits_isValid; // @[Frontend.scala:82:7]
wire [38:0] io_cpu_btb_update_bits_br_pc_0 = io_cpu_btb_update_bits_br_pc; // @[Frontend.scala:82:7]
wire [1:0] io_cpu_btb_update_bits_cfiType_0 = io_cpu_btb_update_bits_cfiType; // @[Frontend.scala:82:7]
wire io_cpu_bht_update_valid_0 = io_cpu_bht_update_valid; // @[Frontend.scala:82:7]
wire [7:0] io_cpu_bht_update_bits_prediction_history_0 = io_cpu_bht_update_bits_prediction_history; // @[Frontend.scala:82:7]
wire io_cpu_bht_update_bits_prediction_value_0 = io_cpu_bht_update_bits_prediction_value; // @[Frontend.scala:82:7]
wire [38:0] io_cpu_bht_update_bits_pc_0 = io_cpu_bht_update_bits_pc; // @[Frontend.scala:82:7]
wire io_cpu_bht_update_bits_branch_0 = io_cpu_bht_update_bits_branch; // @[Frontend.scala:82:7]
wire io_cpu_bht_update_bits_taken_0 = io_cpu_bht_update_bits_taken; // @[Frontend.scala:82:7]
wire io_cpu_bht_update_bits_mispredict_0 = io_cpu_bht_update_bits_mispredict; // @[Frontend.scala:82:7]
wire io_cpu_flush_icache_0 = io_cpu_flush_icache; // @[Frontend.scala:82:7]
wire io_cpu_progress_0 = io_cpu_progress; // @[Frontend.scala:82:7]
wire io_ptw_req_ready_0 = io_ptw_req_ready; // @[Frontend.scala:82:7]
wire io_ptw_resp_valid_0 = io_ptw_resp_valid; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_ae_ptw_0 = io_ptw_resp_bits_ae_ptw; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_ae_final_0 = io_ptw_resp_bits_ae_final; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_pf_0 = io_ptw_resp_bits_pf; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_gf_0 = io_ptw_resp_bits_gf; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_hr_0 = io_ptw_resp_bits_hr; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_hw_0 = io_ptw_resp_bits_hw; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_hx_0 = io_ptw_resp_bits_hx; // @[Frontend.scala:82:7]
wire [9:0] io_ptw_resp_bits_pte_reserved_for_future_0 = io_ptw_resp_bits_pte_reserved_for_future; // @[Frontend.scala:82:7]
wire [43:0] io_ptw_resp_bits_pte_ppn_0 = io_ptw_resp_bits_pte_ppn; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_resp_bits_pte_reserved_for_software_0 = io_ptw_resp_bits_pte_reserved_for_software; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_pte_d_0 = io_ptw_resp_bits_pte_d; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_pte_a_0 = io_ptw_resp_bits_pte_a; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_pte_g_0 = io_ptw_resp_bits_pte_g; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_pte_u_0 = io_ptw_resp_bits_pte_u; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_pte_x_0 = io_ptw_resp_bits_pte_x; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_pte_w_0 = io_ptw_resp_bits_pte_w; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_pte_r_0 = io_ptw_resp_bits_pte_r; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_pte_v_0 = io_ptw_resp_bits_pte_v; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_resp_bits_level_0 = io_ptw_resp_bits_level; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_homogeneous_0 = io_ptw_resp_bits_homogeneous; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_gpa_valid_0 = io_ptw_resp_bits_gpa_valid; // @[Frontend.scala:82:7]
wire [38:0] io_ptw_resp_bits_gpa_bits_0 = io_ptw_resp_bits_gpa_bits; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_gpa_is_pte_0 = io_ptw_resp_bits_gpa_is_pte; // @[Frontend.scala:82:7]
wire [3:0] io_ptw_ptbr_mode_0 = io_ptw_ptbr_mode; // @[Frontend.scala:82:7]
wire [43:0] io_ptw_ptbr_ppn_0 = io_ptw_ptbr_ppn; // @[Frontend.scala:82:7]
wire io_ptw_status_debug_0 = io_ptw_status_debug; // @[Frontend.scala:82:7]
wire io_ptw_status_cease_0 = io_ptw_status_cease; // @[Frontend.scala:82:7]
wire io_ptw_status_wfi_0 = io_ptw_status_wfi; // @[Frontend.scala:82:7]
wire [31:0] io_ptw_status_isa_0 = io_ptw_status_isa; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_status_dprv_0 = io_ptw_status_dprv; // @[Frontend.scala:82:7]
wire io_ptw_status_dv_0 = io_ptw_status_dv; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_status_prv_0 = io_ptw_status_prv; // @[Frontend.scala:82:7]
wire io_ptw_status_v_0 = io_ptw_status_v; // @[Frontend.scala:82:7]
wire io_ptw_status_sd_0 = io_ptw_status_sd; // @[Frontend.scala:82:7]
wire io_ptw_status_mpv_0 = io_ptw_status_mpv; // @[Frontend.scala:82:7]
wire io_ptw_status_gva_0 = io_ptw_status_gva; // @[Frontend.scala:82:7]
wire io_ptw_status_tsr_0 = io_ptw_status_tsr; // @[Frontend.scala:82:7]
wire io_ptw_status_tw_0 = io_ptw_status_tw; // @[Frontend.scala:82:7]
wire io_ptw_status_tvm_0 = io_ptw_status_tvm; // @[Frontend.scala:82:7]
wire io_ptw_status_mxr_0 = io_ptw_status_mxr; // @[Frontend.scala:82:7]
wire io_ptw_status_sum_0 = io_ptw_status_sum; // @[Frontend.scala:82:7]
wire io_ptw_status_mprv_0 = io_ptw_status_mprv; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_status_fs_0 = io_ptw_status_fs; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_status_mpp_0 = io_ptw_status_mpp; // @[Frontend.scala:82:7]
wire io_ptw_status_spp_0 = io_ptw_status_spp; // @[Frontend.scala:82:7]
wire io_ptw_status_mpie_0 = io_ptw_status_mpie; // @[Frontend.scala:82:7]
wire io_ptw_status_spie_0 = io_ptw_status_spie; // @[Frontend.scala:82:7]
wire io_ptw_status_mie_0 = io_ptw_status_mie; // @[Frontend.scala:82:7]
wire io_ptw_status_sie_0 = io_ptw_status_sie; // @[Frontend.scala:82:7]
wire io_ptw_hstatus_spvp_0 = io_ptw_hstatus_spvp; // @[Frontend.scala:82:7]
wire io_ptw_hstatus_spv_0 = io_ptw_hstatus_spv; // @[Frontend.scala:82:7]
wire io_ptw_hstatus_gva_0 = io_ptw_hstatus_gva; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_debug_0 = io_ptw_gstatus_debug; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_cease_0 = io_ptw_gstatus_cease; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_wfi_0 = io_ptw_gstatus_wfi; // @[Frontend.scala:82:7]
wire [31:0] io_ptw_gstatus_isa_0 = io_ptw_gstatus_isa; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_gstatus_dprv_0 = io_ptw_gstatus_dprv; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_dv_0 = io_ptw_gstatus_dv; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_gstatus_prv_0 = io_ptw_gstatus_prv; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_v_0 = io_ptw_gstatus_v; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_sd_0 = io_ptw_gstatus_sd; // @[Frontend.scala:82:7]
wire [22:0] io_ptw_gstatus_zero2_0 = io_ptw_gstatus_zero2; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_mpv_0 = io_ptw_gstatus_mpv; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_gva_0 = io_ptw_gstatus_gva; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_mbe_0 = io_ptw_gstatus_mbe; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_sbe_0 = io_ptw_gstatus_sbe; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_gstatus_sxl_0 = io_ptw_gstatus_sxl; // @[Frontend.scala:82:7]
wire [7:0] io_ptw_gstatus_zero1_0 = io_ptw_gstatus_zero1; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_tsr_0 = io_ptw_gstatus_tsr; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_tw_0 = io_ptw_gstatus_tw; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_tvm_0 = io_ptw_gstatus_tvm; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_mxr_0 = io_ptw_gstatus_mxr; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_sum_0 = io_ptw_gstatus_sum; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_mprv_0 = io_ptw_gstatus_mprv; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_gstatus_fs_0 = io_ptw_gstatus_fs; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_gstatus_mpp_0 = io_ptw_gstatus_mpp; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_gstatus_vs_0 = io_ptw_gstatus_vs; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_spp_0 = io_ptw_gstatus_spp; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_mpie_0 = io_ptw_gstatus_mpie; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_ube_0 = io_ptw_gstatus_ube; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_spie_0 = io_ptw_gstatus_spie; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_upie_0 = io_ptw_gstatus_upie; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_mie_0 = io_ptw_gstatus_mie; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_hie_0 = io_ptw_gstatus_hie; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_sie_0 = io_ptw_gstatus_sie; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_uie_0 = io_ptw_gstatus_uie; // @[Frontend.scala:82:7]
wire io_ptw_pmp_0_cfg_l_0 = io_ptw_pmp_0_cfg_l; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_0_cfg_a_0 = io_ptw_pmp_0_cfg_a; // @[Frontend.scala:82:7]
wire io_ptw_pmp_0_cfg_x_0 = io_ptw_pmp_0_cfg_x; // @[Frontend.scala:82:7]
wire io_ptw_pmp_0_cfg_w_0 = io_ptw_pmp_0_cfg_w; // @[Frontend.scala:82:7]
wire io_ptw_pmp_0_cfg_r_0 = io_ptw_pmp_0_cfg_r; // @[Frontend.scala:82:7]
wire [29:0] io_ptw_pmp_0_addr_0 = io_ptw_pmp_0_addr; // @[Frontend.scala:82:7]
wire [31:0] io_ptw_pmp_0_mask_0 = io_ptw_pmp_0_mask; // @[Frontend.scala:82:7]
wire io_ptw_pmp_1_cfg_l_0 = io_ptw_pmp_1_cfg_l; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_1_cfg_a_0 = io_ptw_pmp_1_cfg_a; // @[Frontend.scala:82:7]
wire io_ptw_pmp_1_cfg_x_0 = io_ptw_pmp_1_cfg_x; // @[Frontend.scala:82:7]
wire io_ptw_pmp_1_cfg_w_0 = io_ptw_pmp_1_cfg_w; // @[Frontend.scala:82:7]
wire io_ptw_pmp_1_cfg_r_0 = io_ptw_pmp_1_cfg_r; // @[Frontend.scala:82:7]
wire [29:0] io_ptw_pmp_1_addr_0 = io_ptw_pmp_1_addr; // @[Frontend.scala:82:7]
wire [31:0] io_ptw_pmp_1_mask_0 = io_ptw_pmp_1_mask; // @[Frontend.scala:82:7]
wire io_ptw_pmp_2_cfg_l_0 = io_ptw_pmp_2_cfg_l; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_2_cfg_a_0 = io_ptw_pmp_2_cfg_a; // @[Frontend.scala:82:7]
wire io_ptw_pmp_2_cfg_x_0 = io_ptw_pmp_2_cfg_x; // @[Frontend.scala:82:7]
wire io_ptw_pmp_2_cfg_w_0 = io_ptw_pmp_2_cfg_w; // @[Frontend.scala:82:7]
wire io_ptw_pmp_2_cfg_r_0 = io_ptw_pmp_2_cfg_r; // @[Frontend.scala:82:7]
wire [29:0] io_ptw_pmp_2_addr_0 = io_ptw_pmp_2_addr; // @[Frontend.scala:82:7]
wire [31:0] io_ptw_pmp_2_mask_0 = io_ptw_pmp_2_mask; // @[Frontend.scala:82:7]
wire io_ptw_pmp_3_cfg_l_0 = io_ptw_pmp_3_cfg_l; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_3_cfg_a_0 = io_ptw_pmp_3_cfg_a; // @[Frontend.scala:82:7]
wire io_ptw_pmp_3_cfg_x_0 = io_ptw_pmp_3_cfg_x; // @[Frontend.scala:82:7]
wire io_ptw_pmp_3_cfg_w_0 = io_ptw_pmp_3_cfg_w; // @[Frontend.scala:82:7]
wire io_ptw_pmp_3_cfg_r_0 = io_ptw_pmp_3_cfg_r; // @[Frontend.scala:82:7]
wire [29:0] io_ptw_pmp_3_addr_0 = io_ptw_pmp_3_addr; // @[Frontend.scala:82:7]
wire [31:0] io_ptw_pmp_3_mask_0 = io_ptw_pmp_3_mask; // @[Frontend.scala:82:7]
wire io_ptw_pmp_4_cfg_l_0 = io_ptw_pmp_4_cfg_l; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_4_cfg_a_0 = io_ptw_pmp_4_cfg_a; // @[Frontend.scala:82:7]
wire io_ptw_pmp_4_cfg_x_0 = io_ptw_pmp_4_cfg_x; // @[Frontend.scala:82:7]
wire io_ptw_pmp_4_cfg_w_0 = io_ptw_pmp_4_cfg_w; // @[Frontend.scala:82:7]
wire io_ptw_pmp_4_cfg_r_0 = io_ptw_pmp_4_cfg_r; // @[Frontend.scala:82:7]
wire [29:0] io_ptw_pmp_4_addr_0 = io_ptw_pmp_4_addr; // @[Frontend.scala:82:7]
wire [31:0] io_ptw_pmp_4_mask_0 = io_ptw_pmp_4_mask; // @[Frontend.scala:82:7]
wire io_ptw_pmp_5_cfg_l_0 = io_ptw_pmp_5_cfg_l; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_5_cfg_a_0 = io_ptw_pmp_5_cfg_a; // @[Frontend.scala:82:7]
wire io_ptw_pmp_5_cfg_x_0 = io_ptw_pmp_5_cfg_x; // @[Frontend.scala:82:7]
wire io_ptw_pmp_5_cfg_w_0 = io_ptw_pmp_5_cfg_w; // @[Frontend.scala:82:7]
wire io_ptw_pmp_5_cfg_r_0 = io_ptw_pmp_5_cfg_r; // @[Frontend.scala:82:7]
wire [29:0] io_ptw_pmp_5_addr_0 = io_ptw_pmp_5_addr; // @[Frontend.scala:82:7]
wire [31:0] io_ptw_pmp_5_mask_0 = io_ptw_pmp_5_mask; // @[Frontend.scala:82:7]
wire io_ptw_pmp_6_cfg_l_0 = io_ptw_pmp_6_cfg_l; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_6_cfg_a_0 = io_ptw_pmp_6_cfg_a; // @[Frontend.scala:82:7]
wire io_ptw_pmp_6_cfg_x_0 = io_ptw_pmp_6_cfg_x; // @[Frontend.scala:82:7]
wire io_ptw_pmp_6_cfg_w_0 = io_ptw_pmp_6_cfg_w; // @[Frontend.scala:82:7]
wire io_ptw_pmp_6_cfg_r_0 = io_ptw_pmp_6_cfg_r; // @[Frontend.scala:82:7]
wire [29:0] io_ptw_pmp_6_addr_0 = io_ptw_pmp_6_addr; // @[Frontend.scala:82:7]
wire [31:0] io_ptw_pmp_6_mask_0 = io_ptw_pmp_6_mask; // @[Frontend.scala:82:7]
wire io_ptw_pmp_7_cfg_l_0 = io_ptw_pmp_7_cfg_l; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_7_cfg_a_0 = io_ptw_pmp_7_cfg_a; // @[Frontend.scala:82:7]
wire io_ptw_pmp_7_cfg_x_0 = io_ptw_pmp_7_cfg_x; // @[Frontend.scala:82:7]
wire io_ptw_pmp_7_cfg_w_0 = io_ptw_pmp_7_cfg_w; // @[Frontend.scala:82:7]
wire io_ptw_pmp_7_cfg_r_0 = io_ptw_pmp_7_cfg_r; // @[Frontend.scala:82:7]
wire [29:0] io_ptw_pmp_7_addr_0 = io_ptw_pmp_7_addr; // @[Frontend.scala:82:7]
wire [31:0] io_ptw_pmp_7_mask_0 = io_ptw_pmp_7_mask; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_0_ren_0 = io_ptw_customCSRs_csrs_0_ren; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_0_wen_0 = io_ptw_customCSRs_csrs_0_wen; // @[Frontend.scala:82:7]
wire [63:0] io_ptw_customCSRs_csrs_0_wdata_0 = io_ptw_customCSRs_csrs_0_wdata; // @[Frontend.scala:82:7]
wire [63:0] io_ptw_customCSRs_csrs_0_value_0 = io_ptw_customCSRs_csrs_0_value; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_1_ren_0 = io_ptw_customCSRs_csrs_1_ren; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_1_wen_0 = io_ptw_customCSRs_csrs_1_wen; // @[Frontend.scala:82:7]
wire [63:0] io_ptw_customCSRs_csrs_1_wdata_0 = io_ptw_customCSRs_csrs_1_wdata; // @[Frontend.scala:82:7]
wire [63:0] io_ptw_customCSRs_csrs_1_value_0 = io_ptw_customCSRs_csrs_1_value; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_2_ren_0 = io_ptw_customCSRs_csrs_2_ren; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_2_wen_0 = io_ptw_customCSRs_csrs_2_wen; // @[Frontend.scala:82:7]
wire [63:0] io_ptw_customCSRs_csrs_2_wdata_0 = io_ptw_customCSRs_csrs_2_wdata; // @[Frontend.scala:82:7]
wire [63:0] io_ptw_customCSRs_csrs_2_value_0 = io_ptw_customCSRs_csrs_2_value; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_3_ren_0 = io_ptw_customCSRs_csrs_3_ren; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_3_wen_0 = io_ptw_customCSRs_csrs_3_wen; // @[Frontend.scala:82:7]
wire [63:0] io_ptw_customCSRs_csrs_3_wdata_0 = io_ptw_customCSRs_csrs_3_wdata; // @[Frontend.scala:82:7]
wire [63:0] io_ptw_customCSRs_csrs_3_value_0 = io_ptw_customCSRs_csrs_3_value; // @[Frontend.scala:82:7]
wire auto_icache_master_out_d_ready = 1'h1; // @[Frontend.scala:82:7]
wire io_cpu_clock_enabled = 1'h1; // @[Frontend.scala:82:7]
wire clock_en = 1'h1; // @[Frontend.scala:94:31]
wire _taken_rviImm_b19_12_T = 1'h1; // @[RocketCore.scala:1343:26]
wire _taken_rviImm_b11_T_3 = 1'h1; // @[RocketCore.scala:1345:23]
wire _taken_rviImm_b19_12_T_5 = 1'h1; // @[RocketCore.scala:1343:26]
wire _taken_rviImm_b19_12_T_6 = 1'h1; // @[RocketCore.scala:1343:43]
wire _taken_rviImm_b19_12_T_7 = 1'h1; // @[RocketCore.scala:1343:36]
wire _taken_rviImm_b11_T_17 = 1'h1; // @[RocketCore.scala:1346:23]
wire _taken_rviImm_b4_1_T_12 = 1'h1; // @[RocketCore.scala:1349:41]
wire _taken_rviImm_b4_1_T_13 = 1'h1; // @[RocketCore.scala:1349:34]
wire _taken_T_6 = 1'h1; // @[Frontend.scala:270:13]
wire _taken_btb_io_ras_update_bits_cfiType_T_3 = 1'h1; // @[Frontend.scala:276:85]
wire _taken_rviImm_b19_12_T_10 = 1'h1; // @[RocketCore.scala:1343:26]
wire _taken_rviImm_b11_T_25 = 1'h1; // @[RocketCore.scala:1345:23]
wire _taken_rviImm_b19_12_T_15 = 1'h1; // @[RocketCore.scala:1343:26]
wire _taken_rviImm_b19_12_T_16 = 1'h1; // @[RocketCore.scala:1343:43]
wire _taken_rviImm_b19_12_T_17 = 1'h1; // @[RocketCore.scala:1343:36]
wire _taken_rviImm_b11_T_39 = 1'h1; // @[RocketCore.scala:1346:23]
wire _taken_rviImm_b4_1_T_32 = 1'h1; // @[RocketCore.scala:1349:41]
wire _taken_rviImm_b4_1_T_33 = 1'h1; // @[RocketCore.scala:1349:34]
wire _taken_btb_io_ras_update_bits_cfiType_T_11 = 1'h1; // @[Frontend.scala:276:85]
wire _clock_en_reg_T = 1'h1; // @[Frontend.scala:376:19]
wire _clock_en_reg_T_1 = 1'h1; // @[Frontend.scala:376:45]
wire _clock_en_reg_T_2 = 1'h1; // @[Frontend.scala:377:26]
wire _clock_en_reg_T_3 = 1'h1; // @[Frontend.scala:378:34]
wire _clock_en_reg_T_4 = 1'h1; // @[Frontend.scala:379:14]
wire _clock_en_reg_T_6 = 1'h1; // @[Frontend.scala:379:26]
wire _clock_en_reg_T_9 = 1'h1; // @[Frontend.scala:380:23]
wire auto_icache_master_out_a_bits_source = 1'h0; // @[Frontend.scala:82:7]
wire auto_icache_master_out_a_bits_corrupt = 1'h0; // @[Frontend.scala:82:7]
wire auto_icache_master_out_d_bits_source = 1'h0; // @[Frontend.scala:82:7]
wire io_cpu_btb_update_bits_taken = 1'h0; // @[Frontend.scala:82:7]
wire io_cpu_ras_update_valid = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_req_bits_bits_vstage1 = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_req_bits_bits_stage2 = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_resp_bits_fragmented_superpage = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_status_mbe = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_status_sbe = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_status_sd_rv32 = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_status_ube = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_status_upie = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_status_hie = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_status_uie = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_hstatus_vtsr = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_hstatus_vtw = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_hstatus_vtvm = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_hstatus_hu = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_hstatus_vsbe = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_gstatus_sd_rv32 = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_0_stall = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_0_set = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_1_stall = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_1_set = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_2_stall = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_2_set = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_3_stall = 1'h0; // @[Frontend.scala:82:7]
wire io_ptw_customCSRs_csrs_3_set = 1'h0; // @[Frontend.scala:82:7]
wire taken_rvcJAL = 1'h0; // @[Frontend.scala:245:35]
wire _taken_rviImm_sign_T = 1'h0; // @[RocketCore.scala:1341:24]
wire _taken_rviImm_b30_20_T = 1'h0; // @[RocketCore.scala:1342:26]
wire _taken_rviImm_b19_12_T_1 = 1'h0; // @[RocketCore.scala:1343:43]
wire _taken_rviImm_b19_12_T_2 = 1'h0; // @[RocketCore.scala:1343:36]
wire _taken_rviImm_b11_T = 1'h0; // @[RocketCore.scala:1344:23]
wire _taken_rviImm_b11_T_1 = 1'h0; // @[RocketCore.scala:1344:40]
wire _taken_rviImm_b11_T_2 = 1'h0; // @[RocketCore.scala:1344:33]
wire _taken_rviImm_b11_T_6 = 1'h0; // @[RocketCore.scala:1346:23]
wire _taken_rviImm_b10_5_T = 1'h0; // @[RocketCore.scala:1347:25]
wire _taken_rviImm_b10_5_T_1 = 1'h0; // @[RocketCore.scala:1347:42]
wire _taken_rviImm_b10_5_T_2 = 1'h0; // @[RocketCore.scala:1347:35]
wire _taken_rviImm_b4_1_T = 1'h0; // @[RocketCore.scala:1348:24]
wire _taken_rviImm_b4_1_T_1 = 1'h0; // @[RocketCore.scala:1349:24]
wire _taken_rviImm_b4_1_T_2 = 1'h0; // @[RocketCore.scala:1349:41]
wire _taken_rviImm_b4_1_T_3 = 1'h0; // @[RocketCore.scala:1349:34]
wire _taken_rviImm_b4_1_T_5 = 1'h0; // @[RocketCore.scala:1350:24]
wire _taken_rviImm_b0_T = 1'h0; // @[RocketCore.scala:1351:22]
wire _taken_rviImm_b0_T_2 = 1'h0; // @[RocketCore.scala:1352:22]
wire _taken_rviImm_b0_T_4 = 1'h0; // @[RocketCore.scala:1353:22]
wire _taken_rviImm_b0_T_6 = 1'h0; // @[RocketCore.scala:1353:17]
wire _taken_rviImm_b0_T_7 = 1'h0; // @[RocketCore.scala:1352:17]
wire taken_rviImm_b0 = 1'h0; // @[RocketCore.scala:1351:17]
wire _taken_rviImm_sign_T_3 = 1'h0; // @[RocketCore.scala:1341:24]
wire _taken_rviImm_b30_20_T_3 = 1'h0; // @[RocketCore.scala:1342:26]
wire _taken_rviImm_b11_T_11 = 1'h0; // @[RocketCore.scala:1344:23]
wire _taken_rviImm_b11_T_12 = 1'h0; // @[RocketCore.scala:1344:40]
wire _taken_rviImm_b11_T_13 = 1'h0; // @[RocketCore.scala:1344:33]
wire _taken_rviImm_b11_T_14 = 1'h0; // @[RocketCore.scala:1345:23]
wire _taken_rviImm_b10_5_T_4 = 1'h0; // @[RocketCore.scala:1347:25]
wire _taken_rviImm_b10_5_T_5 = 1'h0; // @[RocketCore.scala:1347:42]
wire _taken_rviImm_b10_5_T_6 = 1'h0; // @[RocketCore.scala:1347:35]
wire _taken_rviImm_b4_1_T_10 = 1'h0; // @[RocketCore.scala:1348:24]
wire _taken_rviImm_b4_1_T_11 = 1'h0; // @[RocketCore.scala:1349:24]
wire _taken_rviImm_b4_1_T_15 = 1'h0; // @[RocketCore.scala:1350:24]
wire _taken_rviImm_b0_T_8 = 1'h0; // @[RocketCore.scala:1351:22]
wire _taken_rviImm_b0_T_10 = 1'h0; // @[RocketCore.scala:1352:22]
wire _taken_rviImm_b0_T_12 = 1'h0; // @[RocketCore.scala:1353:22]
wire _taken_rviImm_b0_T_14 = 1'h0; // @[RocketCore.scala:1353:17]
wire _taken_rviImm_b0_T_15 = 1'h0; // @[RocketCore.scala:1352:17]
wire taken_rviImm_b0_1 = 1'h0; // @[RocketCore.scala:1351:17]
wire taken_rvcJAL_1 = 1'h0; // @[Frontend.scala:245:35]
wire _taken_rviImm_sign_T_6 = 1'h0; // @[RocketCore.scala:1341:24]
wire _taken_rviImm_b30_20_T_6 = 1'h0; // @[RocketCore.scala:1342:26]
wire _taken_rviImm_b19_12_T_11 = 1'h0; // @[RocketCore.scala:1343:43]
wire _taken_rviImm_b19_12_T_12 = 1'h0; // @[RocketCore.scala:1343:36]
wire _taken_rviImm_b11_T_22 = 1'h0; // @[RocketCore.scala:1344:23]
wire _taken_rviImm_b11_T_23 = 1'h0; // @[RocketCore.scala:1344:40]
wire _taken_rviImm_b11_T_24 = 1'h0; // @[RocketCore.scala:1344:33]
wire _taken_rviImm_b11_T_28 = 1'h0; // @[RocketCore.scala:1346:23]
wire _taken_rviImm_b10_5_T_8 = 1'h0; // @[RocketCore.scala:1347:25]
wire _taken_rviImm_b10_5_T_9 = 1'h0; // @[RocketCore.scala:1347:42]
wire _taken_rviImm_b10_5_T_10 = 1'h0; // @[RocketCore.scala:1347:35]
wire _taken_rviImm_b4_1_T_20 = 1'h0; // @[RocketCore.scala:1348:24]
wire _taken_rviImm_b4_1_T_21 = 1'h0; // @[RocketCore.scala:1349:24]
wire _taken_rviImm_b4_1_T_22 = 1'h0; // @[RocketCore.scala:1349:41]
wire _taken_rviImm_b4_1_T_23 = 1'h0; // @[RocketCore.scala:1349:34]
wire _taken_rviImm_b4_1_T_25 = 1'h0; // @[RocketCore.scala:1350:24]
wire _taken_rviImm_b0_T_16 = 1'h0; // @[RocketCore.scala:1351:22]
wire _taken_rviImm_b0_T_18 = 1'h0; // @[RocketCore.scala:1352:22]
wire _taken_rviImm_b0_T_20 = 1'h0; // @[RocketCore.scala:1353:22]
wire _taken_rviImm_b0_T_22 = 1'h0; // @[RocketCore.scala:1353:17]
wire _taken_rviImm_b0_T_23 = 1'h0; // @[RocketCore.scala:1352:17]
wire taken_rviImm_b0_2 = 1'h0; // @[RocketCore.scala:1351:17]
wire _taken_rviImm_sign_T_9 = 1'h0; // @[RocketCore.scala:1341:24]
wire _taken_rviImm_b30_20_T_9 = 1'h0; // @[RocketCore.scala:1342:26]
wire _taken_rviImm_b11_T_33 = 1'h0; // @[RocketCore.scala:1344:23]
wire _taken_rviImm_b11_T_34 = 1'h0; // @[RocketCore.scala:1344:40]
wire _taken_rviImm_b11_T_35 = 1'h0; // @[RocketCore.scala:1344:33]
wire _taken_rviImm_b11_T_36 = 1'h0; // @[RocketCore.scala:1345:23]
wire _taken_rviImm_b10_5_T_12 = 1'h0; // @[RocketCore.scala:1347:25]
wire _taken_rviImm_b10_5_T_13 = 1'h0; // @[RocketCore.scala:1347:42]
wire _taken_rviImm_b10_5_T_14 = 1'h0; // @[RocketCore.scala:1347:35]
wire _taken_rviImm_b4_1_T_30 = 1'h0; // @[RocketCore.scala:1348:24]
wire _taken_rviImm_b4_1_T_31 = 1'h0; // @[RocketCore.scala:1349:24]
wire _taken_rviImm_b4_1_T_35 = 1'h0; // @[RocketCore.scala:1350:24]
wire _taken_rviImm_b0_T_24 = 1'h0; // @[RocketCore.scala:1351:22]
wire _taken_rviImm_b0_T_26 = 1'h0; // @[RocketCore.scala:1352:22]
wire _taken_rviImm_b0_T_28 = 1'h0; // @[RocketCore.scala:1353:22]
wire _taken_rviImm_b0_T_30 = 1'h0; // @[RocketCore.scala:1353:17]
wire _taken_rviImm_b0_T_31 = 1'h0; // @[RocketCore.scala:1352:17]
wire taken_rviImm_b0_3 = 1'h0; // @[RocketCore.scala:1351:17]
wire [15:0] io_ptw_ptbr_asid = 16'h0; // @[Frontend.scala:82:7]
wire [15:0] io_ptw_hgatp_asid = 16'h0; // @[Frontend.scala:82:7]
wire [15:0] io_ptw_vsatp_asid = 16'h0; // @[Frontend.scala:82:7]
wire [3:0] io_ptw_hgatp_mode = 4'h0; // @[Frontend.scala:82:7]
wire [3:0] io_ptw_vsatp_mode = 4'h0; // @[Frontend.scala:82:7]
wire [43:0] io_ptw_hgatp_ppn = 44'h0; // @[Frontend.scala:82:7]
wire [43:0] io_ptw_vsatp_ppn = 44'h0; // @[Frontend.scala:82:7]
wire [22:0] io_ptw_status_zero2 = 23'h0; // @[Frontend.scala:82:7]
wire [7:0] io_ptw_status_zero1 = 8'h0; // @[Frontend.scala:82:7]
wire [1:0] io_cpu_ras_update_bits_cfiType = 2'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_status_xs = 2'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_status_vs = 2'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_hstatus_zero3 = 2'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_hstatus_zero2 = 2'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_gstatus_xs = 2'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_0_cfg_res = 2'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_1_cfg_res = 2'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_2_cfg_res = 2'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_3_cfg_res = 2'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_4_cfg_res = 2'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_5_cfg_res = 2'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_6_cfg_res = 2'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_pmp_7_cfg_res = 2'h0; // @[Frontend.scala:82:7]
wire [29:0] io_ptw_hstatus_zero6 = 30'h0; // @[Frontend.scala:82:7]
wire [8:0] io_ptw_hstatus_zero5 = 9'h0; // @[Frontend.scala:82:7]
wire [5:0] io_ptw_hstatus_vgein = 6'h0; // @[Frontend.scala:82:7]
wire [4:0] io_ptw_hstatus_zero1 = 5'h0; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_status_sxl = 2'h2; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_status_uxl = 2'h2; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_hstatus_vsxl = 2'h2; // @[Frontend.scala:82:7]
wire [1:0] io_ptw_gstatus_uxl = 2'h2; // @[Frontend.scala:82:7]
wire [2:0] auto_icache_master_out_a_bits_opcode = 3'h4; // @[Frontend.scala:82:7]
wire [2:0] auto_icache_master_out_a_bits_param = 3'h0; // @[Frontend.scala:82:7]
wire [3:0] auto_icache_master_out_a_bits_size = 4'h6; // @[Frontend.scala:82:7]
wire [7:0] auto_icache_master_out_a_bits_mask = 8'hFF; // @[Frontend.scala:82:7]
wire [63:0] auto_icache_master_out_a_bits_data = 64'h0; // @[Frontend.scala:82:7]
wire [63:0] io_ptw_customCSRs_csrs_0_sdata = 64'h0; // @[Frontend.scala:82:7]
wire [63:0] io_ptw_customCSRs_csrs_1_sdata = 64'h0; // @[Frontend.scala:82:7]
wire [63:0] io_ptw_customCSRs_csrs_2_sdata = 64'h0; // @[Frontend.scala:82:7]
wire [63:0] io_ptw_customCSRs_csrs_3_sdata = 64'h0; // @[Frontend.scala:82:7]
wire [31:0] auto_reset_vector_sink_in = 32'h10000; // @[Frontend.scala:82:7]
wire [31:0] resetVectorSinkNodeIn = 32'h10000; // @[MixedNode.scala:551:17]
wire [31:0] _s2_pc_T_2 = 32'h10000; // @[Frontend.scala:384:27]
wire [38:0] io_cpu_ras_update_bits_returnAddr = 39'h0; // @[Frontend.scala:82:7]
wire [31:0] _s2_pc_T = 32'hFFFEFFFF; // @[Frontend.scala:384:29]
wire [31:0] _s2_pc_T_1 = 32'hFFFEFFFF; // @[Frontend.scala:384:33]
wire [39:0] _io_cpu_npc_T_3; // @[Frontend.scala:384:27]
wire _io_cpu_perf_tlbMiss_T; // @[Decoupled.scala:51:35]
wire [31:0] auto_icache_master_out_a_bits_address_0; // @[Frontend.scala:82:7]
wire auto_icache_master_out_a_valid_0; // @[Frontend.scala:82:7]
wire [7:0] io_cpu_resp_bits_btb_bht_history_0; // @[Frontend.scala:82:7]
wire io_cpu_resp_bits_btb_bht_value_0; // @[Frontend.scala:82:7]
wire [1:0] io_cpu_resp_bits_btb_cfiType_0; // @[Frontend.scala:82:7]
wire io_cpu_resp_bits_btb_taken_0; // @[Frontend.scala:82:7]
wire [1:0] io_cpu_resp_bits_btb_mask_0; // @[Frontend.scala:82:7]
wire io_cpu_resp_bits_btb_bridx_0; // @[Frontend.scala:82:7]
wire [38:0] io_cpu_resp_bits_btb_target_0; // @[Frontend.scala:82:7]
wire [4:0] io_cpu_resp_bits_btb_entry_0; // @[Frontend.scala:82:7]
wire io_cpu_resp_bits_xcpt_pf_inst_0; // @[Frontend.scala:82:7]
wire io_cpu_resp_bits_xcpt_gf_inst_0; // @[Frontend.scala:82:7]
wire io_cpu_resp_bits_xcpt_ae_inst_0; // @[Frontend.scala:82:7]
wire [39:0] io_cpu_resp_bits_pc_0; // @[Frontend.scala:82:7]
wire [31:0] io_cpu_resp_bits_data_0; // @[Frontend.scala:82:7]
wire [1:0] io_cpu_resp_bits_mask_0; // @[Frontend.scala:82:7]
wire io_cpu_resp_bits_replay_0; // @[Frontend.scala:82:7]
wire io_cpu_resp_valid_0; // @[Frontend.scala:82:7]
wire io_cpu_gpa_valid_0; // @[Frontend.scala:82:7]
wire [39:0] io_cpu_gpa_bits_0; // @[Frontend.scala:82:7]
wire io_cpu_perf_acquire_0; // @[Frontend.scala:82:7]
wire io_cpu_perf_tlbMiss_0; // @[Frontend.scala:82:7]
wire io_cpu_gpa_is_pte_0; // @[Frontend.scala:82:7]
wire [39:0] io_cpu_npc_0; // @[Frontend.scala:82:7]
wire [26:0] io_ptw_req_bits_bits_addr_0; // @[Frontend.scala:82:7]
wire io_ptw_req_bits_bits_need_gpa_0; // @[Frontend.scala:82:7]
wire io_ptw_req_bits_valid_0; // @[Frontend.scala:82:7]
wire io_ptw_req_valid_0; // @[Frontend.scala:82:7]
wire io_errors_bus_valid; // @[Frontend.scala:82:7]
wire [31:0] io_errors_bus_bits; // @[Frontend.scala:82:7]
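// S1/S2 pipeline valid registers and the fetch-queue occupancy check (s0_fq_has_space)
// that decides whether a new fetch may be started this cycle (s0_valid), per
// Frontend.scala:107-113.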
reg s1_valid; // @[Frontend.scala:107:21]
reg s2_valid; // @[Frontend.scala:108:25]
wire _s0_fq_has_space_T = _fq_io_mask[2]; // @[Frontend.scala:91:64, :110:16]
wire _s0_fq_has_space_T_1 = ~_s0_fq_has_space_T; // @[Frontend.scala:110:{5,16}]
wire _s0_fq_has_space_T_2 = _fq_io_mask[3]; // @[Frontend.scala:91:64, :111:17]
wire _s0_fq_has_space_T_3 = ~_s0_fq_has_space_T_2; // @[Frontend.scala:111:{6,17}]
wire _s0_fq_has_space_T_4 = ~s1_valid; // @[Frontend.scala:107:21, :111:45]
wire _s0_fq_has_space_T_5 = ~s2_valid; // @[Frontend.scala:108:25, :111:58]
wire _s0_fq_has_space_T_6 = _s0_fq_has_space_T_4 | _s0_fq_has_space_T_5; // @[Frontend.scala:111:{45,55,58}]
wire _s0_fq_has_space_T_7 = _s0_fq_has_space_T_3 & _s0_fq_has_space_T_6; // @[Frontend.scala:111:{6,41,55}]
wire _s0_fq_has_space_T_8 = _s0_fq_has_space_T_1 | _s0_fq_has_space_T_7; // @[Frontend.scala:110:{5,40}, :111:41]
wire _s0_fq_has_space_T_9 = _fq_io_mask[4]; // @[Frontend.scala:91:64, :112:17]
wire _clock_en_reg_T_7 = _fq_io_mask[4]; // @[Frontend.scala:91:64, :112:17, :381:16]
wire _s0_fq_has_space_T_10 = ~_s0_fq_has_space_T_9; // @[Frontend.scala:112:{6,17}]
wire _s0_fq_has_space_T_11 = ~s1_valid; // @[Frontend.scala:107:21, :111:45, :112:45]
wire _s0_fq_has_space_T_12 = ~s2_valid; // @[Frontend.scala:108:25, :111:58, :112:58]
wire _s0_fq_has_space_T_13 = _s0_fq_has_space_T_11 & _s0_fq_has_space_T_12; // @[Frontend.scala:112:{45,55,58}]
wire _s0_fq_has_space_T_14 = _s0_fq_has_space_T_10 & _s0_fq_has_space_T_13; // @[Frontend.scala:112:{6,41,55}]
wire s0_fq_has_space = _s0_fq_has_space_T_8 | _s0_fq_has_space_T_14; // @[Frontend.scala:110:40, :111:70, :112:41]
wire s0_valid = io_cpu_req_valid_0 | s0_fq_has_space; // @[Frontend.scala:82:7, :111:70, :113:35]
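// Per-stage fetch state: S1/S2 PCs and speculation flags, the registered BTB and TLB
// responses for S2 (summarized into s2_xcpt), the partial-RVI tracking registers, and
// the wrong_path flag.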
reg [39:0] s1_pc; // @[Frontend.scala:115:18]
reg s1_speculative; // @[Frontend.scala:116:27]
reg [39:0] s2_pc; // @[Frontend.scala:117:22]
reg s2_btb_resp_valid; // @[Frontend.scala:118:44]
reg [1:0] s2_btb_resp_bits_cfiType; // @[Frontend.scala:119:29]
reg s2_btb_resp_bits_taken; // @[Frontend.scala:119:29]
reg [1:0] s2_btb_resp_bits_mask; // @[Frontend.scala:119:29]
reg s2_btb_resp_bits_bridx; // @[Frontend.scala:119:29]
wire _taken_T_30 = s2_btb_resp_bits_bridx; // @[Frontend.scala:119:29, :261:69]
reg [38:0] s2_btb_resp_bits_target; // @[Frontend.scala:119:29]
reg [4:0] s2_btb_resp_bits_entry; // @[Frontend.scala:119:29]
reg [7:0] s2_btb_resp_bits_bht_history; // @[Frontend.scala:119:29]
reg s2_btb_resp_bits_bht_value; // @[Frontend.scala:119:29]
wire _taken_predict_taken_T = s2_btb_resp_bits_bht_value; // @[Frontend.scala:119:29]
wire _taken_T_23 = s2_btb_resp_bits_bht_value; // @[Frontend.scala:119:29]
wire _taken_predict_taken_T_1 = s2_btb_resp_bits_bht_value; // @[Frontend.scala:119:29]
wire _taken_T_52 = s2_btb_resp_bits_bht_value; // @[Frontend.scala:119:29]
wire s2_btb_taken = s2_btb_resp_valid & s2_btb_resp_bits_taken; // @[Frontend.scala:118:44, :119:29, :120:40]
reg s2_tlb_resp_miss; // @[Frontend.scala:121:24]
reg [31:0] s2_tlb_resp_paddr; // @[Frontend.scala:121:24]
reg [39:0] s2_tlb_resp_gpa; // @[Frontend.scala:121:24]
reg s2_tlb_resp_pf_ld; // @[Frontend.scala:121:24]
reg s2_tlb_resp_pf_inst; // @[Frontend.scala:121:24]
reg s2_tlb_resp_ae_ld; // @[Frontend.scala:121:24]
reg s2_tlb_resp_ae_inst; // @[Frontend.scala:121:24]
reg s2_tlb_resp_ma_ld; // @[Frontend.scala:121:24]
reg s2_tlb_resp_cacheable; // @[Frontend.scala:121:24]
reg s2_tlb_resp_prefetchable; // @[Frontend.scala:121:24]
wire _s2_xcpt_T = s2_tlb_resp_ae_inst | s2_tlb_resp_pf_inst; // @[Frontend.scala:121:24, :122:37]
wire s2_xcpt = _s2_xcpt_T; // @[Frontend.scala:122:{37,60}]
reg s2_speculative; // @[Frontend.scala:123:31]
reg s2_partial_insn_valid; // @[Frontend.scala:124:38]
reg [15:0] s2_partial_insn; // @[Frontend.scala:125:28]
reg wrong_path; // @[Frontend.scala:126:27]
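// Fetch-group-aligned S1 PC (s1_base_pc), its sequential successor (ntpc = s1_base_pc + 4),
// and the S2 replay tracking that selects the next fetch address: npc replays s2_pc on a
// replay and otherwise follows predicted_npc. The _T_37 fire signal of the fetch-queue
// enqueue is fanned out to the predecode logic below.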
wire [39:0] _s1_base_pc_T = ~s1_pc; // @[Frontend.scala:115:18, :128:22]
wire [39:0] _s1_base_pc_T_1 = {_s1_base_pc_T[39:2], 2'h3}; // @[Frontend.scala:128:{22,29}]
wire [39:0] s1_base_pc = ~_s1_base_pc_T_1; // @[Frontend.scala:128:{20,29}]
wire [40:0] _ntpc_T = {1'h0, s1_base_pc} + 41'h4; // @[Frontend.scala:128:20, :129:25]
wire [39:0] ntpc = _ntpc_T[39:0]; // @[Frontend.scala:129:25]
wire [39:0] predicted_npc; // @[Frontend.scala:130:34]
wire predicted_taken; // @[Frontend.scala:131:36]
wire _s2_replay_T_5; // @[Frontend.scala:134:46]
wire s2_replay; // @[Frontend.scala:133:23]
wire _fq_io_enq_valid_T_6; // @[Frontend.scala:184:52]
wire _T_37 = _fq_io_enq_ready & _fq_io_enq_valid_T_6; // @[Decoupled.scala:51:35]
wire _s2_replay_T; // @[Decoupled.scala:51:35]
assign _s2_replay_T = _T_37; // @[Decoupled.scala:51:35]
wire _btb_io_btb_update_valid_T; // @[Decoupled.scala:51:35]
assign _btb_io_btb_update_valid_T = _T_37; // @[Decoupled.scala:51:35]
wire _taken_btb_io_ras_update_valid_T; // @[Decoupled.scala:51:35]
assign _taken_btb_io_ras_update_valid_T = _T_37; // @[Decoupled.scala:51:35]
wire _taken_T_8; // @[Decoupled.scala:51:35]
assign _taken_T_8 = _T_37; // @[Decoupled.scala:51:35]
wire _taken_btb_io_bht_advance_valid_T; // @[Decoupled.scala:51:35]
assign _taken_btb_io_bht_advance_valid_T = _T_37; // @[Decoupled.scala:51:35]
wire _taken_btb_io_ras_update_valid_T_9; // @[Decoupled.scala:51:35]
assign _taken_btb_io_ras_update_valid_T_9 = _T_37; // @[Decoupled.scala:51:35]
wire _taken_T_37; // @[Decoupled.scala:51:35]
assign _taken_T_37 = _T_37; // @[Decoupled.scala:51:35]
wire _taken_btb_io_bht_advance_valid_T_3; // @[Decoupled.scala:51:35]
assign _taken_btb_io_bht_advance_valid_T_3 = _T_37; // @[Decoupled.scala:51:35]
wire _taken_T_57; // @[Decoupled.scala:51:35]
assign _taken_T_57 = _T_37; // @[Decoupled.scala:51:35]
wire _s2_replay_T_1 = ~_s2_replay_T; // @[Decoupled.scala:51:35]
wire _s2_replay_T_2 = s2_valid & _s2_replay_T_1; // @[Frontend.scala:108:25, :134:{26,29}]
wire _s2_replay_T_3 = ~s0_valid; // @[Frontend.scala:113:35, :134:70]
wire _s2_replay_T_4 = s2_replay & _s2_replay_T_3; // @[Frontend.scala:133:23, :134:{67,70}]
reg s2_replay_REG; // @[Frontend.scala:134:56]
assign _s2_replay_T_5 = _s2_replay_T_2 | s2_replay_REG; // @[Frontend.scala:134:{26,46,56}]
assign s2_replay = _s2_replay_T_5; // @[Frontend.scala:133:23, :134:46]
wire [39:0] npc = s2_replay ? s2_pc : predicted_npc; // @[Frontend.scala:117:22, :130:34, :133:23, :135:16]
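// Speculative-fetch tracking (s0/s1 speculative), the recent-progress counter that limits
// speculative TLB refills, and the kill/prefetch controls driven to the TLB and I-cache.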
wire _s0_speculative_T = ~s2_speculative; // @[Frontend.scala:123:31, :141:56]
wire _s0_speculative_T_1 = s2_valid & _s0_speculative_T; // @[Frontend.scala:108:25, :141:{53,56}]
wire _s0_speculative_T_2 = s1_speculative | _s0_speculative_T_1; // @[Frontend.scala:116:27, :141:{41,53}]
wire s0_speculative = _s0_speculative_T_2 | predicted_taken; // @[Frontend.scala:131:36, :141:{41,72}]
wire _s1_speculative_T = s2_replay ? s2_speculative : s0_speculative; // @[Frontend.scala:123:31, :133:23, :141:72, :143:75]
wire _s1_speculative_T_1 = io_cpu_req_valid_0 ? io_cpu_req_bits_speculative_0 : _s1_speculative_T; // @[Frontend.scala:82:7, :143:{24,75}]
wire s2_redirect; // @[Frontend.scala:145:32]
wire _s2_valid_T = ~s2_redirect; // @[Frontend.scala:145:32, :148:17]
reg [1:0] recent_progress_counter; // @[Frontend.scala:155:40]
wire recent_progress = |recent_progress_counter; // @[Frontend.scala:155:40, :156:49]
assign _io_cpu_perf_tlbMiss_T = io_ptw_req_ready_0 & io_ptw_req_valid_0; // @[Decoupled.scala:51:35]
wire [2:0] _recent_progress_counter_T = {1'h0, recent_progress_counter} - 3'h1; // @[Frontend.scala:155:40, :157:97]
wire [1:0] _recent_progress_counter_T_1 = _recent_progress_counter_T[1:0]; // @[Frontend.scala:157:97]
wire _s2_kill_speculative_tlb_refill_T = ~recent_progress; // @[Frontend.scala:156:49, :160:58]
wire s2_kill_speculative_tlb_refill = s2_speculative & _s2_kill_speculative_tlb_refill_T; // @[Frontend.scala:123:31, :160:{55,58}]
wire _tlb_io_req_valid_T = ~s2_replay; // @[Frontend.scala:133:23, :147:9, :163:35]
wire _tlb_io_req_valid_T_1 = s1_valid & _tlb_io_req_valid_T; // @[Frontend.scala:107:21, :163:{32,35}]
wire _tlb_io_kill_T = ~s2_valid; // @[Frontend.scala:108:25, :111:58, :171:18]
wire _tlb_io_kill_T_1 = _tlb_io_kill_T | s2_kill_speculative_tlb_refill; // @[Frontend.scala:160:55, :171:{18,28}]
wire _icache_io_s1_kill_T = s2_redirect | _tlb_io_resp_miss; // @[Frontend.scala:105:19, :145:32, :178:36]
wire _icache_io_s1_kill_T_1 = _icache_io_s1_kill_T | s2_replay; // @[Frontend.scala:133:23, :178:{36,56}]
wire _s2_can_speculatively_refill_T = io_ptw_customCSRs_csrs_0_value_0[3]; // @[CustomCSRs.scala:46:69]
wire _s2_can_speculatively_refill_T_1 = ~_s2_can_speculatively_refill_T; // @[CustomCSRs.scala:46:69]
wire s2_can_speculatively_refill = s2_tlb_resp_cacheable & _s2_can_speculatively_refill_T_1; // @[Frontend.scala:121:24, :179:{59,62}]
wire _icache_io_s2_kill_T = ~s2_can_speculatively_refill; // @[Frontend.scala:179:59, :180:42]
wire _icache_io_s2_kill_T_1 = s2_speculative & _icache_io_s2_kill_T; // @[Frontend.scala:123:31, :180:{39,42}]
wire _icache_io_s2_kill_T_2 = _icache_io_s2_kill_T_1 | s2_xcpt; // @[Frontend.scala:122:60, :180:{39,71}]
wire _icache_io_s2_prefetch_T = io_ptw_customCSRs_csrs_0_value_0[17]; // @[RocketCore.scala:115:60]
wire _icache_io_s2_prefetch_T_1 = ~_icache_io_s2_prefetch_T; // @[RocketCore.scala:115:60]
wire _icache_io_s2_prefetch_T_2 = s2_tlb_resp_prefetchable & _icache_io_s2_prefetch_T_1; // @[Frontend.scala:121:24, :182:{53,56}]
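// Fetch-queue enqueue control: the enqueue is valid when S2 holds an I-cache response or a
// reportable kill/exception, the enqueue mask depends on s2_pc[1], and replay is raised when
// the I-cache response is missing without an exception. io_cpu_npc is the selected next PC
// with bit 0 forced to zero.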
reg fq_io_enq_valid_REG; // @[Frontend.scala:184:29]
wire _fq_io_enq_valid_T = fq_io_enq_valid_REG & s2_valid; // @[Frontend.scala:108:25, :184:{29,40}]
wire _GEN = s2_kill_speculative_tlb_refill & s2_tlb_resp_miss; // @[Frontend.scala:121:24, :160:55, :184:112]
wire _fq_io_enq_valid_T_1; // @[Frontend.scala:184:112]
assign _fq_io_enq_valid_T_1 = _GEN; // @[Frontend.scala:184:112]
wire _fq_io_enq_bits_replay_T_5; // @[Frontend.scala:190:150]
assign _fq_io_enq_bits_replay_T_5 = _GEN; // @[Frontend.scala:184:112, :190:150]
wire _fq_io_enq_valid_T_2 = _icache_io_resp_valid | _fq_io_enq_valid_T_1; // @[Frontend.scala:70:26, :184:{77,112}]
wire _fq_io_enq_valid_T_3 = ~s2_tlb_resp_miss; // @[Frontend.scala:121:24, :184:137]
wire _fq_io_enq_valid_T_4 = _fq_io_enq_valid_T_3 & _icache_io_s2_kill_T_2; // @[Frontend.scala:180:71, :184:{137,155}]
wire _fq_io_enq_valid_T_5 = _fq_io_enq_valid_T_2 | _fq_io_enq_valid_T_4; // @[Frontend.scala:184:{77,133,155}]
assign _fq_io_enq_valid_T_6 = _fq_io_enq_valid_T & _fq_io_enq_valid_T_5; // @[Frontend.scala:184:{40,52,133}]
wire [39:0] _io_cpu_npc_T = io_cpu_req_valid_0 ? io_cpu_req_bits_pc_0 : npc; // @[Frontend.scala:82:7, :135:16, :186:28]
wire [39:0] _io_cpu_npc_T_1 = ~_io_cpu_npc_T; // @[Frontend.scala:186:28, :384:29]
wire [39:0] _io_cpu_npc_T_2 = {_io_cpu_npc_T_1[39:1], 1'h1}; // @[Frontend.scala:384:{29,33}]
assign _io_cpu_npc_T_3 = ~_io_cpu_npc_T_2; // @[Frontend.scala:384:{27,33}]
assign io_cpu_npc_0 = _io_cpu_npc_T_3; // @[Frontend.scala:82:7, :384:27]
wire _fq_io_enq_bits_mask_T = s2_pc[1]; // @[package.scala:163:13]
wire [2:0] _fq_io_enq_bits_mask_T_1 = 3'h3 << _fq_io_enq_bits_mask_T; // @[package.scala:163:13]
wire _fq_io_enq_bits_replay_T = ~_icache_io_resp_valid; // @[Frontend.scala:70:26, :190:80]
wire _fq_io_enq_bits_replay_T_1 = _icache_io_s2_kill_T_2 & _fq_io_enq_bits_replay_T; // @[Frontend.scala:180:71, :190:{77,80}]
wire _fq_io_enq_bits_replay_T_2 = ~s2_xcpt; // @[Frontend.scala:122:60, :190:105]
wire _fq_io_enq_bits_replay_T_3 = _fq_io_enq_bits_replay_T_1 & _fq_io_enq_bits_replay_T_2; // @[Frontend.scala:190:{77,102,105}]
wire _fq_io_enq_bits_replay_T_4 = _fq_io_enq_bits_replay_T_3; // @[Frontend.scala:190:{56,102}]
wire _fq_io_enq_bits_replay_T_6 = _fq_io_enq_bits_replay_T_4 | _fq_io_enq_bits_replay_T_5; // @[Frontend.scala:190:{56,115,150}]
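// BTB lookup result: predicted_taken/predicted_npc come from the BTB response (with a
// sign-extended target), s2_base_pc is s2_pc aligned down to the 4-byte fetch group, and
// taken_idx/after_idx/useRAS/updateBTB are assigned by the predecode logic below.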
wire _btb_io_req_valid_T = ~s2_redirect; // @[Frontend.scala:145:32, :148:17, :209:27]
assign predicted_taken = _btb_io_resp_valid & _btb_io_resp_bits_taken; // @[Frontend.scala:131:36, :198:21, :213:29]
wire _predicted_npc_T = _btb_io_resp_bits_target[38]; // @[package.scala:132:38]
wire [39:0] _predicted_npc_T_1 = {_predicted_npc_T, _btb_io_resp_bits_target}; // @[package.scala:132:{15,38}]
wire [39:0] _s2_base_pc_T = ~s2_pc; // @[Frontend.scala:117:22, :222:24]
wire [39:0] _s2_base_pc_T_1 = {_s2_base_pc_T[39:2], 2'h3}; // @[Frontend.scala:222:{24,31}]
wire [39:0] s2_base_pc = ~_s2_base_pc_T_1; // @[Frontend.scala:222:{22,31}]
wire [39:0] taken_pc = s2_base_pc; // @[Frontend.scala:222:22, :287:33]
wire _taken_T_35; // @[Frontend.scala:270:13]
wire taken_idx; // @[Frontend.scala:223:25]
wire [1:0] after_idx; // @[Frontend.scala:224:25]
wire useRAS; // @[Frontend.scala:225:29]
wire updateBTB; // @[Frontend.scala:226:32]
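// BTB/RAS training path: updates are gated by the enqueue handshake, !wrong_path,
// fetch_bubble_likely (derived from the fetch-queue mask) and updateBTB; the RAS update
// address is the halfword following the taken slot (s2_base_pc + 2*after_idx).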
wire _fetch_bubble_likely_T = _fq_io_mask[1]; // @[Frontend.scala:91:64, :318:44]
wire fetch_bubble_likely = ~_fetch_bubble_likely_T; // @[Frontend.scala:318:{33,44}]
wire _btb_io_btb_update_valid_T_1 = ~wrong_path; // @[Frontend.scala:126:27, :319:52]
wire _btb_io_btb_update_valid_T_2 = _btb_io_btb_update_valid_T & _btb_io_btb_update_valid_T_1; // @[Decoupled.scala:51:35]
wire _btb_io_btb_update_valid_T_3 = _btb_io_btb_update_valid_T_2 & fetch_bubble_likely; // @[Frontend.scala:318:33, :319:{49,64}]
wire _btb_io_btb_update_valid_T_4 = _btb_io_btb_update_valid_T_3 & updateBTB; // @[Frontend.scala:226:32, :319:{64,87}]
wire [1:0] _btb_io_btb_update_bits_br_pc_T = {taken_idx, 1'h0}; // @[Frontend.scala:223:25, :323:63]
wire [39:0] _btb_io_btb_update_bits_br_pc_T_1 = {s2_base_pc[39:2], s2_base_pc[1:0] | _btb_io_btb_update_bits_br_pc_T}; // @[Frontend.scala:222:22, :323:{50,63}]
wire [2:0] _btb_io_ras_update_bits_returnAddr_T = {after_idx, 1'h0}; // @[Frontend.scala:224:25, :327:66]
wire [40:0] _btb_io_ras_update_bits_returnAddr_T_1 = {1'h0, s2_base_pc} + {38'h0, _btb_io_ras_update_bits_returnAddr_T}; // @[Frontend.scala:129:25, :222:22, :327:{53,66}]
wire [39:0] _btb_io_ras_update_bits_returnAddr_T_2 = _btb_io_ras_update_bits_returnAddr_T_1[39:0]; // @[Frontend.scala:327:53]
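// Predecode of the first halfword in the fetch group (slot 0): detect compressed (RVC) vs.
// 32-bit (RVI) encodings, classify branch/jump/JALR/call/return, and extract the RVC
// jump/branch immediates (RVC.scala:44-45).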
wire [1:0] _taken_prevRVI_T = s2_partial_insn[1:0]; // @[Frontend.scala:125:28, :233:39]
wire _taken_prevRVI_T_1 = _taken_prevRVI_T != 2'h3; // @[Frontend.scala:233:{39,45}]
wire _taken_prevRVI_T_2 = ~_taken_prevRVI_T_1; // @[Frontend.scala:233:45, :234:34]
wire taken_prevRVI = s2_partial_insn_valid & _taken_prevRVI_T_2; // @[Frontend.scala:124:38, :234:{31,34}]
wire _taken_valid_T = _fq_io_enq_bits_mask_T_1[0]; // @[Frontend.scala:189:50, :235:38]
wire _taken_valid_T_1 = ~taken_prevRVI; // @[Frontend.scala:234:31, :235:47]
wire taken_valid = _taken_valid_T & _taken_valid_T_1; // @[Frontend.scala:235:{38,44,47}]
wire [15:0] taken_bits = _icache_io_resp_bits_data[15:0]; // @[Frontend.scala:70:26, :236:37]
wire [1:0] _taken_rvc_T = taken_bits[1:0]; // @[Frontend.scala:233:39, :236:37]
wire [1:0] _taken_prevRVI_T_3 = taken_bits[1:0]; // @[Frontend.scala:233:39, :236:37]
wire taken_rvc = _taken_rvc_T != 2'h3; // @[Frontend.scala:233:{39,45}]
wire [31:0] taken_rviBits = {taken_bits, s2_partial_insn}; // @[Frontend.scala:125:28, :236:37, :238:24]
wire [6:0] _taken_rviBranch_T = taken_rviBits[6:0]; // @[Frontend.scala:238:24, :239:30]
wire [6:0] _taken_rviJump_T = taken_rviBits[6:0]; // @[Frontend.scala:238:24, :239:30, :240:28]
wire [6:0] _taken_rviJALR_T = taken_rviBits[6:0]; // @[Frontend.scala:238:24, :239:30, :241:28]
wire taken_rviBranch = _taken_rviBranch_T == 7'h63; // @[Frontend.scala:239:{30,36}]
wire taken_rviJump = _taken_rviJump_T == 7'h6F; // @[Frontend.scala:240:{28,34}]
wire taken_rviJALR = _taken_rviJALR_T == 7'h67; // @[Frontend.scala:241:{28,34}]
wire _taken_rviReturn_T = taken_rviBits[7]; // @[Frontend.scala:238:24, :242:42]
wire _taken_rviCall_T_1 = taken_rviBits[7]; // @[Frontend.scala:238:24, :242:42, :243:52]
wire _taken_rviImm_b11_T_7 = taken_rviBits[7]; // @[RocketCore.scala:1346:39]
wire _taken_rviImm_b0_T_1 = taken_rviBits[7]; // @[RocketCore.scala:1351:37]
wire _taken_rviImm_b11_T_18 = taken_rviBits[7]; // @[RocketCore.scala:1346:39]
wire _taken_rviImm_b0_T_9 = taken_rviBits[7]; // @[RocketCore.scala:1351:37]
wire _taken_rviReturn_T_1 = ~_taken_rviReturn_T; // @[Frontend.scala:242:{34,42}]
wire _taken_rviReturn_T_2 = taken_rviJALR & _taken_rviReturn_T_1; // @[Frontend.scala:241:34, :242:{31,34}]
wire [4:0] _taken_rviReturn_T_3 = taken_rviBits[19:15]; // @[Frontend.scala:238:24, :242:77]
wire [4:0] _taken_rviReturn_T_4 = _taken_rviReturn_T_3 & 5'h1B; // @[Frontend.scala:242:{66,77}]
wire _taken_rviReturn_T_5 = _taken_rviReturn_T_4 == 5'h1; // @[Frontend.scala:242:66]
wire taken_rviReturn = _taken_rviReturn_T_2 & _taken_rviReturn_T_5; // @[Frontend.scala:242:{31,46,66}]
wire _GEN_0 = taken_rviJALR | taken_rviJump; // @[Frontend.scala:240:34, :241:34, :243:30]
wire _taken_rviCall_T; // @[Frontend.scala:243:30]
assign _taken_rviCall_T = _GEN_0; // @[Frontend.scala:243:30]
wire _taken_taken_T; // @[Frontend.scala:255:29]
assign _taken_taken_T = _GEN_0; // @[Frontend.scala:243:30, :255:29]
wire taken_rviCall = _taken_rviCall_T & _taken_rviCall_T_1; // @[Frontend.scala:243:{30,42,52}]
wire [15:0] _GEN_1 = taken_bits & 16'hE003; // @[Frontend.scala:236:37, :244:28]
wire [15:0] _taken_rvcBranch_T; // @[Frontend.scala:244:28]
assign _taken_rvcBranch_T = _GEN_1; // @[Frontend.scala:244:28]
wire [15:0] _taken_rvcBranch_T_2; // @[Frontend.scala:244:60]
assign _taken_rvcBranch_T_2 = _GEN_1; // @[Frontend.scala:244:{28,60}]
wire [15:0] _taken_rvcJAL_T; // @[Frontend.scala:245:43]
assign _taken_rvcJAL_T = _GEN_1; // @[Frontend.scala:244:28, :245:43]
wire [15:0] _taken_rvcJump_T; // @[Frontend.scala:246:26]
assign _taken_rvcJump_T = _GEN_1; // @[Frontend.scala:244:28, :246:26]
wire _taken_rvcBranch_T_1 = _taken_rvcBranch_T == 16'hC001; // @[Frontend.scala:244:28]
wire _taken_rvcBranch_T_3 = _taken_rvcBranch_T_2 == 16'hE001; // @[Frontend.scala:244:60]
wire taken_rvcBranch = _taken_rvcBranch_T_1 | _taken_rvcBranch_T_3; // @[Frontend.scala:244:{28,52,60}]
wire _taken_rvcJAL_T_1 = _taken_rvcJAL_T == 16'h2001; // @[Frontend.scala:245:43]
wire _taken_rvcJump_T_1 = _taken_rvcJump_T == 16'hA001; // @[Frontend.scala:246:26]
wire taken_rvcJump = _taken_rvcJump_T_1; // @[Frontend.scala:246:{26,47}]
wire _taken_rvcImm_T = taken_bits[14]; // @[Frontend.scala:236:37, :247:28]
wire _taken_rvcImm_T_1 = taken_bits[12]; // @[RVC.scala:45:27]
wire _taken_rvcImm_T_9 = taken_bits[12]; // @[RVC.scala:44:28, :45:27]
wire [4:0] _taken_rvcImm_T_2 = {5{_taken_rvcImm_T_1}}; // @[RVC.scala:45:{22,27}]
wire [1:0] _taken_rvcImm_T_3 = taken_bits[6:5]; // @[RVC.scala:45:35]
wire _taken_rvcImm_T_4 = taken_bits[2]; // @[RVC.scala:45:43]
wire _taken_rvcImm_T_15 = taken_bits[2]; // @[RVC.scala:44:63, :45:43]
wire [1:0] _taken_rvcImm_T_5 = taken_bits[11:10]; // @[RVC.scala:45:49]
wire [1:0] _taken_rvcImm_T_6 = taken_bits[4:3]; // @[RVC.scala:45:59]
wire [3:0] taken_rvcImm_lo_hi = {_taken_rvcImm_T_5, _taken_rvcImm_T_6}; // @[RVC.scala:45:{17,49,59}]
wire [4:0] taken_rvcImm_lo = {taken_rvcImm_lo_hi, 1'h0}; // @[RVC.scala:45:17]
wire [6:0] taken_rvcImm_hi_hi = {_taken_rvcImm_T_2, _taken_rvcImm_T_3}; // @[RVC.scala:45:{17,22,35}]
wire [7:0] taken_rvcImm_hi = {taken_rvcImm_hi_hi, _taken_rvcImm_T_4}; // @[RVC.scala:45:{17,43}]
wire [12:0] _taken_rvcImm_T_7 = {taken_rvcImm_hi, taken_rvcImm_lo}; // @[RVC.scala:45:17]
wire [12:0] _taken_rvcImm_T_8 = _taken_rvcImm_T_7; // @[RVC.scala:45:17]
wire [9:0] _taken_rvcImm_T_10 = {10{_taken_rvcImm_T_9}}; // @[RVC.scala:44:{22,28}]
wire _taken_rvcImm_T_11 = taken_bits[8]; // @[RVC.scala:44:36]
wire [1:0] _taken_rvcImm_T_12 = taken_bits[10:9]; // @[RVC.scala:44:42]
wire _taken_rvcImm_T_13 = taken_bits[6]; // @[RVC.scala:44:51]
wire _taken_rvcImm_T_14 = taken_bits[7]; // @[RVC.scala:44:57]
wire _taken_rvcImm_T_16 = taken_bits[11]; // @[RVC.scala:44:69]
wire [2:0] _taken_rvcImm_T_17 = taken_bits[5:3]; // @[RVC.scala:44:76]
wire [3:0] taken_rvcImm_lo_lo = {_taken_rvcImm_T_17, 1'h0}; // @[RVC.scala:44:{17,76}]
wire [1:0] taken_rvcImm_lo_hi_1 = {_taken_rvcImm_T_15, _taken_rvcImm_T_16}; // @[RVC.scala:44:{17,63,69}]
wire [5:0] taken_rvcImm_lo_1 = {taken_rvcImm_lo_hi_1, taken_rvcImm_lo_lo}; // @[RVC.scala:44:17]
wire [1:0] taken_rvcImm_hi_lo = {_taken_rvcImm_T_13, _taken_rvcImm_T_14}; // @[RVC.scala:44:{17,51,57}]
wire [10:0] taken_rvcImm_hi_hi_hi = {_taken_rvcImm_T_10, _taken_rvcImm_T_11}; // @[RVC.scala:44:{17,22,36}]
wire [12:0] taken_rvcImm_hi_hi_1 = {taken_rvcImm_hi_hi_hi, _taken_rvcImm_T_12}; // @[RVC.scala:44:{17,42}]
wire [14:0] taken_rvcImm_hi_1 = {taken_rvcImm_hi_hi_1, taken_rvcImm_hi_lo}; // @[RVC.scala:44:17]
wire [20:0] _taken_rvcImm_T_18 = {taken_rvcImm_hi_1, taken_rvcImm_lo_1}; // @[RVC.scala:44:17]
wire [20:0] _taken_rvcImm_T_19 = _taken_rvcImm_T_18; // @[RVC.scala:44:17]
wire [20:0] taken_rvcImm = _taken_rvcImm_T ? {{8{_taken_rvcImm_T_8[12]}}, _taken_rvcImm_T_8} : _taken_rvcImm_T_19; // @[Frontend.scala:247:{23,28,72,118}]
wire [15:0] _GEN_2 = taken_bits & 16'hF003; // @[Frontend.scala:236:37, :248:24]
wire [15:0] _taken_rvcJR_T; // @[Frontend.scala:248:24]
assign _taken_rvcJR_T = _GEN_2; // @[Frontend.scala:248:24]
wire [15:0] _taken_rvcJALR_T; // @[Frontend.scala:250:26]
assign _taken_rvcJALR_T = _GEN_2; // @[Frontend.scala:248:24, :250:26]
wire _taken_rvcJR_T_1 = _taken_rvcJR_T == 16'h8002; // @[Frontend.scala:248:24]
wire [4:0] _taken_rvcJR_T_2 = taken_bits[6:2]; // @[Frontend.scala:236:37, :248:53]
wire [4:0] _taken_rvcJALR_T_2 = taken_bits[6:2]; // @[Frontend.scala:236:37, :248:53, :250:56]
wire _taken_rvcJR_T_3 = _taken_rvcJR_T_2 == 5'h0; // @[Frontend.scala:248:{53,59}]
wire taken_rvcJR = _taken_rvcJR_T_1 & _taken_rvcJR_T_3; // @[Frontend.scala:248:{24,46,59}]
wire [4:0] _taken_rvcReturn_T = taken_bits[11:7]; // @[Frontend.scala:236:37, :249:57]
wire [4:0] _taken_rvcReturn_T_1 = _taken_rvcReturn_T & 5'h1B; // @[Frontend.scala:249:{49,57}]
wire _taken_rvcReturn_T_2 = _taken_rvcReturn_T_1 == 5'h1; // @[Frontend.scala:249:49]
wire taken_rvcReturn = taken_rvcJR & _taken_rvcReturn_T_2; // @[Frontend.scala:248:46, :249:{29,49}]
wire _taken_rvcJALR_T_1 = _taken_rvcJALR_T == 16'h9002; // @[Frontend.scala:250:26]
wire _taken_rvcJALR_T_3 = _taken_rvcJALR_T_2 == 5'h0; // @[Frontend.scala:250:{56,62}]
wire taken_rvcJALR = _taken_rvcJALR_T_1 & _taken_rvcJALR_T_3; // @[Frontend.scala:250:{26,49,62}]
wire taken_rvcCall = taken_rvcJALR; // @[Frontend.scala:250:49, :251:28]
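// 32-bit (RVI) immediate extraction for slot 0, replicating the immediate generator from
// RocketCore.scala:1341-1355: the jump (UJ) form is selected when taken_rviBits[3] is set,
// the branch (SB) form otherwise.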
wire _taken_rviImm_T = taken_rviBits[3]; // @[Frontend.scala:238:24, :252:31]
wire _taken_rviImm_sign_T_1 = taken_rviBits[31]; // @[RocketCore.scala:1341:44]
wire _taken_rviImm_sign_T_4 = taken_rviBits[31]; // @[RocketCore.scala:1341:44]
wire _taken_rviImm_sign_T_2 = _taken_rviImm_sign_T_1; // @[RocketCore.scala:1341:{44,49}]
wire taken_rviImm_sign = _taken_rviImm_sign_T_2; // @[RocketCore.scala:1341:{19,49}]
wire _taken_rviImm_b11_T_9 = taken_rviImm_sign; // @[RocketCore.scala:1341:19, :1346:18]
wire taken_rviImm_hi_hi_hi = taken_rviImm_sign; // @[RocketCore.scala:1341:19, :1355:8]
wire [10:0] _taken_rviImm_b30_20_T_1 = taken_rviBits[30:20]; // @[RocketCore.scala:1342:41]
wire [10:0] _taken_rviImm_b30_20_T_4 = taken_rviBits[30:20]; // @[RocketCore.scala:1342:41]
wire [10:0] _taken_rviImm_b30_20_T_2 = _taken_rviImm_b30_20_T_1; // @[RocketCore.scala:1342:{41,49}]
wire [10:0] taken_rviImm_b30_20 = {11{taken_rviImm_sign}}; // @[RocketCore.scala:1341:19, :1342:21]
wire [10:0] taken_rviImm_hi_hi_lo = taken_rviImm_b30_20; // @[RocketCore.scala:1342:21, :1355:8]
wire [7:0] _taken_rviImm_b19_12_T_3 = taken_rviBits[19:12]; // @[RocketCore.scala:1343:65]
wire [7:0] _taken_rviImm_b19_12_T_8 = taken_rviBits[19:12]; // @[RocketCore.scala:1343:65]
wire [7:0] _taken_rviImm_b19_12_T_4 = _taken_rviImm_b19_12_T_3; // @[RocketCore.scala:1343:{65,73}]
wire [7:0] taken_rviImm_b19_12 = _taken_rviImm_b19_12_T_4; // @[RocketCore.scala:1343:{21,73}]
wire [7:0] taken_rviImm_hi_lo_hi = taken_rviImm_b19_12; // @[RocketCore.scala:1343:21, :1355:8]
wire _taken_rviImm_b11_T_4 = taken_rviBits[20]; // @[RocketCore.scala:1345:39]
wire _taken_rviImm_b0_T_3 = taken_rviBits[20]; // @[RocketCore.scala:1345:39, :1352:37]
wire _taken_rviImm_b11_T_15 = taken_rviBits[20]; // @[RocketCore.scala:1345:39]
wire _taken_rviImm_b0_T_11 = taken_rviBits[20]; // @[RocketCore.scala:1345:39, :1352:37]
wire _taken_rviImm_b11_T_5 = _taken_rviImm_b11_T_4; // @[RocketCore.scala:1345:{39,44}]
wire _taken_rviImm_b11_T_10 = _taken_rviImm_b11_T_5; // @[RocketCore.scala:1345:{18,44}]
wire _taken_rviImm_b11_T_8 = _taken_rviImm_b11_T_7; // @[RocketCore.scala:1346:{39,43}]
wire taken_rviImm_b11 = _taken_rviImm_b11_T_10; // @[RocketCore.scala:1344:18, :1345:18]
wire taken_rviImm_hi_lo_lo = taken_rviImm_b11; // @[RocketCore.scala:1344:18, :1355:8]
wire [5:0] _taken_rviImm_b10_5_T_3 = taken_rviBits[30:25]; // @[RocketCore.scala:1347:62]
wire [5:0] _taken_rviImm_b10_5_T_7 = taken_rviBits[30:25]; // @[RocketCore.scala:1347:62]
wire [5:0] taken_rviImm_b10_5 = _taken_rviImm_b10_5_T_3; // @[RocketCore.scala:1347:{20,62}]
wire [3:0] _taken_rviImm_b4_1_T_4 = taken_rviBits[11:8]; // @[RocketCore.scala:1349:57]
wire [3:0] _taken_rviImm_b4_1_T_14 = taken_rviBits[11:8]; // @[RocketCore.scala:1349:57]
wire [3:0] _taken_rviImm_b4_1_T_6 = taken_rviBits[19:16]; // @[RocketCore.scala:1350:39]
wire [3:0] _taken_rviImm_b4_1_T_16 = taken_rviBits[19:16]; // @[RocketCore.scala:1350:39]
wire [3:0] _taken_rviImm_b4_1_T_7 = taken_rviBits[24:21]; // @[RocketCore.scala:1350:52]
wire [3:0] _taken_rviImm_b4_1_T_17 = taken_rviBits[24:21]; // @[RocketCore.scala:1350:52]
wire [3:0] _taken_rviImm_b4_1_T_8 = _taken_rviImm_b4_1_T_7; // @[RocketCore.scala:1350:{19,52}]
wire [3:0] _taken_rviImm_b4_1_T_9 = _taken_rviImm_b4_1_T_8; // @[RocketCore.scala:1349:19, :1350:19]
wire [3:0] taken_rviImm_b4_1 = _taken_rviImm_b4_1_T_9; // @[RocketCore.scala:1348:19, :1349:19]
wire _taken_rviImm_b0_T_5 = taken_rviBits[15]; // @[RocketCore.scala:1353:37]
wire _taken_rviImm_b0_T_13 = taken_rviBits[15]; // @[RocketCore.scala:1353:37]
wire [9:0] taken_rviImm_lo_hi = {taken_rviImm_b10_5, taken_rviImm_b4_1}; // @[RocketCore.scala:1347:20, :1348:19, :1355:8]
wire [10:0] taken_rviImm_lo = {taken_rviImm_lo_hi, 1'h0}; // @[RocketCore.scala:1355:8]
wire [8:0] taken_rviImm_hi_lo = {taken_rviImm_hi_lo_hi, taken_rviImm_hi_lo_lo}; // @[RocketCore.scala:1355:8]
wire [11:0] taken_rviImm_hi_hi = {taken_rviImm_hi_hi_hi, taken_rviImm_hi_hi_lo}; // @[RocketCore.scala:1355:8]
wire [20:0] taken_rviImm_hi = {taken_rviImm_hi_hi, taken_rviImm_hi_lo}; // @[RocketCore.scala:1355:8]
wire [31:0] _taken_rviImm_T_1 = {taken_rviImm_hi, taken_rviImm_lo}; // @[RocketCore.scala:1355:8]
wire [31:0] _taken_rviImm_T_2 = _taken_rviImm_T_1; // @[RocketCore.scala:1355:{8,53}]
wire _taken_rviImm_sign_T_5 = _taken_rviImm_sign_T_4; // @[RocketCore.scala:1341:{44,49}]
wire taken_rviImm_sign_1 = _taken_rviImm_sign_T_5; // @[RocketCore.scala:1341:{19,49}]
wire taken_rviImm_hi_hi_hi_1 = taken_rviImm_sign_1; // @[RocketCore.scala:1341:19, :1355:8]
wire [10:0] _taken_rviImm_b30_20_T_5 = _taken_rviImm_b30_20_T_4; // @[RocketCore.scala:1342:{41,49}]
wire [10:0] taken_rviImm_b30_20_1 = {11{taken_rviImm_sign_1}}; // @[RocketCore.scala:1341:19, :1342:21]
wire [10:0] taken_rviImm_hi_hi_lo_1 = taken_rviImm_b30_20_1; // @[RocketCore.scala:1342:21, :1355:8]
wire [7:0] _taken_rviImm_b19_12_T_9 = _taken_rviImm_b19_12_T_8; // @[RocketCore.scala:1343:{65,73}]
wire [7:0] taken_rviImm_b19_12_1 = {8{taken_rviImm_sign_1}}; // @[RocketCore.scala:1341:19, :1343:21]
wire [7:0] taken_rviImm_hi_lo_hi_1 = taken_rviImm_b19_12_1; // @[RocketCore.scala:1343:21, :1355:8]
wire _taken_rviImm_b11_T_16 = _taken_rviImm_b11_T_15; // @[RocketCore.scala:1345:{39,44}]
wire _taken_rviImm_b11_T_19 = _taken_rviImm_b11_T_18; // @[RocketCore.scala:1346:{39,43}]
wire _taken_rviImm_b11_T_20 = _taken_rviImm_b11_T_19; // @[RocketCore.scala:1346:{18,43}]
wire _taken_rviImm_b11_T_21 = _taken_rviImm_b11_T_20; // @[RocketCore.scala:1345:18, :1346:18]
wire taken_rviImm_b11_1 = _taken_rviImm_b11_T_21; // @[RocketCore.scala:1344:18, :1345:18]
wire taken_rviImm_hi_lo_lo_1 = taken_rviImm_b11_1; // @[RocketCore.scala:1344:18, :1355:8]
wire [5:0] taken_rviImm_b10_5_1 = _taken_rviImm_b10_5_T_7; // @[RocketCore.scala:1347:{20,62}]
wire [3:0] _taken_rviImm_b4_1_T_19 = _taken_rviImm_b4_1_T_14; // @[RocketCore.scala:1349:{19,57}]
wire [3:0] _taken_rviImm_b4_1_T_18 = _taken_rviImm_b4_1_T_17; // @[RocketCore.scala:1350:{19,52}]
wire [3:0] taken_rviImm_b4_1_1 = _taken_rviImm_b4_1_T_19; // @[RocketCore.scala:1348:19, :1349:19]
wire [9:0] taken_rviImm_lo_hi_1 = {taken_rviImm_b10_5_1, taken_rviImm_b4_1_1}; // @[RocketCore.scala:1347:20, :1348:19, :1355:8]
wire [10:0] taken_rviImm_lo_1 = {taken_rviImm_lo_hi_1, 1'h0}; // @[RocketCore.scala:1355:8]
wire [8:0] taken_rviImm_hi_lo_1 = {taken_rviImm_hi_lo_hi_1, taken_rviImm_hi_lo_lo_1}; // @[RocketCore.scala:1355:8]
wire [11:0] taken_rviImm_hi_hi_1 = {taken_rviImm_hi_hi_hi_1, taken_rviImm_hi_hi_lo_1}; // @[RocketCore.scala:1355:8]
wire [20:0] taken_rviImm_hi_1 = {taken_rviImm_hi_hi_1, taken_rviImm_hi_lo_1}; // @[RocketCore.scala:1355:8]
wire [31:0] _taken_rviImm_T_3 = {taken_rviImm_hi_1, taken_rviImm_lo_1}; // @[RocketCore.scala:1355:8]
wire [31:0] _taken_rviImm_T_4 = _taken_rviImm_T_3; // @[RocketCore.scala:1355:{8,53}]
wire [31:0] taken_rviImm = _taken_rviImm_T ? _taken_rviImm_T_2 : _taken_rviImm_T_4; // @[RocketCore.scala:1355:53]
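// Slot-0 prediction decisions: taken combines the decode classes with the BHT prediction
// (taken_predict_taken); predictReturn/predictJump/predictBranch feed the redirect logic;
// the RAS is updated on calls/returns; and taken_npc is the branch/jump target (PC plus the
// decoded immediate, minus 2 when the 32-bit instruction started in the previous slot).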
wire taken_predict_taken = _taken_predict_taken_T; // @[Frontend.scala:253:54]
wire _taken_taken_T_1 = taken_rviBranch & taken_predict_taken; // @[Frontend.scala:239:36, :253:54, :255:53]
wire _taken_taken_T_2 = _taken_taken_T | _taken_taken_T_1; // @[Frontend.scala:255:{29,40,53}]
wire _taken_taken_T_3 = taken_prevRVI & _taken_taken_T_2; // @[Frontend.scala:234:31, :255:{17,40}]
wire _taken_taken_T_4 = taken_rvcJump | taken_rvcJALR; // @[Frontend.scala:246:47, :250:49, :256:27]
wire _taken_taken_T_5 = _taken_taken_T_4 | taken_rvcJR; // @[Frontend.scala:248:46, :256:{27,38}]
wire _taken_taken_T_6 = taken_rvcBranch & taken_predict_taken; // @[Frontend.scala:244:52, :253:54, :256:60]
wire _taken_taken_T_7 = _taken_taken_T_5 | _taken_taken_T_6; // @[Frontend.scala:256:{38,47,60}]
wire _taken_taken_T_8 = taken_valid & _taken_taken_T_7; // @[Frontend.scala:235:44, :256:{15,47}]
wire taken_taken = _taken_taken_T_3 | _taken_taken_T_8; // @[Frontend.scala:255:{17,71}, :256:15]
wire _taken_T_28 = taken_taken; // @[Frontend.scala:255:71, :313:51]
wire _taken_predictReturn_T = taken_prevRVI & taken_rviReturn; // @[Frontend.scala:234:31, :242:46, :257:61]
wire _taken_predictReturn_T_1 = taken_valid & taken_rvcReturn; // @[Frontend.scala:235:44, :249:29, :257:83]
wire _taken_predictReturn_T_2 = _taken_predictReturn_T | _taken_predictReturn_T_1; // @[Frontend.scala:257:{61,74,83}]
wire taken_predictReturn = _btb_io_ras_head_valid & _taken_predictReturn_T_2; // @[Frontend.scala:198:21, :257:{49,74}]
wire _taken_predictJump_T = taken_prevRVI & taken_rviJump; // @[Frontend.scala:234:31, :240:34, :258:33]
wire _taken_predictJump_T_1 = taken_valid & taken_rvcJump; // @[Frontend.scala:235:44, :246:47, :258:53]
wire taken_predictJump = _taken_predictJump_T | _taken_predictJump_T_1; // @[Frontend.scala:258:{33,44,53}]
wire _GEN_3 = taken_prevRVI & taken_rviBranch; // @[Frontend.scala:234:31, :239:36, :259:53]
wire _taken_predictBranch_T; // @[Frontend.scala:259:53]
assign _taken_predictBranch_T = _GEN_3; // @[Frontend.scala:259:53]
wire _taken_T_19; // @[Frontend.scala:294:23]
assign _taken_T_19 = _GEN_3; // @[Frontend.scala:259:53, :294:23]
wire _GEN_4 = taken_valid & taken_rvcBranch; // @[Frontend.scala:235:44, :244:52, :259:75]
wire _taken_predictBranch_T_1; // @[Frontend.scala:259:75]
assign _taken_predictBranch_T_1 = _GEN_4; // @[Frontend.scala:259:75]
wire _taken_T_20; // @[Frontend.scala:294:45]
assign _taken_T_20 = _GEN_4; // @[Frontend.scala:259:75, :294:45]
wire _taken_predictBranch_T_2 = _taken_predictBranch_T | _taken_predictBranch_T_1; // @[Frontend.scala:259:{53,66,75}]
wire taken_predictBranch = taken_predict_taken & _taken_predictBranch_T_2; // @[Frontend.scala:253:54, :259:{41,66}]
wire _GEN_5 = s2_valid & s2_btb_resp_valid; // @[Frontend.scala:108:25, :118:44, :261:22]
wire _taken_T; // @[Frontend.scala:261:22]
assign _taken_T = _GEN_5; // @[Frontend.scala:261:22]
wire _taken_T_29; // @[Frontend.scala:261:22]
assign _taken_T_29 = _GEN_5; // @[Frontend.scala:261:22]
wire _taken_T_1 = ~s2_btb_resp_bits_bridx; // @[Frontend.scala:119:29, :261:69]
wire _taken_T_2 = _taken_T & _taken_T_1; // @[Frontend.scala:261:{22,43,69}]
wire _taken_T_3 = _taken_T_2 & taken_valid; // @[Frontend.scala:235:44, :261:{43,79}]
wire _taken_T_4 = ~taken_rvc; // @[Frontend.scala:233:45, :261:91]
wire _taken_T_5 = _taken_T_3 & _taken_T_4; // @[Frontend.scala:261:{79,88,91}]
wire _taken_btb_io_ras_update_valid_T_1 = ~wrong_path; // @[Frontend.scala:126:27, :273:54, :319:52]
wire _taken_btb_io_ras_update_valid_T_2 = _taken_btb_io_ras_update_valid_T & _taken_btb_io_ras_update_valid_T_1; // @[Decoupled.scala:51:35]
wire _taken_btb_io_ras_update_valid_T_3 = taken_rviCall | taken_rviReturn; // @[Frontend.scala:242:46, :243:42, :273:90]
wire _taken_btb_io_ras_update_valid_T_4 = taken_prevRVI & _taken_btb_io_ras_update_valid_T_3; // @[Frontend.scala:234:31, :273:{78,90}]
wire _taken_btb_io_ras_update_valid_T_5 = taken_rvcCall | taken_rvcReturn; // @[Frontend.scala:249:29, :251:28, :273:125]
wire _taken_btb_io_ras_update_valid_T_6 = taken_valid & _taken_btb_io_ras_update_valid_T_5; // @[Frontend.scala:235:44, :273:{113,125}]
wire _taken_btb_io_ras_update_valid_T_7 = _taken_btb_io_ras_update_valid_T_4 | _taken_btb_io_ras_update_valid_T_6; // @[Frontend.scala:273:{78,104,113}]
wire _taken_btb_io_ras_update_valid_T_8 = _taken_btb_io_ras_update_valid_T_2 & _taken_btb_io_ras_update_valid_T_7; // @[Frontend.scala:273:{51,66,104}]
wire _taken_btb_io_ras_update_bits_cfiType_T = taken_prevRVI ? taken_rviReturn : taken_rvcReturn; // @[Frontend.scala:234:31, :242:46, :249:29, :274:50]
wire _taken_btb_io_ras_update_bits_cfiType_T_1 = taken_prevRVI ? taken_rviCall : taken_rvcCall; // @[Frontend.scala:234:31, :243:42, :251:28, :275:50]
wire _taken_btb_io_ras_update_bits_cfiType_T_2 = taken_prevRVI ? taken_rviBranch : taken_rvcBranch; // @[Frontend.scala:234:31, :239:36, :244:52, :276:50]
wire _taken_btb_io_ras_update_bits_cfiType_T_4 = _taken_btb_io_ras_update_bits_cfiType_T_2; // @[Frontend.scala:276:{50,82}]
wire _taken_btb_io_ras_update_bits_cfiType_T_5 = ~_taken_btb_io_ras_update_bits_cfiType_T_4; // @[Frontend.scala:276:{46,82}]
wire [1:0] _taken_btb_io_ras_update_bits_cfiType_T_6 = _taken_btb_io_ras_update_bits_cfiType_T_1 ? 2'h2 : {1'h0, _taken_btb_io_ras_update_bits_cfiType_T_5}; // @[Frontend.scala:275:{46,50}, :276:46]
wire [1:0] _taken_btb_io_ras_update_bits_cfiType_T_7 = _taken_btb_io_ras_update_bits_cfiType_T ? 2'h3 : _taken_btb_io_ras_update_bits_cfiType_T_6; // @[Frontend.scala:274:{46,50}, :275:46]
wire _taken_T_7 = ~s2_btb_taken; // @[Frontend.scala:120:40, :279:15]
wire _taken_T_9 = _taken_T_8 & taken_taken; // @[Decoupled.scala:51:35]
wire _taken_T_10 = ~taken_predictBranch; // @[Frontend.scala:259:41, :280:44]
wire _taken_T_11 = _taken_T_9 & _taken_T_10; // @[Frontend.scala:280:{32,41,44}]
wire _taken_T_12 = ~taken_predictJump; // @[Frontend.scala:258:44, :280:62]
wire _taken_T_13 = _taken_T_11 & _taken_T_12; // @[Frontend.scala:280:{41,59,62}]
wire _taken_T_14 = ~taken_predictReturn; // @[Frontend.scala:257:49, :280:78]
wire _taken_T_15 = _taken_T_13 & _taken_T_14; // @[Frontend.scala:280:{59,75,78}]
wire _taken_T_16 = s2_valid & taken_predictReturn; // @[Frontend.scala:108:25, :257:49, :283:26]
wire _taken_T_17 = taken_predictBranch | taken_predictJump; // @[Frontend.scala:258:44, :259:41, :286:44]
wire _taken_T_18 = s2_valid & _taken_T_17; // @[Frontend.scala:108:25, :286:{26,44}]
wire [39:0] _taken_npc_T = taken_pc; // @[Frontend.scala:287:33, :289:32]
wire [32:0] _taken_npc_T_1 = {taken_rviImm[31], taken_rviImm} - 33'h2; // @[Frontend.scala:252:23, :289:61]
wire [32:0] _taken_npc_T_2 = taken_prevRVI ? _taken_npc_T_1 : {{12{taken_rvcImm[20]}}, taken_rvcImm}; // @[Frontend.scala:234:31, :247:23, :289:{44,61}]
wire [40:0] _taken_npc_T_3 = {_taken_npc_T[39], _taken_npc_T} + {{8{_taken_npc_T_2[32]}}, _taken_npc_T_2}; // @[Frontend.scala:289:{32,39,44}]
wire [39:0] _taken_npc_T_4 = _taken_npc_T_3[39:0]; // @[Frontend.scala:289:39]
wire [39:0] taken_npc = _taken_npc_T_4; // @[Frontend.scala:289:39]
wire [39:0] _taken_predicted_npc_T = taken_npc; // @[Frontend.scala:289:39, :291:34]
wire _taken_T_21 = _taken_T_19 | _taken_T_20; // @[Frontend.scala:294:{23,36,45}]
wire _taken_btb_io_bht_advance_valid_T_1 = ~wrong_path; // @[Frontend.scala:126:27, :295:57, :319:52]
wire _taken_btb_io_bht_advance_valid_T_2 = _taken_btb_io_bht_advance_valid_T & _taken_btb_io_bht_advance_valid_T_1; // @[Decoupled.scala:51:35]
wire _taken_T_22 = ~s2_btb_resp_valid; // @[Frontend.scala:118:44, :298:15]
wire _taken_T_24 = taken_predictBranch & _taken_T_23; // @[Frontend.scala:259:41, :298:52]
wire _taken_T_25 = _taken_T_24 | taken_predictJump; // @[Frontend.scala:258:44, :298:{52,91}]
wire _taken_T_26 = _taken_T_25 | taken_predictReturn; // @[Frontend.scala:257:49, :298:{91,106}]
wire _taken_T_27 = _taken_T_22 & _taken_T_26; // @[Frontend.scala:298:{15,34,106}]
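// Predecode and prediction for the second halfword of the fetch group (slot 1); this mirrors
// the slot-0 logic above with _1-suffixed nets and taken_pc_1 = s2_base_pc + 2.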
wire _taken_prevRVI_T_4 = _taken_prevRVI_T_3 != 2'h3; // @[Frontend.scala:233:{39,45}]
wire _taken_prevRVI_T_5 = ~_taken_prevRVI_T_4; // @[Frontend.scala:233:45, :234:34]
wire taken_prevRVI_1 = taken_valid & _taken_prevRVI_T_5; // @[Frontend.scala:234:{31,34}, :235:44]
wire _taken_valid_T_2 = _fq_io_enq_bits_mask_T_1[1]; // @[Frontend.scala:189:50, :235:38]
wire _taken_valid_T_3 = ~taken_prevRVI_1; // @[Frontend.scala:234:31, :235:47]
wire taken_valid_1 = _taken_valid_T_2 & _taken_valid_T_3; // @[Frontend.scala:235:{38,44,47}]
wire [15:0] taken_bits_1 = _icache_io_resp_bits_data[31:16]; // @[Frontend.scala:70:26, :236:37]
wire [1:0] _taken_rvc_T_1 = taken_bits_1[1:0]; // @[Frontend.scala:233:39, :236:37]
wire taken_rvc_1 = _taken_rvc_T_1 != 2'h3; // @[Frontend.scala:233:{39,45}]
wire [31:0] taken_rviBits_1 = {taken_bits_1, taken_bits}; // @[Frontend.scala:236:37, :238:24]
wire [6:0] _taken_rviBranch_T_1 = taken_rviBits_1[6:0]; // @[Frontend.scala:238:24, :239:30]
wire [6:0] _taken_rviJump_T_1 = taken_rviBits_1[6:0]; // @[Frontend.scala:238:24, :239:30, :240:28]
wire [6:0] _taken_rviJALR_T_1 = taken_rviBits_1[6:0]; // @[Frontend.scala:238:24, :239:30, :241:28]
wire taken_rviBranch_1 = _taken_rviBranch_T_1 == 7'h63; // @[Frontend.scala:239:{30,36}]
wire taken_rviJump_1 = _taken_rviJump_T_1 == 7'h6F; // @[Frontend.scala:240:{28,34}]
wire taken_rviJALR_1 = _taken_rviJALR_T_1 == 7'h67; // @[Frontend.scala:241:{28,34}]
wire _taken_rviReturn_T_6 = taken_rviBits_1[7]; // @[Frontend.scala:238:24, :242:42]
wire _taken_rviCall_T_3 = taken_rviBits_1[7]; // @[Frontend.scala:238:24, :242:42, :243:52]
wire _taken_rviImm_b11_T_29 = taken_rviBits_1[7]; // @[RocketCore.scala:1346:39]
wire _taken_rviImm_b0_T_17 = taken_rviBits_1[7]; // @[RocketCore.scala:1351:37]
wire _taken_rviImm_b11_T_40 = taken_rviBits_1[7]; // @[RocketCore.scala:1346:39]
wire _taken_rviImm_b0_T_25 = taken_rviBits_1[7]; // @[RocketCore.scala:1351:37]
wire _taken_rviReturn_T_7 = ~_taken_rviReturn_T_6; // @[Frontend.scala:242:{34,42}]
wire _taken_rviReturn_T_8 = taken_rviJALR_1 & _taken_rviReturn_T_7; // @[Frontend.scala:241:34, :242:{31,34}]
wire [4:0] _taken_rviReturn_T_9 = taken_rviBits_1[19:15]; // @[Frontend.scala:238:24, :242:77]
wire [4:0] _taken_rviReturn_T_10 = _taken_rviReturn_T_9 & 5'h1B; // @[Frontend.scala:242:{66,77}]
wire _taken_rviReturn_T_11 = _taken_rviReturn_T_10 == 5'h1; // @[Frontend.scala:242:66]
wire taken_rviReturn_1 = _taken_rviReturn_T_8 & _taken_rviReturn_T_11; // @[Frontend.scala:242:{31,46,66}]
wire _GEN_6 = taken_rviJALR_1 | taken_rviJump_1; // @[Frontend.scala:240:34, :241:34, :243:30]
wire _taken_rviCall_T_2; // @[Frontend.scala:243:30]
assign _taken_rviCall_T_2 = _GEN_6; // @[Frontend.scala:243:30]
wire _taken_taken_T_9; // @[Frontend.scala:255:29]
assign _taken_taken_T_9 = _GEN_6; // @[Frontend.scala:243:30, :255:29]
wire taken_rviCall_1 = _taken_rviCall_T_2 & _taken_rviCall_T_3; // @[Frontend.scala:243:{30,42,52}]
wire [15:0] _GEN_7 = taken_bits_1 & 16'hE003; // @[Frontend.scala:236:37, :244:28]
wire [15:0] _taken_rvcBranch_T_4; // @[Frontend.scala:244:28]
assign _taken_rvcBranch_T_4 = _GEN_7; // @[Frontend.scala:244:28]
wire [15:0] _taken_rvcBranch_T_6; // @[Frontend.scala:244:60]
assign _taken_rvcBranch_T_6 = _GEN_7; // @[Frontend.scala:244:{28,60}]
wire [15:0] _taken_rvcJAL_T_2; // @[Frontend.scala:245:43]
assign _taken_rvcJAL_T_2 = _GEN_7; // @[Frontend.scala:244:28, :245:43]
wire [15:0] _taken_rvcJump_T_2; // @[Frontend.scala:246:26]
assign _taken_rvcJump_T_2 = _GEN_7; // @[Frontend.scala:244:28, :246:26]
wire _taken_rvcBranch_T_5 = _taken_rvcBranch_T_4 == 16'hC001; // @[Frontend.scala:244:28]
wire _taken_rvcBranch_T_7 = _taken_rvcBranch_T_6 == 16'hE001; // @[Frontend.scala:244:60]
wire taken_rvcBranch_1 = _taken_rvcBranch_T_5 | _taken_rvcBranch_T_7; // @[Frontend.scala:244:{28,52,60}]
wire _taken_rvcJAL_T_3 = _taken_rvcJAL_T_2 == 16'h2001; // @[Frontend.scala:245:43]
wire _taken_rvcJump_T_3 = _taken_rvcJump_T_2 == 16'hA001; // @[Frontend.scala:246:26]
wire taken_rvcJump_1 = _taken_rvcJump_T_3; // @[Frontend.scala:246:{26,47}]
wire _taken_rvcImm_T_20 = taken_bits_1[14]; // @[Frontend.scala:236:37, :247:28]
wire _taken_rvcImm_T_21 = taken_bits_1[12]; // @[RVC.scala:45:27]
wire _taken_rvcImm_T_29 = taken_bits_1[12]; // @[RVC.scala:44:28, :45:27]
wire [4:0] _taken_rvcImm_T_22 = {5{_taken_rvcImm_T_21}}; // @[RVC.scala:45:{22,27}]
wire [1:0] _taken_rvcImm_T_23 = taken_bits_1[6:5]; // @[RVC.scala:45:35]
wire _taken_rvcImm_T_24 = taken_bits_1[2]; // @[RVC.scala:45:43]
wire _taken_rvcImm_T_35 = taken_bits_1[2]; // @[RVC.scala:44:63, :45:43]
wire [1:0] _taken_rvcImm_T_25 = taken_bits_1[11:10]; // @[RVC.scala:45:49]
wire [1:0] _taken_rvcImm_T_26 = taken_bits_1[4:3]; // @[RVC.scala:45:59]
wire [3:0] taken_rvcImm_lo_hi_2 = {_taken_rvcImm_T_25, _taken_rvcImm_T_26}; // @[RVC.scala:45:{17,49,59}]
wire [4:0] taken_rvcImm_lo_2 = {taken_rvcImm_lo_hi_2, 1'h0}; // @[RVC.scala:45:17]
wire [6:0] taken_rvcImm_hi_hi_2 = {_taken_rvcImm_T_22, _taken_rvcImm_T_23}; // @[RVC.scala:45:{17,22,35}]
wire [7:0] taken_rvcImm_hi_2 = {taken_rvcImm_hi_hi_2, _taken_rvcImm_T_24}; // @[RVC.scala:45:{17,43}]
wire [12:0] _taken_rvcImm_T_27 = {taken_rvcImm_hi_2, taken_rvcImm_lo_2}; // @[RVC.scala:45:17]
wire [12:0] _taken_rvcImm_T_28 = _taken_rvcImm_T_27; // @[RVC.scala:45:17]
wire [9:0] _taken_rvcImm_T_30 = {10{_taken_rvcImm_T_29}}; // @[RVC.scala:44:{22,28}]
wire _taken_rvcImm_T_31 = taken_bits_1[8]; // @[RVC.scala:44:36]
wire [1:0] _taken_rvcImm_T_32 = taken_bits_1[10:9]; // @[RVC.scala:44:42]
wire _taken_rvcImm_T_33 = taken_bits_1[6]; // @[RVC.scala:44:51]
wire _taken_rvcImm_T_34 = taken_bits_1[7]; // @[RVC.scala:44:57]
wire _taken_rvcImm_T_36 = taken_bits_1[11]; // @[RVC.scala:44:69]
wire [2:0] _taken_rvcImm_T_37 = taken_bits_1[5:3]; // @[RVC.scala:44:76]
wire [3:0] taken_rvcImm_lo_lo_1 = {_taken_rvcImm_T_37, 1'h0}; // @[RVC.scala:44:{17,76}]
wire [1:0] taken_rvcImm_lo_hi_3 = {_taken_rvcImm_T_35, _taken_rvcImm_T_36}; // @[RVC.scala:44:{17,63,69}]
wire [5:0] taken_rvcImm_lo_3 = {taken_rvcImm_lo_hi_3, taken_rvcImm_lo_lo_1}; // @[RVC.scala:44:17]
wire [1:0] taken_rvcImm_hi_lo_1 = {_taken_rvcImm_T_33, _taken_rvcImm_T_34}; // @[RVC.scala:44:{17,51,57}]
wire [10:0] taken_rvcImm_hi_hi_hi_1 = {_taken_rvcImm_T_30, _taken_rvcImm_T_31}; // @[RVC.scala:44:{17,22,36}]
wire [12:0] taken_rvcImm_hi_hi_3 = {taken_rvcImm_hi_hi_hi_1, _taken_rvcImm_T_32}; // @[RVC.scala:44:{17,42}]
wire [14:0] taken_rvcImm_hi_3 = {taken_rvcImm_hi_hi_3, taken_rvcImm_hi_lo_1}; // @[RVC.scala:44:17]
wire [20:0] _taken_rvcImm_T_38 = {taken_rvcImm_hi_3, taken_rvcImm_lo_3}; // @[RVC.scala:44:17]
wire [20:0] _taken_rvcImm_T_39 = _taken_rvcImm_T_38; // @[RVC.scala:44:17]
wire [20:0] taken_rvcImm_1 = _taken_rvcImm_T_20 ? {{8{_taken_rvcImm_T_28[12]}}, _taken_rvcImm_T_28} : _taken_rvcImm_T_39; // @[Frontend.scala:247:{23,28,72,118}]
wire [15:0] _GEN_8 = taken_bits_1 & 16'hF003; // @[Frontend.scala:236:37, :248:24]
wire [15:0] _taken_rvcJR_T_4; // @[Frontend.scala:248:24]
assign _taken_rvcJR_T_4 = _GEN_8; // @[Frontend.scala:248:24]
wire [15:0] _taken_rvcJALR_T_4; // @[Frontend.scala:250:26]
assign _taken_rvcJALR_T_4 = _GEN_8; // @[Frontend.scala:248:24, :250:26]
wire _taken_rvcJR_T_5 = _taken_rvcJR_T_4 == 16'h8002; // @[Frontend.scala:248:24]
wire [4:0] _taken_rvcJR_T_6 = taken_bits_1[6:2]; // @[Frontend.scala:236:37, :248:53]
wire [4:0] _taken_rvcJALR_T_6 = taken_bits_1[6:2]; // @[Frontend.scala:236:37, :248:53, :250:56]
wire _taken_rvcJR_T_7 = _taken_rvcJR_T_6 == 5'h0; // @[Frontend.scala:248:{53,59}]
wire taken_rvcJR_1 = _taken_rvcJR_T_5 & _taken_rvcJR_T_7; // @[Frontend.scala:248:{24,46,59}]
wire [4:0] _taken_rvcReturn_T_3 = taken_bits_1[11:7]; // @[Frontend.scala:236:37, :249:57]
wire [4:0] _taken_rvcReturn_T_4 = _taken_rvcReturn_T_3 & 5'h1B; // @[Frontend.scala:249:{49,57}]
wire _taken_rvcReturn_T_5 = _taken_rvcReturn_T_4 == 5'h1; // @[Frontend.scala:249:49]
wire taken_rvcReturn_1 = taken_rvcJR_1 & _taken_rvcReturn_T_5; // @[Frontend.scala:248:46, :249:{29,49}]
wire _taken_rvcJALR_T_5 = _taken_rvcJALR_T_4 == 16'h9002; // @[Frontend.scala:250:26]
wire _taken_rvcJALR_T_7 = _taken_rvcJALR_T_6 == 5'h0; // @[Frontend.scala:250:{56,62}]
wire taken_rvcJALR_1 = _taken_rvcJALR_T_5 & _taken_rvcJALR_T_7; // @[Frontend.scala:250:{26,49,62}]
wire taken_rvcCall_1 = taken_rvcJALR_1; // @[Frontend.scala:250:49, :251:28]
wire _taken_rviImm_T_5 = taken_rviBits_1[3]; // @[Frontend.scala:238:24, :252:31]
wire _taken_rviImm_sign_T_7 = taken_rviBits_1[31]; // @[RocketCore.scala:1341:44]
wire _taken_rviImm_sign_T_10 = taken_rviBits_1[31]; // @[RocketCore.scala:1341:44]
wire _taken_rviImm_sign_T_8 = _taken_rviImm_sign_T_7; // @[RocketCore.scala:1341:{44,49}]
wire taken_rviImm_sign_2 = _taken_rviImm_sign_T_8; // @[RocketCore.scala:1341:{19,49}]
wire _taken_rviImm_b11_T_31 = taken_rviImm_sign_2; // @[RocketCore.scala:1341:19, :1346:18]
wire taken_rviImm_hi_hi_hi_2 = taken_rviImm_sign_2; // @[RocketCore.scala:1341:19, :1355:8]
wire [10:0] _taken_rviImm_b30_20_T_7 = taken_rviBits_1[30:20]; // @[RocketCore.scala:1342:41]
wire [10:0] _taken_rviImm_b30_20_T_10 = taken_rviBits_1[30:20]; // @[RocketCore.scala:1342:41]
wire [10:0] _taken_rviImm_b30_20_T_8 = _taken_rviImm_b30_20_T_7; // @[RocketCore.scala:1342:{41,49}]
wire [10:0] taken_rviImm_b30_20_2 = {11{taken_rviImm_sign_2}}; // @[RocketCore.scala:1341:19, :1342:21]
wire [10:0] taken_rviImm_hi_hi_lo_2 = taken_rviImm_b30_20_2; // @[RocketCore.scala:1342:21, :1355:8]
wire [7:0] _taken_rviImm_b19_12_T_13 = taken_rviBits_1[19:12]; // @[RocketCore.scala:1343:65]
wire [7:0] _taken_rviImm_b19_12_T_18 = taken_rviBits_1[19:12]; // @[RocketCore.scala:1343:65]
wire [7:0] _taken_rviImm_b19_12_T_14 = _taken_rviImm_b19_12_T_13; // @[RocketCore.scala:1343:{65,73}]
wire [7:0] taken_rviImm_b19_12_2 = _taken_rviImm_b19_12_T_14; // @[RocketCore.scala:1343:{21,73}]
wire [7:0] taken_rviImm_hi_lo_hi_2 = taken_rviImm_b19_12_2; // @[RocketCore.scala:1343:21, :1355:8]
wire _taken_rviImm_b11_T_26 = taken_rviBits_1[20]; // @[RocketCore.scala:1345:39]
wire _taken_rviImm_b0_T_19 = taken_rviBits_1[20]; // @[RocketCore.scala:1345:39, :1352:37]
wire _taken_rviImm_b11_T_37 = taken_rviBits_1[20]; // @[RocketCore.scala:1345:39]
wire _taken_rviImm_b0_T_27 = taken_rviBits_1[20]; // @[RocketCore.scala:1345:39, :1352:37]
wire _taken_rviImm_b11_T_27 = _taken_rviImm_b11_T_26; // @[RocketCore.scala:1345:{39,44}]
wire _taken_rviImm_b11_T_32 = _taken_rviImm_b11_T_27; // @[RocketCore.scala:1345:{18,44}]
wire _taken_rviImm_b11_T_30 = _taken_rviImm_b11_T_29; // @[RocketCore.scala:1346:{39,43}]
wire taken_rviImm_b11_2 = _taken_rviImm_b11_T_32; // @[RocketCore.scala:1344:18, :1345:18]
wire taken_rviImm_hi_lo_lo_2 = taken_rviImm_b11_2; // @[RocketCore.scala:1344:18, :1355:8]
wire [5:0] _taken_rviImm_b10_5_T_11 = taken_rviBits_1[30:25]; // @[RocketCore.scala:1347:62]
wire [5:0] _taken_rviImm_b10_5_T_15 = taken_rviBits_1[30:25]; // @[RocketCore.scala:1347:62]
wire [5:0] taken_rviImm_b10_5_2 = _taken_rviImm_b10_5_T_11; // @[RocketCore.scala:1347:{20,62}]
wire [3:0] _taken_rviImm_b4_1_T_24 = taken_rviBits_1[11:8]; // @[RocketCore.scala:1349:57]
wire [3:0] _taken_rviImm_b4_1_T_34 = taken_rviBits_1[11:8]; // @[RocketCore.scala:1349:57]
wire [3:0] _taken_rviImm_b4_1_T_26 = taken_rviBits_1[19:16]; // @[RocketCore.scala:1350:39]
wire [3:0] _taken_rviImm_b4_1_T_36 = taken_rviBits_1[19:16]; // @[RocketCore.scala:1350:39]
wire [3:0] _taken_rviImm_b4_1_T_27 = taken_rviBits_1[24:21]; // @[RocketCore.scala:1350:52]
wire [3:0] _taken_rviImm_b4_1_T_37 = taken_rviBits_1[24:21]; // @[RocketCore.scala:1350:52]
wire [3:0] _taken_rviImm_b4_1_T_28 = _taken_rviImm_b4_1_T_27; // @[RocketCore.scala:1350:{19,52}]
wire [3:0] _taken_rviImm_b4_1_T_29 = _taken_rviImm_b4_1_T_28; // @[RocketCore.scala:1349:19, :1350:19]
wire [3:0] taken_rviImm_b4_1_2 = _taken_rviImm_b4_1_T_29; // @[RocketCore.scala:1348:19, :1349:19]
wire _taken_rviImm_b0_T_21 = taken_rviBits_1[15]; // @[RocketCore.scala:1353:37]
wire _taken_rviImm_b0_T_29 = taken_rviBits_1[15]; // @[RocketCore.scala:1353:37]
wire [9:0] taken_rviImm_lo_hi_2 = {taken_rviImm_b10_5_2, taken_rviImm_b4_1_2}; // @[RocketCore.scala:1347:20, :1348:19, :1355:8]
wire [10:0] taken_rviImm_lo_2 = {taken_rviImm_lo_hi_2, 1'h0}; // @[RocketCore.scala:1355:8]
wire [8:0] taken_rviImm_hi_lo_2 = {taken_rviImm_hi_lo_hi_2, taken_rviImm_hi_lo_lo_2}; // @[RocketCore.scala:1355:8]
wire [11:0] taken_rviImm_hi_hi_2 = {taken_rviImm_hi_hi_hi_2, taken_rviImm_hi_hi_lo_2}; // @[RocketCore.scala:1355:8]
wire [20:0] taken_rviImm_hi_2 = {taken_rviImm_hi_hi_2, taken_rviImm_hi_lo_2}; // @[RocketCore.scala:1355:8]
wire [31:0] _taken_rviImm_T_6 = {taken_rviImm_hi_2, taken_rviImm_lo_2}; // @[RocketCore.scala:1355:8]
wire [31:0] _taken_rviImm_T_7 = _taken_rviImm_T_6; // @[RocketCore.scala:1355:{8,53}]
wire _taken_rviImm_sign_T_11 = _taken_rviImm_sign_T_10; // @[RocketCore.scala:1341:{44,49}]
wire taken_rviImm_sign_3 = _taken_rviImm_sign_T_11; // @[RocketCore.scala:1341:{19,49}]
wire taken_rviImm_hi_hi_hi_3 = taken_rviImm_sign_3; // @[RocketCore.scala:1341:19, :1355:8]
wire [10:0] _taken_rviImm_b30_20_T_11 = _taken_rviImm_b30_20_T_10; // @[RocketCore.scala:1342:{41,49}]
wire [10:0] taken_rviImm_b30_20_3 = {11{taken_rviImm_sign_3}}; // @[RocketCore.scala:1341:19, :1342:21]
wire [10:0] taken_rviImm_hi_hi_lo_3 = taken_rviImm_b30_20_3; // @[RocketCore.scala:1342:21, :1355:8]
wire [7:0] _taken_rviImm_b19_12_T_19 = _taken_rviImm_b19_12_T_18; // @[RocketCore.scala:1343:{65,73}]
wire [7:0] taken_rviImm_b19_12_3 = {8{taken_rviImm_sign_3}}; // @[RocketCore.scala:1341:19, :1343:21]
wire [7:0] taken_rviImm_hi_lo_hi_3 = taken_rviImm_b19_12_3; // @[RocketCore.scala:1343:21, :1355:8]
wire _taken_rviImm_b11_T_38 = _taken_rviImm_b11_T_37; // @[RocketCore.scala:1345:{39,44}]
wire _taken_rviImm_b11_T_41 = _taken_rviImm_b11_T_40; // @[RocketCore.scala:1346:{39,43}]
wire _taken_rviImm_b11_T_42 = _taken_rviImm_b11_T_41; // @[RocketCore.scala:1346:{18,43}]
wire _taken_rviImm_b11_T_43 = _taken_rviImm_b11_T_42; // @[RocketCore.scala:1345:18, :1346:18]
wire taken_rviImm_b11_3 = _taken_rviImm_b11_T_43; // @[RocketCore.scala:1344:18, :1345:18]
wire taken_rviImm_hi_lo_lo_3 = taken_rviImm_b11_3; // @[RocketCore.scala:1344:18, :1355:8]
wire [5:0] taken_rviImm_b10_5_3 = _taken_rviImm_b10_5_T_15; // @[RocketCore.scala:1347:{20,62}]
wire [3:0] _taken_rviImm_b4_1_T_39 = _taken_rviImm_b4_1_T_34; // @[RocketCore.scala:1349:{19,57}]
wire [3:0] _taken_rviImm_b4_1_T_38 = _taken_rviImm_b4_1_T_37; // @[RocketCore.scala:1350:{19,52}]
wire [3:0] taken_rviImm_b4_1_3 = _taken_rviImm_b4_1_T_39; // @[RocketCore.scala:1348:19, :1349:19]
wire [9:0] taken_rviImm_lo_hi_3 = {taken_rviImm_b10_5_3, taken_rviImm_b4_1_3}; // @[RocketCore.scala:1347:20, :1348:19, :1355:8]
wire [10:0] taken_rviImm_lo_3 = {taken_rviImm_lo_hi_3, 1'h0}; // @[RocketCore.scala:1355:8]
wire [8:0] taken_rviImm_hi_lo_3 = {taken_rviImm_hi_lo_hi_3, taken_rviImm_hi_lo_lo_3}; // @[RocketCore.scala:1355:8]
wire [11:0] taken_rviImm_hi_hi_3 = {taken_rviImm_hi_hi_hi_3, taken_rviImm_hi_hi_lo_3}; // @[RocketCore.scala:1355:8]
wire [20:0] taken_rviImm_hi_3 = {taken_rviImm_hi_hi_3, taken_rviImm_hi_lo_3}; // @[RocketCore.scala:1355:8]
wire [31:0] _taken_rviImm_T_8 = {taken_rviImm_hi_3, taken_rviImm_lo_3}; // @[RocketCore.scala:1355:8]
wire [31:0] _taken_rviImm_T_9 = _taken_rviImm_T_8; // @[RocketCore.scala:1355:{8,53}]
wire [31:0] taken_rviImm_1 = _taken_rviImm_T_5 ? _taken_rviImm_T_7 : _taken_rviImm_T_9; // @[RocketCore.scala:1355:53]
wire taken_predict_taken_1 = _taken_predict_taken_T_1; // @[Frontend.scala:253:54]
wire _taken_taken_T_10 = taken_rviBranch_1 & taken_predict_taken_1; // @[Frontend.scala:239:36, :253:54, :255:53]
wire _taken_taken_T_11 = _taken_taken_T_9 | _taken_taken_T_10; // @[Frontend.scala:255:{29,40,53}]
wire _taken_taken_T_12 = taken_prevRVI_1 & _taken_taken_T_11; // @[Frontend.scala:234:31, :255:{17,40}]
wire _taken_taken_T_13 = taken_rvcJump_1 | taken_rvcJALR_1; // @[Frontend.scala:246:47, :250:49, :256:27]
wire _taken_taken_T_14 = _taken_taken_T_13 | taken_rvcJR_1; // @[Frontend.scala:248:46, :256:{27,38}]
wire _taken_taken_T_15 = taken_rvcBranch_1 & taken_predict_taken_1; // @[Frontend.scala:244:52, :253:54, :256:60]
wire _taken_taken_T_16 = _taken_taken_T_14 | _taken_taken_T_15; // @[Frontend.scala:256:{38,47,60}]
wire _taken_taken_T_17 = taken_valid_1 & _taken_taken_T_16; // @[Frontend.scala:235:44, :256:{15,47}]
wire taken_taken_1 = _taken_taken_T_12 | _taken_taken_T_17; // @[Frontend.scala:255:{17,71}, :256:15]
wire _taken_predictReturn_T_3 = taken_prevRVI_1 & taken_rviReturn_1; // @[Frontend.scala:234:31, :242:46, :257:61]
wire _taken_predictReturn_T_4 = taken_valid_1 & taken_rvcReturn_1; // @[Frontend.scala:235:44, :249:29, :257:83]
wire _taken_predictReturn_T_5 = _taken_predictReturn_T_3 | _taken_predictReturn_T_4; // @[Frontend.scala:257:{61,74,83}]
wire taken_predictReturn_1 = _btb_io_ras_head_valid & _taken_predictReturn_T_5; // @[Frontend.scala:198:21, :257:{49,74}]
wire _taken_predictJump_T_2 = taken_prevRVI_1 & taken_rviJump_1; // @[Frontend.scala:234:31, :240:34, :258:33]
wire _taken_predictJump_T_3 = taken_valid_1 & taken_rvcJump_1; // @[Frontend.scala:235:44, :246:47, :258:53]
wire taken_predictJump_1 = _taken_predictJump_T_2 | _taken_predictJump_T_3; // @[Frontend.scala:258:{33,44,53}]
wire _GEN_9 = taken_prevRVI_1 & taken_rviBranch_1; // @[Frontend.scala:234:31, :239:36, :259:53]
wire _taken_predictBranch_T_3; // @[Frontend.scala:259:53]
assign _taken_predictBranch_T_3 = _GEN_9; // @[Frontend.scala:259:53]
wire _taken_T_48; // @[Frontend.scala:294:23]
assign _taken_T_48 = _GEN_9; // @[Frontend.scala:259:53, :294:23]
wire _GEN_10 = taken_valid_1 & taken_rvcBranch_1; // @[Frontend.scala:235:44, :244:52, :259:75]
wire _taken_predictBranch_T_4; // @[Frontend.scala:259:75]
assign _taken_predictBranch_T_4 = _GEN_10; // @[Frontend.scala:259:75]
wire _taken_T_49; // @[Frontend.scala:294:45]
assign _taken_T_49 = _GEN_10; // @[Frontend.scala:259:75, :294:45]
wire _taken_predictBranch_T_5 = _taken_predictBranch_T_3 | _taken_predictBranch_T_4; // @[Frontend.scala:259:{53,66,75}]
wire taken_predictBranch_1 = taken_predict_taken_1 & _taken_predictBranch_T_5; // @[Frontend.scala:253:54, :259:{41,66}]
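// Merge the two predecode slots: taken_idx picks the halfword that redirects (slot 1 unless
// slot 0 already takes), after_idx addresses the following halfword for the RAS return
// address, and the cfiType/useRAS update signals are muxed from the selected slot.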
wire _taken_T_31 = _taken_T_29 & _taken_T_30; // @[Frontend.scala:261:{22,43,69}]
wire _taken_T_32 = _taken_T_31 & taken_valid_1; // @[Frontend.scala:235:44, :261:{43,79}]
wire _taken_T_33 = ~taken_rvc_1; // @[Frontend.scala:233:45, :261:91]
wire _taken_T_34 = _taken_T_32 & _taken_T_33; // @[Frontend.scala:261:{79,88,91}]
assign _taken_T_35 = ~_taken_T_28; // @[Frontend.scala:270:13, :313:51]
assign taken_idx = _taken_T_35; // @[Frontend.scala:223:25, :270:13]
assign after_idx = _taken_T_35 ? 2'h2 : 2'h1; // @[Frontend.scala:224:25, :270:{13,25}, :272:19]
wire _taken_btb_io_ras_update_valid_T_10 = ~wrong_path; // @[Frontend.scala:126:27, :273:54, :319:52]
wire _taken_btb_io_ras_update_valid_T_11 = _taken_btb_io_ras_update_valid_T_9 & _taken_btb_io_ras_update_valid_T_10; // @[Decoupled.scala:51:35]
wire _taken_btb_io_ras_update_valid_T_12 = taken_rviCall_1 | taken_rviReturn_1; // @[Frontend.scala:242:46, :243:42, :273:90]
wire _taken_btb_io_ras_update_valid_T_13 = taken_prevRVI_1 & _taken_btb_io_ras_update_valid_T_12; // @[Frontend.scala:234:31, :273:{78,90}]
wire _taken_btb_io_ras_update_valid_T_14 = taken_rvcCall_1 | taken_rvcReturn_1; // @[Frontend.scala:249:29, :251:28, :273:125]
wire _taken_btb_io_ras_update_valid_T_15 = taken_valid_1 & _taken_btb_io_ras_update_valid_T_14; // @[Frontend.scala:235:44, :273:{113,125}]
wire _taken_btb_io_ras_update_valid_T_16 = _taken_btb_io_ras_update_valid_T_13 | _taken_btb_io_ras_update_valid_T_15; // @[Frontend.scala:273:{78,104,113}]
wire _taken_btb_io_ras_update_valid_T_17 = _taken_btb_io_ras_update_valid_T_11 & _taken_btb_io_ras_update_valid_T_16; // @[Frontend.scala:273:{51,66,104}]
wire _taken_btb_io_ras_update_bits_cfiType_T_8 = taken_prevRVI_1 ? taken_rviReturn_1 : taken_rvcReturn_1; // @[Frontend.scala:234:31, :242:46, :249:29, :274:50]
wire _taken_btb_io_ras_update_bits_cfiType_T_9 = taken_prevRVI_1 ? taken_rviCall_1 : taken_rvcCall_1; // @[Frontend.scala:234:31, :243:42, :251:28, :275:50]
wire _taken_btb_io_ras_update_bits_cfiType_T_10 = taken_prevRVI_1 ? taken_rviBranch_1 : taken_rvcBranch_1; // @[Frontend.scala:234:31, :239:36, :244:52, :276:50]
wire _taken_btb_io_ras_update_bits_cfiType_T_12 = _taken_btb_io_ras_update_bits_cfiType_T_10; // @[Frontend.scala:276:{50,82}]
wire _taken_btb_io_ras_update_bits_cfiType_T_13 = ~_taken_btb_io_ras_update_bits_cfiType_T_12; // @[Frontend.scala:276:{46,82}]
wire [1:0] _taken_btb_io_ras_update_bits_cfiType_T_14 = _taken_btb_io_ras_update_bits_cfiType_T_9 ? 2'h2 : {1'h0, _taken_btb_io_ras_update_bits_cfiType_T_13}; // @[Frontend.scala:275:{46,50}, :276:46]
wire [1:0] _taken_btb_io_ras_update_bits_cfiType_T_15 = _taken_btb_io_ras_update_bits_cfiType_T_8 ? 2'h3 : _taken_btb_io_ras_update_bits_cfiType_T_14; // @[Frontend.scala:274:{46,50}, :275:46]
assign btb_io_ras_update_bits_cfiType = _taken_T_35 ? _taken_btb_io_ras_update_bits_cfiType_T_15 : _taken_btb_io_ras_update_bits_cfiType_T_7; // @[Frontend.scala:270:{13,25}, :274:{40,46}]
wire _taken_T_36 = ~s2_btb_taken; // @[Frontend.scala:120:40, :279:15]
wire _taken_T_38 = _taken_T_37 & taken_taken_1; // @[Decoupled.scala:51:35]
wire _taken_T_39 = ~taken_predictBranch_1; // @[Frontend.scala:259:41, :280:44]
wire _taken_T_40 = _taken_T_38 & _taken_T_39; // @[Frontend.scala:280:{32,41,44}]
wire _taken_T_41 = ~taken_predictJump_1; // @[Frontend.scala:258:44, :280:62]
wire _taken_T_42 = _taken_T_40 & _taken_T_41; // @[Frontend.scala:280:{41,59,62}]
wire _taken_T_43 = ~taken_predictReturn_1; // @[Frontend.scala:257:49, :280:78]
wire _taken_T_44 = _taken_T_42 & _taken_T_43; // @[Frontend.scala:280:{59,75,78}]
wire _taken_T_45 = s2_valid & taken_predictReturn_1; // @[Frontend.scala:108:25, :257:49, :283:26]
assign useRAS = _taken_T_35 & _taken_T_36 & _taken_T_45 | _taken_T_7 & _taken_T_16; // @[Frontend.scala:225:29, :270:{13,25}, :279:{15,30}, :283:{26,44}, :284:20]
wire _taken_T_46 = taken_predictBranch_1 | taken_predictJump_1; // @[Frontend.scala:258:44, :259:41, :286:44]
wire _taken_T_47 = s2_valid & _taken_T_46; // @[Frontend.scala:108:25, :286:{26,44}]
wire [39:0] taken_pc_1 = {s2_base_pc[39:2], s2_base_pc[1:0] | 2'h2}; // @[Frontend.scala:222:22, :287:33, :323:50]
wire [40:0] _taken_npc_T_5 = {1'h0, taken_pc_1} - 41'h2; // @[Frontend.scala:287:33, :290:36]
wire [39:0] _taken_npc_T_6 = _taken_npc_T_5[39:0]; // @[Frontend.scala:290:36]
wire [39:0] _taken_npc_T_7 = taken_prevRVI_1 ? _taken_npc_T_6 : taken_pc_1; // @[Frontend.scala:234:31, :287:33, :290:{23,36}]
wire [39:0] _taken_npc_T_8 = _taken_npc_T_7; // @[Frontend.scala:290:{23,59}]
wire [31:0] _taken_npc_T_9 = taken_prevRVI_1 ? taken_rviImm_1 : {{11{taken_rvcImm_1[20]}}, taken_rvcImm_1}; // @[Frontend.scala:234:31, :247:23, :252:23, :290:71]
wire [40:0] _taken_npc_T_10 = {_taken_npc_T_8[39], _taken_npc_T_8} + {{9{_taken_npc_T_9[31]}}, _taken_npc_T_9}; // @[Frontend.scala:290:{59,66,71}]
wire [39:0] _taken_npc_T_11 = _taken_npc_T_10[39:0]; // @[Frontend.scala:290:66]
wire [39:0] taken_npc_1 = _taken_npc_T_11; // @[Frontend.scala:290:66]
wire [39:0] _taken_predicted_npc_T_1 = taken_npc_1; // @[Frontend.scala:290:66, :291:34]
wire _taken_T_50 = _taken_T_48 | _taken_T_49; // @[Frontend.scala:294:{23,36,45}]
wire _taken_btb_io_bht_advance_valid_T_4 = ~wrong_path; // @[Frontend.scala:126:27, :295:57, :319:52]
wire _taken_btb_io_bht_advance_valid_T_5 = _taken_btb_io_bht_advance_valid_T_3 & _taken_btb_io_bht_advance_valid_T_4; // @[Decoupled.scala:51:35]
wire _taken_T_51 = ~s2_btb_resp_valid; // @[Frontend.scala:118:44, :298:15]
wire _taken_T_53 = taken_predictBranch_1 & _taken_T_52; // @[Frontend.scala:259:41, :298:52]
wire _taken_T_54 = _taken_T_53 | taken_predictJump_1; // @[Frontend.scala:258:44, :298:{52,91}]
wire _taken_T_55 = _taken_T_54 | taken_predictReturn_1; // @[Frontend.scala:257:49, :298:{91,106}]
wire _taken_T_56 = _taken_T_51 & _taken_T_55; // @[Frontend.scala:298:{15,34,106}]
assign updateBTB = _taken_T_35 & _taken_T_56 | _taken_T_27; // @[Frontend.scala:226:32, :270:{13,25}, :298:{34,125}, :299:21]
wire _taken_T_58 = ~_taken_T_28; // @[Frontend.scala:270:13, :306:26, :313:51]
wire _taken_T_59 = taken_valid_1 & _taken_T_58; // @[Frontend.scala:235:44, :306:{23,26}]
wire _taken_T_60 = ~taken_rvc_1; // @[Frontend.scala:233:45, :261:91, :306:40]
wire _taken_T_61 = _taken_T_59 & _taken_T_60; // @[Frontend.scala:306:{23,37,40}]
wire [15:0] _taken_s2_partial_insn_T = {taken_bits_1[15:2], 2'h3}; // @[Frontend.scala:236:37, :308:37]
wire taken = _taken_T_28 | taken_taken_1; // @[Frontend.scala:255:71, :311:19, :313:51]
assign predicted_npc = useRAS ? {1'h0, _btb_io_ras_head_bits} : _taken_T_35 & _taken_T_36 & _taken_T_47 ? _taken_predicted_npc_T_1 : _taken_T_7 & _taken_T_18 ? _taken_predicted_npc_T : predicted_taken ? _predicted_npc_T_1 : ntpc; // @[package.scala:132:15]
wire _GEN_11 = ~s2_btb_taken & taken; // @[Frontend.scala:120:40, :191:22, :311:19, :336:{11,26}, :337:20, :338:34]
assign s2_redirect = ~s2_btb_taken & taken & _T_37 | io_cpu_req_valid_0; // @[Decoupled.scala:51:35] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
package constellation.channel
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy._
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.util._
import constellation.noc.{HasNoCParams}
class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams {
val io = IO(new Bundle {
val in = Input(new Channel(cParam))
})
val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B }))
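// Note (descriptive comment, not in the original file): in_flight(vc) is set when a
// head flit is seen on virtual channel vc and cleared by the matching tail flit;
// seeing a second head while the bit is still set trips the head/tail sequencing
// assertion below.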
for (i <- 0 until cParam.srcSpeedup) {
val flit = io.in.flit(i)
when (flit.valid) {
when (flit.bits.head) {
in_flight(flit.bits.virt_channel_id) := true.B
assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken")
}
when (flit.bits.tail) {
in_flight(flit.bits.virt_channel_id) := false.B
}
}
val possibleFlows = cParam.possibleFlows
when (flit.valid && flit.bits.head) {
cParam match {
case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) =>
assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR)
}
}
}
}
File Types.scala:
package constellation.routing
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Parameters}
import constellation.noc.{HasNoCParams}
import constellation.channel.{Flit}
/** A representation for 1 specific virtual channel in wormhole routing
*
* @param src the source node
* @param vc ID for the virtual channel
* @param dst the destination node
* @param n_vc the number of virtual channels
*/
// BEGIN: ChannelRoutingInfo
case class ChannelRoutingInfo(
src: Int,
dst: Int,
vc: Int,
n_vc: Int
) {
// END: ChannelRoutingInfo
require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this")
require (!(src == -1 && dst == -1), s"Illegal $this")
require (vc < n_vc, s"Illegal $this")
val isIngress = src == -1
val isEgress = dst == -1
}
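// Hypothetical usage sketch (not part of the original file): a ChannelRoutingInfo for a
// physical channel from node 0 to node 1 that uses virtual channel 2 out of 4. The
// require checks above reject vc >= n_vc and the src == dst == -1 combination.
object ChannelRoutingInfoExample {
  val example = ChannelRoutingInfo(src = 0, dst = 1, vc = 2, n_vc = 4)
  require(!example.isIngress && !example.isEgress) // neither endpoint is a terminal here
}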
/** Represents the properties of a packet that are relevant for routing
* ingressId and egressId uniquely identify a flow, but vnet and dst are used here
 * to simplify the implementation of routing relations
*
* @param ingressId packet's source ingress point
* @param egressId packet's destination egress point
* @param vNet virtual subnetwork identifier
* @param dst packet's destination node ID
*/
// BEGIN: FlowRoutingInfo
case class FlowRoutingInfo(
ingressId: Int,
egressId: Int,
vNetId: Int,
ingressNode: Int,
ingressNodeId: Int,
egressNode: Int,
egressNodeId: Int,
fifo: Boolean
) {
// END: FlowRoutingInfo
def isFlow(f: FlowRoutingBundle): Bool = {
(f.ingress_node === ingressNode.U &&
f.egress_node === egressNode.U &&
f.ingress_node_id === ingressNodeId.U &&
f.egress_node_id === egressNodeId.U)
}
def asLiteral(b: FlowRoutingBundle): BigInt = {
Seq(
(vNetId , b.vnet_id),
(ingressNode , b.ingress_node),
(ingressNodeId , b.ingress_node_id),
(egressNode , b.egress_node),
(egressNodeId , b.egress_node_id)
).foldLeft(0)((l, t) => {
(l << t._2.getWidth) | t._1
})
}
}
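// Worked example (hypothetical widths, not part of the original file): isFlow only
// compares the ingress/egress node and node-id fields of the bundle, ignoring vnet_id.
// asLiteral packs the Scala-level fields MSB-first as
//   vnet_id | ingress_node | ingress_node_id | egress_node | egress_node_id;
// with 4-bit node fields and 1-bit node-id fields, vNetId = 0, ingressNode = 3,
// ingressNodeId = 0, egressNode = 5, egressNodeId = 1 folds to
//   ((((0 << 4 | 3) << 1 | 0) << 4 | 5) << 1 | 1) = 203
// (the initial vnet term contributes nothing because the accumulator is still 0).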
class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams {
// Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination
// This simplifies the routing tables
val vnet_id = UInt(log2Ceil(nVirtualNetworks).W)
val ingress_node = UInt(log2Ceil(nNodes).W)
val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W)
val egress_node = UInt(log2Ceil(nNodes).W)
val egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W)
}
| module NoCMonitor_25( // @[Monitor.scala:11:7]
input clock, // @[Monitor.scala:11:7]
input reset, // @[Monitor.scala:11:7]
input io_in_flit_0_valid, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_head, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14]
input [3:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14]
input io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14]
input [1:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14]
);
reg in_flight_0; // @[Monitor.scala:16:26]
reg in_flight_1; // @[Monitor.scala:16:26]
reg in_flight_2; // @[Monitor.scala:16:26]
reg in_flight_3; // @[Monitor.scala:16:26]
wire _GEN = io_in_flit_0_bits_virt_channel_id == 2'h2; // @[Monitor.scala:21:46] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
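// Hypothetical worked example of the packing used below (not part of the original
// file): with 3-bit opcodes, a_opcode_bus_size = 4, so each source ID owns a 4-bit
// slot holding (opcode << 1) | 1; a Get (opcode 4) is stored as 0b1001, and
// a_opcode_lookup masks the slot and shifts right by one to recover 4. Similarly,
// size_to_numfullbits(3.U) = (1.U << 3) - 1.U = 0b111.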
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
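// Descriptive note (not in the original file): with more than 14 source bits, the
// per-source tracking state below (one inflight bit plus a 4-bit opcode slot and a
// size slot per source ID) grows past roughly 16kB of registers, so the A=>D flight
// check is skipped rather than synthesized.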
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
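// Illustrative only (not part of the original monitor): a stripped-down sketch of the
// one-hot in-flight tracking idiom used by legalizeDESink above, for a generic
// request/response pair identified by a small id. Names and ports are invented.
class InflightTrackerExample(numIds: Int) extends Module {
  require(numIds > 1)
  val io = IO(new Bundle {
    val req_fire  = Input(Bool())
    val req_id    = Input(UInt(log2Ceil(numIds).W))
    val resp_fire = Input(Bool())
    val resp_id   = Input(UInt(log2Ceil(numIds).W))
  })
  val inflight = RegInit(0.U(numIds.W))
  // Set the bit for a new request, clear it when the matching response arrives
  val set = Mux(io.req_fire,  UIntToOH(io.req_id,  numIds), 0.U)
  val clr = Mux(io.resp_fire, UIntToOH(io.resp_id, numIds), 0.U)
  assert(!io.req_fire  || !inflight(io.req_id), "id re-used while still in flight")
  assert(!io.resp_fire || (set | inflight)(io.resp_id), "response for nothing in flight")
  inflight := (inflight | set) & ~clr
}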
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
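// Illustrative only (not part of the original Misc.scala): a minimal sketch of how
// DecoupledHelper is commonly used, assuming a pass-through stage gated by an extra
// "credit" condition. Each side's handshake signal is the AND of all terms except its own.
class DecoupledHelperExample extends Module {
  val io = IO(new Bundle {
    val in     = Flipped(Decoupled(UInt(8.W)))
    val out    = Decoupled(UInt(8.W))
    val credit = Input(Bool())
  })
  val helper = DecoupledHelper(io.in.valid, io.out.ready, io.credit)
  io.in.ready  := helper.fire(io.in.valid)   // all terms except in.valid
  io.out.valid := helper.fire(io.out.ready)  // all terms except out.ready
  io.out.bits  := io.in.bits
}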
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
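// Illustrative only (not part of the original file): a hedged sketch of MuxTLookup
// selecting a pair of values by key, with a default for unmatched keys. The signal
// names are made up for the example.
class MuxTLookupExample extends Module {
  val io = IO(new Bundle {
    val sel    = Input(UInt(2.W))
    val stride = Output(UInt(8.W))
    val wrap   = Output(Bool())
  })
  val (stride, wrap) = MuxTLookup(io.sel, (0.U(8.W), false.B), Seq(
    1.U -> (4.U(8.W),  false.B),
    2.U -> (16.U(8.W), true.B)))
  io.stride := stride
  io.wrap   := wrap
}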
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
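// Illustrative only (not part of the original file): a small self-checking sketch of the
// mapping described above, written MSB-first (byte lane 3 is the top bit of the mask).
class MaskGenExample extends Module {
  assert(MaskGen(3.U, 0.U, beatBytes = 4) === "b1000".U) // 1-byte access touching lane 3
  assert(MaskGen(3.U, 1.U, beatBytes = 4) === "b1100".U) // 2-byte access covering lanes 2-3
  assert(MaskGen(0.U, 2.U, beatBytes = 4) === "b1111".U) // full-beat access
}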
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
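// Illustrative only (not part of the original PlusArg.scala): a hedged sketch of the two
// entry points above, assuming a free-running cycle counter. The plusarg names
// +verbose_after and +max_cycles are invented for the example.
class PlusArgExample extends Module {
  val cycles = RegInit(0.U(32.W))
  cycles := cycles + 1.U
  // +verbose_after=N turns on a printf once N cycles have elapsed (0 leaves it off)
  val verbose_after = PlusArg("verbose_after", default = 0, docstring = "Cycle after which to start printing")
  when (verbose_after =/= 0.U && cycles >= verbose_after) {
    printf(p"cycle=$cycles\n")
  }
  // +max_cycles=N kills the simulation once the counter exceeds N (default 0 never fires)
  PlusArg.timeout("max_cycles", docstring = "Kill the simulation after this many cycles")(cycles)
}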
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
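// Illustrative only (not part of the original file): a small self-checking sketch of the
// two OR-fans above. leftOR smears each set bit towards the MSB, rightOR towards the LSB,
// and cap limits how far each bit spreads.
class ORFanExample extends Module {
  assert(leftOR("b0100".U(4.W)) === "b1100".U)
  assert(rightOR("b0100".U(4.W)) === "b0111".U)
  assert(leftOR("b0001".U(4.W), 4, 2) === "b0011".U) // cap = 2 limits the spread to one position
}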
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Bundles.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import freechips.rocketchip.util._
import scala.collection.immutable.ListMap
import chisel3.util.Decoupled
import chisel3.util.DecoupledIO
import chisel3.reflect.DataMirror
abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle
// common combos in lazy policy:
// Put + Acquire
// Release + AccessAck
object TLMessages
{
// A B C D E
def PutFullData = 0.U // . . => AccessAck
def PutPartialData = 1.U // . . => AccessAck
def ArithmeticData = 2.U // . . => AccessAckData
def LogicalData = 3.U // . . => AccessAckData
def Get = 4.U // . . => AccessAckData
def Hint = 5.U // . . => HintAck
def AcquireBlock = 6.U // . => Grant[Data]
def AcquirePerm = 7.U // . => Grant[Data]
def Probe = 6.U // . => ProbeAck[Data]
def AccessAck = 0.U // . .
def AccessAckData = 1.U // . .
def HintAck = 2.U // . .
def ProbeAck = 4.U // .
def ProbeAckData = 5.U // .
def Release = 6.U // . => ReleaseAck
def ReleaseData = 7.U // . => ReleaseAck
def Grant = 4.U // . => GrantAck
def GrantData = 5.U // . => GrantAck
def ReleaseAck = 6.U // .
def GrantAck = 0.U // .
def isA(x: UInt) = x <= AcquirePerm
def isB(x: UInt) = x <= Probe
def isC(x: UInt) = x <= ReleaseData
def isD(x: UInt) = x <= ReleaseAck
def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant)
def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck)
def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("AcquireBlock",TLPermissions.PermMsgGrow),
("AcquirePerm",TLPermissions.PermMsgGrow))
def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved),
("PutPartialData",TLPermissions.PermMsgReserved),
("ArithmeticData",TLAtomics.ArithMsg),
("LogicalData",TLAtomics.LogicMsg),
("Get",TLPermissions.PermMsgReserved),
("Hint",TLHints.HintsMsg),
("Probe",TLPermissions.PermMsgCap))
def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("ProbeAck",TLPermissions.PermMsgReport),
("ProbeAckData",TLPermissions.PermMsgReport),
("Release",TLPermissions.PermMsgReport),
("ReleaseData",TLPermissions.PermMsgReport))
def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved),
("AccessAckData",TLPermissions.PermMsgReserved),
("HintAck",TLPermissions.PermMsgReserved),
("Invalid Opcode",TLPermissions.PermMsgReserved),
("Grant",TLPermissions.PermMsgCap),
("GrantData",TLPermissions.PermMsgCap),
("ReleaseAck",TLPermissions.PermMsgReserved))
}
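// Illustrative only (not part of the original Bundles.scala): a hedged sketch of using the
// adResponse table above to predict which D-channel opcode should answer a given A-channel
// request. Monitors use this style of lookup; note that Acquire may also legally be
// answered by GrantData, which a real checker handles separately.
class ExpectedResponseExample extends Module {
  val io = IO(new Bundle {
    val a_opcode          = Input(UInt(3.W))
    val expected_d_opcode = Output(UInt(3.W))
  })
  io.expected_d_opcode := TLMessages.adResponse(io.a_opcode)
}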
/**
* The three primary TileLink permissions are:
* (T)runk: the agent is (or is on inwards path to) the global point of serialization.
* (B)ranch: the agent holds, or is on an outwards path to, a read-only copy of the block.
* (N)one: the agent holds no permissions on the block.
* These permissions are permuted by transfer operations in various ways.
* Operations can cap permissions, request for them to be grown or shrunk,
* or for a report on their current status.
*/
object TLPermissions
{
val aWidth = 2
val bdWidth = 2
val cWidth = 3
// Cap types (Grant = new permissions, Probe = permissions <= target)
def toT = 0.U(bdWidth.W)
def toB = 1.U(bdWidth.W)
def toN = 2.U(bdWidth.W)
def isCap(x: UInt) = x <= toN
// Grow types (Acquire = permissions >= target)
def NtoB = 0.U(aWidth.W)
def NtoT = 1.U(aWidth.W)
def BtoT = 2.U(aWidth.W)
def isGrow(x: UInt) = x <= BtoT
// Shrink types (ProbeAck, Release)
def TtoB = 0.U(cWidth.W)
def TtoN = 1.U(cWidth.W)
def BtoN = 2.U(cWidth.W)
def isShrink(x: UInt) = x <= BtoN
// Report types (ProbeAck, Release)
def TtoT = 3.U(cWidth.W)
def BtoB = 4.U(cWidth.W)
def NtoN = 5.U(cWidth.W)
def isReport(x: UInt) = x <= NtoN
def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT")
def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN")
def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN")
def PermMsgReserved:Seq[String] = Seq("Reserved")
}
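// Illustrative only (not part of the original file): a hedged sketch of classifying a
// C-channel param field with the predicates above. Shrink params (TtoB, TtoN, BtoN) give
// up permissions, while Report params (TtoT, BtoB, NtoN) leave them unchanged.
class PermClassifyExample extends Module {
  val io = IO(new Bundle {
    val c_param = Input(UInt(TLPermissions.cWidth.W))
    val shrinks = Output(Bool())
    val reports = Output(Bool())
  })
  io.shrinks := TLPermissions.isShrink(io.c_param)
  io.reports := !TLPermissions.isShrink(io.c_param) && TLPermissions.isReport(io.c_param)
}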
object TLAtomics
{
val width = 3
// Arithmetic types
def MIN = 0.U(width.W)
def MAX = 1.U(width.W)
def MINU = 2.U(width.W)
def MAXU = 3.U(width.W)
def ADD = 4.U(width.W)
def isArithmetic(x: UInt) = x <= ADD
// Logical types
def XOR = 0.U(width.W)
def OR = 1.U(width.W)
def AND = 2.U(width.W)
def SWAP = 3.U(width.W)
def isLogical(x: UInt) = x <= SWAP
def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD")
def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP")
}
object TLHints
{
val width = 1
def PREFETCH_READ = 0.U(width.W)
def PREFETCH_WRITE = 1.U(width.W)
def isHints(x: UInt) = x <= PREFETCH_WRITE
def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite")
}
sealed trait TLChannel extends TLBundleBase {
val channelName: String
}
sealed trait TLDataChannel extends TLChannel
sealed trait TLAddrChannel extends TLDataChannel
final class TLBundleA(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleA_${params.shortName}"
val channelName = "'A' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleB(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleB_${params.shortName}"
val channelName = "'B' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val address = UInt(params.addressBits.W) // from
// variable fields during multibeat:
val mask = UInt((params.dataBits/8).W)
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleC(params: TLBundleParameters)
extends TLBundleBase(params) with TLAddrChannel
{
override def typeName = s"TLBundleC_${params.shortName}"
val channelName = "'C' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.cWidth.W) // shrink or report perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // from
val address = UInt(params.addressBits.W) // to
val user = BundleMap(params.requestFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleD(params: TLBundleParameters)
extends TLBundleBase(params) with TLDataChannel
{
override def typeName = s"TLBundleD_${params.shortName}"
val channelName = "'D' channel"
// fixed fields during multibeat:
val opcode = UInt(3.W)
val param = UInt(TLPermissions.bdWidth.W) // cap perms
val size = UInt(params.sizeBits.W)
val source = UInt(params.sourceBits.W) // to
val sink = UInt(params.sinkBits.W) // from
val denied = Bool() // implies corrupt iff *Data
val user = BundleMap(params.responseFields)
val echo = BundleMap(params.echoFields)
// variable fields during multibeat:
val data = UInt(params.dataBits.W)
val corrupt = Bool() // only applies to *Data messages
}
final class TLBundleE(params: TLBundleParameters)
extends TLBundleBase(params) with TLChannel
{
override def typeName = s"TLBundleE_${params.shortName}"
val channelName = "'E' channel"
val sink = UInt(params.sinkBits.W) // to
}
class TLBundle(val params: TLBundleParameters) extends Record
{
// Emulate a Bundle with elements abcde or ad depending on params.hasBCE
private val optA = Some (Decoupled(new TLBundleA(params)))
private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params))))
private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params)))
private val optD = Some (Flipped(Decoupled(new TLBundleD(params))))
private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params)))
def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params)))))
def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params)))))
def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params)))))
def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params)))))
def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params)))))
val elements =
if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a)
else ListMap("d" -> d, "a" -> a)
def tieoff(): Unit = {
DataMirror.specifiedDirectionOf(a.ready) match {
case SpecifiedDirection.Input =>
a.ready := false.B
c.ready := false.B
e.ready := false.B
b.valid := false.B
d.valid := false.B
case SpecifiedDirection.Output =>
a.valid := false.B
c.valid := false.B
e.valid := false.B
b.ready := false.B
d.ready := false.B
case _ =>
}
}
}
object TLBundle
{
def apply(params: TLBundleParameters) = new TLBundle(params)
}
class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle
class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params)
{
val a = new AsyncBundle(new TLBundleA(params.base), params.async)
val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async))
val c = new AsyncBundle(new TLBundleC(params.base), params.async)
val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async))
val e = new AsyncBundle(new TLBundleE(params.base), params.async)
}
class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = RationalIO(new TLBundleA(params))
val b = Flipped(RationalIO(new TLBundleB(params)))
val c = RationalIO(new TLBundleC(params))
val d = Flipped(RationalIO(new TLBundleD(params)))
val e = RationalIO(new TLBundleE(params))
}
class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params)
{
val a = CreditedIO(new TLBundleA(params))
val b = Flipped(CreditedIO(new TLBundleB(params)))
val c = CreditedIO(new TLBundleC(params))
val d = Flipped(CreditedIO(new TLBundleD(params)))
val e = CreditedIO(new TLBundleE(params))
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return the most recently put content, but the content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A half-open range of ids [start, end); it may be empty when start == end
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
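// Illustrative only (not part of the original Parameters.scala): worked values for the
// half-open id range above, assuming a range covering ids 4 through 7.
object IdRangeExamples {
  private val r = IdRange(4, 8)
  require(r.contains(7) && !r.contains(8)) // end is exclusive
  require(r.overlaps(IdRange(7, 12)) && !r.overlaps(IdRange(8, 12)))
  require(r.shift(8) == IdRange(12, 16) && r.size == 4)
}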
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
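// Illustrative only (not part of the original file): worked values for the inclusive
// power-of-two range above, assuming transfers between 4 and 64 bytes.
object TransferSizesExamples {
  private val xfer = TransferSizes(4, 64)
  require(xfer.contains(16) && !xfer.contains(2) && !xfer.contains(24)) // 24 is not a power of two
  require((xfer intersect TransferSizes(32, 256)) == TransferSizes(32, 64))
  require((xfer mincover TransferSizes(1, 2)) == TransferSizes(1, 64))
}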
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
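// Illustrative only (not part of the original file): worked values for the base/mask
// encoding above, reusing the 0x1000/0xf0f example from the comment.
object AddressSetExamples {
  private val dev = AddressSet(0x1000, 0xf0f) // 0x1000-0x100f, 0x1100-0x110f, ..., 0x1f00-0x1f0f
  require(dev.contains(BigInt(0x110f)) && !dev.contains(BigInt(0x1010)))
  require(dev.alignment == 0x10) // the manager must be 16-byte aligned
  require(!dev.contiguous)       // the strided sub-ranges do not form one block
}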
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
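// Illustrative only (not part of the original Edges.scala): a hedged sketch of the
// beat-tracking helpers above, assuming a monitor-style observer of the A channel.
// The class name and ports are invented for the example.
class FirstLastExample(edge: TLEdge) extends Module {
  val io = IO(new Bundle {
    val a = Flipped(Decoupled(new TLBundleA(edge.bundle)))
  })
  io.a.ready := true.B
  // first marks the beat that opens a burst, done the beat that closes it, and count is
  // the zero-based index of the current beat within the burst.
  val (first, _, done, count) = edge.count(io.a.bits, io.a.fire)
  when (io.a.fire && first) { printf(p"new burst of ${edge.numBeats(io.a.bits)} beat(s)\n") }
  when (io.a.fire && done)  { printf(p"burst finished after ${count + 1.U} beats\n") }
}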
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
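  // Typical coherent-client flow built from the constructors above (comment only):
  //   A: send AcquireBlock/AcquirePerm to gain permissions on a block
  //   D: the manager answers with Grant/GrantData; acknowledge it with GrantAck on E
  //   B: incoming Probes are answered with ProbeAck/ProbeAckData on C
  //   C: Release/ReleaseData voluntarily downgrades; the manager replies with ReleaseAck on D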
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
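// --- Illustrative usage sketch (hypothetical, not part of the upstream file) ---
// Each request constructor above returns a (legal, bits) pair: `legal` says whether some
// visible manager supports the operation at that address/size, and `bits` is the fully
// populated channel payload. A client typically drives its A channel with that pair, as in
// this minimal sketch; the object and method names here are made up for illustration and
// the method must be called from inside a Module during elaboration.
object TLEdgeOutUsageSketch {
  /** Drive a simple Get onto an A channel while `fire` is high; returns the legality signal. */
  def driveGet(edge: TLEdgeOut, a: DecoupledIO[TLBundleA], fire: Bool,
               source: UInt, address: UInt, lgSize: UInt): Bool = {
    val (legal, bits) = edge.Get(source, address, lgSize)
    a.valid := fire   // the caller may additionally gate this on `legal`
    a.bits  := bits
    legal
  }
}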
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
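  // Manager-side counterpart of the client flow noted in TLEdgeOut (comment only):
  // issue Probe on channel B to recall permissions, answer Acquires with Grant/GrantData
  // on channel D, and acknowledge voluntary Releases with ReleaseAck on D.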
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
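// --- Illustrative usage sketch (hypothetical, not part of the upstream file) ---
// Manager-side mirror of the TLEdgeOut sketch above: answer an incoming A-channel Get
// with an AccessAckData built by the TLEdgeIn helpers. Names are assumptions for
// illustration only; call from inside a Module during elaboration.
object TLEdgeInUsageSketch {
  def answerGet(edge: TLEdgeIn, a: DecoupledIO[TLBundleA], d: DecoupledIO[TLBundleD], data: UInt): Unit = {
    a.ready := d.ready                       // accept the request only when the response can go out
    d.valid := a.valid
    d.bits  := edge.AccessAck(a.bits, data)  // builds the AccessAckData response carrying `data`
  }
}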
| module TLMonitor_14( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7]
wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7]
wire [28:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7]
wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7]
wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7]
wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7]
wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7]
wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7]
wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7]
wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7]
wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7]
wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7]
wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7]
wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7]
wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7]
wire sink_ok = 1'h0; // @[Monitor.scala:309:31]
wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35]
wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36]
wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25]
wire c_first_done = 1'h0; // @[Edges.scala:233:22]
wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47]
wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95]
wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71]
wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44]
wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36]
wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51]
wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40]
wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55]
wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74]
wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61]
wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61]
wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88]
wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42]
wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59]
wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14]
wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27]
wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25]
wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21]
wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74]
wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61]
wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61]
wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_61 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_63 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_67 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_69 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_73 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_75 = 1'h1; // @[Parameters.scala:57:20]
wire _source_ok_T_79 = 1'h1; // @[Parameters.scala:56:32]
wire _source_ok_T_81 = 1'h1; // @[Parameters.scala:57:20]
wire c_first = 1'h1; // @[Edges.scala:231:25]
wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43]
wire c_first_last = 1'h1; // @[Edges.scala:232:33]
wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28]
wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28]
wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74]
wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_first_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_first_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_wo_ready_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_wo_ready_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_opcodes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_opcodes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_sizes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_sizes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _c_probe_ack_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _c_probe_ack_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [28:0] _same_cycle_resp_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:265:74]
wire [28:0] _same_cycle_resp_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74]
wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61]
wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57]
wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57]
wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57]
wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57]
wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51]
wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51]
wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54]
wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52]
wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79]
wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77]
wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61]
wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59]
wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40]
wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40]
wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53]
wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51]
wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35]
wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35]
wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34]
wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34]
wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34]
wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34]
wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46]
wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76]
wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71]
wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42]
wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42]
wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42]
wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42]
wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42]
wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123]
wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117]
wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48]
wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48]
wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123]
wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119]
wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48]
wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48]
wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34]
wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7]
wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31]
wire _source_ok_T_25 = io_in_a_bits_source_0 == 7'h24; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31]
wire _source_ok_T_26 = io_in_a_bits_source_0 == 7'h25; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31]
wire _source_ok_T_27 = io_in_a_bits_source_0 == 7'h26; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31]
wire _source_ok_T_28 = io_in_a_bits_source_0 == 7'h2E; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_8 = _source_ok_T_28; // @[Parameters.scala:1138:31]
wire _source_ok_T_29 = io_in_a_bits_source_0 == 7'h2F; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_9 = _source_ok_T_29; // @[Parameters.scala:1138:31]
wire _source_ok_T_30 = io_in_a_bits_source_0 == 7'h2C; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_10 = _source_ok_T_30; // @[Parameters.scala:1138:31]
wire _source_ok_T_31 = io_in_a_bits_source_0 == 7'h2D; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_11 = _source_ok_T_31; // @[Parameters.scala:1138:31]
wire _source_ok_T_32 = io_in_a_bits_source_0 == 7'h2A; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_12 = _source_ok_T_32; // @[Parameters.scala:1138:31]
wire _source_ok_T_33 = io_in_a_bits_source_0 == 7'h2B; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_13 = _source_ok_T_33; // @[Parameters.scala:1138:31]
wire _source_ok_T_34 = io_in_a_bits_source_0 == 7'h28; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_14 = _source_ok_T_34; // @[Parameters.scala:1138:31]
wire _source_ok_T_35 = io_in_a_bits_source_0 == 7'h29; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_15 = _source_ok_T_35; // @[Parameters.scala:1138:31]
wire _source_ok_T_36 = io_in_a_bits_source_0 == 7'h22; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_16 = _source_ok_T_36; // @[Parameters.scala:1138:31]
wire _source_ok_T_37 = io_in_a_bits_source_0 == 7'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_17 = _source_ok_T_37; // @[Parameters.scala:1138:31]
wire _source_ok_T_38 = io_in_a_bits_source_0 == 7'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_18 = _source_ok_T_38; // @[Parameters.scala:1138:31]
wire _source_ok_T_39 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_19 = _source_ok_T_39; // @[Parameters.scala:1138:31]
wire _source_ok_T_40 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_41 = _source_ok_T_40 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_43 = _source_ok_T_42 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_44 = _source_ok_T_43 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_48 = _source_ok_T_47 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_49 = _source_ok_T_48 | _source_ok_WIRE_10; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_50 = _source_ok_T_49 | _source_ok_WIRE_11; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_51 = _source_ok_T_50 | _source_ok_WIRE_12; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_52 = _source_ok_T_51 | _source_ok_WIRE_13; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_53 = _source_ok_T_52 | _source_ok_WIRE_14; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_54 = _source_ok_T_53 | _source_ok_WIRE_15; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_55 = _source_ok_T_54 | _source_ok_WIRE_16; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_56 = _source_ok_T_55 | _source_ok_WIRE_17; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_57 = _source_ok_T_56 | _source_ok_WIRE_18; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok = _source_ok_T_57 | _source_ok_WIRE_19; // @[Parameters.scala:1138:31, :1139:46]
wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71]
wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71]
assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71]
assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71]
wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71]
wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}]
wire [28:0] _is_aligned_T = {23'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46]
wire is_aligned = _is_aligned_T == 29'h0; // @[Edges.scala:21:{16,24}]
wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21]
wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26]
wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26]
wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26]
wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20]
wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}]
wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27]
wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10]
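// Commentary added for readability (not firtool output): the cone above is the
// maskGen helper from Misc.scala expanded for an 8-byte beat. Each level splits
// the beat in half (8 -> 4 -> 2 -> 1 bytes); a byte lane is selected when the
// transfer size covers its whole half (mask_*_size) or the address bit steers
// into it (mask_*_bit). The resulting 8-bit `mask` is edge.full_mask(bundle),
// which the A-channel format checks compare against io_in_a_bits_mask.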
wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}]
wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_58 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_0 = _source_ok_T_58; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}]
wire [4:0] _source_ok_T_59 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_65 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_71 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire [4:0] _source_ok_T_77 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7]
wire _source_ok_T_60 = _source_ok_T_59 == 5'h0; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_62 = _source_ok_T_60; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_64 = _source_ok_T_62; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_1 = _source_ok_T_64; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_66 = _source_ok_T_65 == 5'h1; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_68 = _source_ok_T_66; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_70 = _source_ok_T_68; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_2 = _source_ok_T_70; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_72 = _source_ok_T_71 == 5'h2; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_74 = _source_ok_T_72; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_76 = _source_ok_T_74; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_3 = _source_ok_T_76; // @[Parameters.scala:1138:31]
wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}]
wire _source_ok_T_78 = _source_ok_T_77 == 5'h3; // @[Parameters.scala:54:{10,32}]
wire _source_ok_T_80 = _source_ok_T_78; // @[Parameters.scala:54:{32,67}]
wire _source_ok_T_82 = _source_ok_T_80; // @[Parameters.scala:54:67, :56:48]
wire _source_ok_WIRE_1_4 = _source_ok_T_82; // @[Parameters.scala:1138:31]
wire _source_ok_T_83 = io_in_d_bits_source_0 == 7'h24; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_5 = _source_ok_T_83; // @[Parameters.scala:1138:31]
wire _source_ok_T_84 = io_in_d_bits_source_0 == 7'h25; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_6 = _source_ok_T_84; // @[Parameters.scala:1138:31]
wire _source_ok_T_85 = io_in_d_bits_source_0 == 7'h26; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_7 = _source_ok_T_85; // @[Parameters.scala:1138:31]
wire _source_ok_T_86 = io_in_d_bits_source_0 == 7'h2E; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_8 = _source_ok_T_86; // @[Parameters.scala:1138:31]
wire _source_ok_T_87 = io_in_d_bits_source_0 == 7'h2F; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_9 = _source_ok_T_87; // @[Parameters.scala:1138:31]
wire _source_ok_T_88 = io_in_d_bits_source_0 == 7'h2C; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_10 = _source_ok_T_88; // @[Parameters.scala:1138:31]
wire _source_ok_T_89 = io_in_d_bits_source_0 == 7'h2D; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_11 = _source_ok_T_89; // @[Parameters.scala:1138:31]
wire _source_ok_T_90 = io_in_d_bits_source_0 == 7'h2A; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_12 = _source_ok_T_90; // @[Parameters.scala:1138:31]
wire _source_ok_T_91 = io_in_d_bits_source_0 == 7'h2B; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_13 = _source_ok_T_91; // @[Parameters.scala:1138:31]
wire _source_ok_T_92 = io_in_d_bits_source_0 == 7'h28; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_14 = _source_ok_T_92; // @[Parameters.scala:1138:31]
wire _source_ok_T_93 = io_in_d_bits_source_0 == 7'h29; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_15 = _source_ok_T_93; // @[Parameters.scala:1138:31]
wire _source_ok_T_94 = io_in_d_bits_source_0 == 7'h22; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_16 = _source_ok_T_94; // @[Parameters.scala:1138:31]
wire _source_ok_T_95 = io_in_d_bits_source_0 == 7'h20; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_17 = _source_ok_T_95; // @[Parameters.scala:1138:31]
wire _source_ok_T_96 = io_in_d_bits_source_0 == 7'h21; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_18 = _source_ok_T_96; // @[Parameters.scala:1138:31]
wire _source_ok_T_97 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7]
wire _source_ok_WIRE_1_19 = _source_ok_T_97; // @[Parameters.scala:1138:31]
wire _source_ok_T_98 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_99 = _source_ok_T_98 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_100 = _source_ok_T_99 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_101 = _source_ok_T_100 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_102 = _source_ok_T_101 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_103 = _source_ok_T_102 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_104 = _source_ok_T_103 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_105 = _source_ok_T_104 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_106 = _source_ok_T_105 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_107 = _source_ok_T_106 | _source_ok_WIRE_1_10; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_108 = _source_ok_T_107 | _source_ok_WIRE_1_11; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_109 = _source_ok_T_108 | _source_ok_WIRE_1_12; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_110 = _source_ok_T_109 | _source_ok_WIRE_1_13; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_111 = _source_ok_T_110 | _source_ok_WIRE_1_14; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_112 = _source_ok_T_111 | _source_ok_WIRE_1_15; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_113 = _source_ok_T_112 | _source_ok_WIRE_1_16; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_114 = _source_ok_T_113 | _source_ok_WIRE_1_17; // @[Parameters.scala:1138:31, :1139:46]
wire _source_ok_T_115 = _source_ok_T_114 | _source_ok_WIRE_1_18; // @[Parameters.scala:1138:31, :1139:46]
wire source_ok_1 = _source_ok_T_115 | _source_ok_WIRE_1_19; // @[Parameters.scala:1138:31, :1139:46]
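// Commentary added for readability: source_ok_1 expands
// edge.client.contains(io_in_d_bits_source) for the D channel. Each
// _source_ok_WIRE_1_* term matches one client source ID or range -- 7'h10,
// the four aligned 4-ID ranges selected via io_in_d_bits_source_0[6:2],
// a set of individual IDs in the 7'h2x group, and 7'h40 -- and the OR chain
// accepts a source that falls in any of them.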
wire _T_1518 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35]
wire _a_first_T; // @[Decoupled.scala:51:35]
assign _a_first_T = _T_1518; // @[Decoupled.scala:51:35]
wire _a_first_T_1; // @[Decoupled.scala:51:35]
assign _a_first_T_1 = _T_1518; // @[Decoupled.scala:51:35]
wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7]
wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
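// Commentary added for readability: a_first_counter/a_first_beats1 realise
// edge.first(...) from Edges.scala. a_first_beats1_decode is
// (2^size / beatBytes) - 1, forced to zero for opcodes without payload; the
// counter reloads with beats1 on the first beat and counts down, and a_first
// is asserted while the counter is zero, i.e. on the first beat of each
// A-channel message.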
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [6:0] source; // @[Monitor.scala:390:22]
reg [28:0] address; // @[Monitor.scala:391:22]
wire _T_1591 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35]
wire _d_first_T; // @[Decoupled.scala:51:35]
assign _d_first_T = _T_1591; // @[Decoupled.scala:51:35]
wire _d_first_T_1; // @[Decoupled.scala:51:35]
assign _d_first_T_1 = _T_1591; // @[Decoupled.scala:51:35]
wire _d_first_T_2; // @[Decoupled.scala:51:35]
assign _d_first_T_2 = _T_1591; // @[Decoupled.scala:51:35]
wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71]
assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71]
wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71]
assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71]
wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46]
wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7]
wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28]
wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}]
wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [6:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [64:0] inflight; // @[Monitor.scala:614:27]
reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [259:0] inflight_sizes; // @[Monitor.scala:618:33]
wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}]
wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [64:0] a_set; // @[Monitor.scala:626:34]
wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34]
wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33]
wire [259:0] a_sizes_set; // @[Monitor.scala:632:31]
wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35]
wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69]
wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69]
assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69]
wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65]
assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65]
wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101]
assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101]
wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99]
assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99]
wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69]
assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69]
wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67]
assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67]
wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101]
assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101]
wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99]
assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99]
wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}]
wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}]
wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}]
assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}]
wire [3:0] a_size_lookup; // @[Monitor.scala:639:33]
wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}]
wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}]
wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}]
assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}]
wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40]
wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38]
wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44]
wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35]
assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35]
wire [127:0] _a_set_T; // @[OneHot.scala:58:35]
assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35]
assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1444 = _T_1518 & a_first_1; // @[Decoupled.scala:51:35]
assign a_set = _T_1444 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53]
wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}]
assign a_opcodes_set_interm = _T_1444 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}]
wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51]
wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}]
assign a_sizes_set_interm = _T_1444 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}]
wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79]
wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79]
assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79]
wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77]
assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77]
wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}]
assign a_opcodes_set = _T_1444 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}]
wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}]
assign a_sizes_set = _T_1444 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}]
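// Commentary added for readability: a_set one-hots the A-channel source on the
// first accepted beat, and a_opcodes_set/a_sizes_set stage the matching
// bookkeeping write -- the interm value packs {opcode, 1'b1} (respectively
// {size, 1'b1}) into 4 bits, which the shift by {source, 2'h0} places in that
// source's 4-bit slot of the 260-bit inflight_opcodes/inflight_sizes vectors
// (65 sources x 4 bits).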
wire [64:0] d_clr; // @[Monitor.scala:664:34]
wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34]
wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33]
wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31]
wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46]
wire d_release_ack; // @[Monitor.scala:673:46]
assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46]
wire d_release_ack_1; // @[Monitor.scala:783:46]
assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46]
wire _T_1490 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26]
wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T; // @[OneHot.scala:58:35]
assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35]
assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35]
wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35]
assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35]
assign d_clr_wo_ready = _T_1490 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1459 = _T_1591 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35]
assign d_clr = _T_1459 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}]
assign d_opcodes_clr = _T_1459 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}]
wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}]
assign d_sizes_clr = _T_1459 ? _d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}]
wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}]
wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113]
wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}]
wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27]
wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38]
wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}]
wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43]
wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62]
wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}]
wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39]
wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56]
wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26]
wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26]
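// Commentary added for readability: the inflight/a_set/d_clr vectors implement
// the A->D source tracking of legalizeADSource -- a bit is set per source when
// a request is accepted and cleared when the matching response (other than
// ReleaseAck) completes, with the masked updates applied to the inflight
// registers each cycle. The 32-bit watchdog counts how long a source has been
// outstanding and backs the monitor's timeout assertion (not shown in this
// excerpt).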
reg [64:0] inflight_1; // @[Monitor.scala:726:35]
wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35]
reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35]
wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43]
reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35]
wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41]
wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}]
wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46]
wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28]
wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25]
wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43]
wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}]
wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35]
wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27]
wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}]
wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21]
wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35]
wire [3:0] c_size_lookup; // @[Monitor.scala:748:35]
wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}]
wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}]
wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}]
assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}]
wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}]
wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}]
wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}]
assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}]
wire [64:0] d_clr_1; // @[Monitor.scala:774:34]
wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34]
wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34]
wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34]
wire _T_1562 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26]
assign d_clr_wo_ready_1 = _T_1562 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire _T_1544 = _T_1591 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35]
assign d_clr_1 = _T_1544 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35]
wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}]
assign d_opcodes_clr_1 = _T_1544 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}]
wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}]
assign d_sizes_clr_1 = _T_1544 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}]
wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113]
wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46]
wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}]
wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62]
wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}]
wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58]
wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
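// Commentary added for readability (not part of the upstream file):
// TLMonitor.apply is the hook diplomacy uses to optionally splice a monitor
// onto an edge. A hypothetical call site (names are illustrative only) might
// look like:
//
// val monitoredNode: TLNode = TLMonitor(enable = enableMonitors, node)
//
// When `enable` is true the node is re-bound through an ephemeral node named
// "monitor", which causes an inline TLMonitor to be elaborated on that edge;
// otherwise the original node is returned unchanged.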
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
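// Commentary added for readability: in the plain simulation case
// (MonitorDirection.Monitor) both monAssert and assume reduce to chisel3.assert.
// In the formal flows they are emitted as Property(...) terms instead, and
// assume() flips the direction so that checks on the peer's behaviour can be
// treated as environment constraints rather than proof obligations.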
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
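// Commentary added for readability: visible() is true only if every client that
// can own `source` declares `address` inside at least one of its visibility
// AddressSets; the A/B/C format checks assert it for each valid beat so a
// client cannot touch a bank it never claimed visibility over.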
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
// Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID for which a response message " +
"is already pending (not received until current cycle) for a prior request message " +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the " +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the " +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
// This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3; add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
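// Illustrative sketch of the in-flight bookkeeping used above (example values, not
// monitor logic): with sizeBits = 4, a_size_bus_size = 5 and log_a_size_bus_size = 3,
// so each source ID owns an 8-bit slot in inflight_sizes. Recording size = 6 for
// source 2 stores (6 << 1) | 1 = 0b0001101 at bit offset 2 << 3 = 16; the low bit
// marks the slot as valid. The lookup reverses this:
//   ((inflight_sizes >> 16) & 0xFF) >> 1   // recovers 6 for the matching D response
// The opcode slots work the same way with a fixed 4-bit stride (3-bit opcode + valid bit).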
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3; add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
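// Illustrative usage sketch (assumed signal names, not part of this file): DecoupledHelper
// gathers all the ready/valid conditions of a transaction and re-derives each port's fire
// condition while excluding that port's own signal, e.g.
//   val txn = DecoupledHelper(in.valid, out.ready, !busy)
//   out.valid := txn.fire(out.ready)   // everything except out.ready
//   in.ready  := txn.fire(in.valid)    // everything except in.valid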
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
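// Illustrative usage sketch (assumed signals): MuxTLookup selects a tuple of values by key,
// falling back to the default when no key matches, e.g.
//   val (wdata, wen) = MuxTLookup(cmd, (0.U, false.B), Seq(
//     CMD_SET -> (io.in, true.B),
//     CMD_CLR -> (0.U,   true.B)))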
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
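// Illustrative sketch: Str packs ASCII text into a UInt for use with printf, and the numeric
// overloads render UInt/SInt values as ASCII digits, e.g.
//   Str("OK")   // 16-bit UInt 0x4F4B
//   Str(x, 16)  // hexadecimal rendering of UInt x, one ASCII byte per digit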
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
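// Illustrative sketch: PopCountAtLeast answers "are at least n bits set?" without a full
// population count for small n, e.g. PopCountAtLeast("b0110".U, 2) is true.B while
// PopCountAtLeast("b0100".U, 2) is false.B.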
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
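// Illustrative sketch tying this to the examples above: for beatBytes = 4,
//   MaskGen(0x0.U, 2.U, 4)              // full-word access        -> b1111
//   MaskGen(0x2.U, 1.U, 4)              // halfword at offset 2    -> b1100
//   MaskGen(0x2.U, 1.U, 4, groupBy = 2) // OR-reduced per 2B group -> b10
// TLEdge.mask(address, lgSize) in Edges.scala simply forwards to MaskGen with the
// manager's beatBytes.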
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
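// Illustrative usage sketch (assumed names): PlusArg reads a runtime simulation argument
// through the plusarg_reader black box, e.g.
//   val verbose = PlusArg("verbose", default = 0, docstring = "print extra debug info")
//   when (verbose =/= 0.U) { printf("...") }
// and PlusArg.timeout("max_cycles", docstring = "kill after N cycles")(cycleCounter)
// fails its assertion once cycleCounter reaches a nonzero +max_cycles value.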
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
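// Illustrative sketch of the extension methods above (example values):
//   "b0101".U(4.W).sextTo(8)      // 0b00000101 (MSB is 0, so zero-filled)
//   "b1010".U(4.W).sextTo(8)      // 0b11111010 (MSB replicated)
//   "b0110".U(4.W).rotateRight(1) // 0b0011
//   "b11010110".U.grouped(4)      // Seq(0b0110, 0b1101), lowest group first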
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
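// Illustrative sketch: leftOR/rightOR smear set bits toward the high/low end, e.g.
//   leftOR("b00100".U(5.W))  // b11100
//   rightOR("b00100".U(5.W)) // b00111
// which is handy for turning a one-hot "first" marker into a thermometer mask.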
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
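// Illustrative sketch: unlike Seq.groupBy, the result order is deterministic and follows
// the first appearance of each key, e.g.
//   groupByIntoSeq(Seq(1, 2, 3, 4))(_ % 2)  // Seq(1 -> Seq(1, 3), 0 -> Seq(2, 4))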
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
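// Illustrative sketch (example values): with manager.beatBytes = 8, a PutFullData of
// size = 5 (32 bytes) occupies numBeats = 4 beats and numBeats1 = 3, while a Get of any
// size is a single request beat (numBeats = 1, numBeats1 = 0) because it carries no data.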
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
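// Illustrative sketch: for a 4-beat burst (beats1 = 3), the helper above reports
//   beat:  0      1      2      3
//   first: true   false  false  false
//   last:  false  false  false  true
//   count: 0      1      2      3
// and done pulses with last on the final accepted beat. Single-beat messages have first,
// last and done all true on their only beat.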
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
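  // Illustrative usage sketch (not part of the original file): a client would
  // typically drive channel A from this constructor; `tl`, `addr`, `want_get`
  // and `beatBytes` are placeholder names here.
  //   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = log2Ceil(beatBytes).U)
  //   tl.a.valid := want_get && legal
  //   tl.a.bits  := get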
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
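  // (Added note) This transposes a possibly ragged Seq of Seqs, e.g.
  // myTranspose(Seq(Seq(1, 2), Seq(3, 4))) == Seq(Seq(1, 3), Seq(2, 4)).
  // Unlike Seq.transpose, it tolerates rows of unequal length by simply
  // dropping rows as they run out.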
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
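  // Illustrative usage sketch (not part of the original file): a simple slave
  // answering a Get on channel D could use the constructor above, with
  // `readData` a placeholder:
  //   in.d.bits := edge.AccessAck(in.a.bits, readData)
  // The data-less AccessAck variant answers Puts.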
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_41( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [3:0] source; // @[Monitor.scala:390:22]
reg [27:0] address; // @[Monitor.scala:391:22]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [3:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [9:0] inflight; // @[Monitor.scala:614:27]
reg [39:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [39:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire [15:0] _GEN_0 = {12'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [15:0] _GEN_3 = {12'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [9:0] inflight_1; // @[Monitor.scala:726:35]
reg [39:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Fragmenter.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.{AddressSet, BufferParams, IdRange, TransferSizes}
import freechips.rocketchip.util.{Repeater, OH1ToUInt, UIntToOH1}
import scala.math.min
import freechips.rocketchip.util.DataToAugmentedData
object EarlyAck {
sealed trait T
case object AllPuts extends T
case object PutFulls extends T
case object None extends T
}
// minSize: minimum size of transfers supported by all outward managers
// maxSize: maximum size of transfers supported after the Fragmenter is applied
// alwaysMin: fragment all requests down to minSize (else fragment to maximum supported by manager)
// earlyAck: should a multibeat Put be acknowledged on the first beat or the last beat
// holdFirstDeny: allow the Fragmenter to unsafely combine multibeat Gets by applying the first beat's denied signal to the whole burst
// nameSuffix: appends a suffix to the module name
// Fragmenter modifies: PutFull, PutPartial, LogicalData, Get, Hint
// Fragmenter passes: ArithmeticData (truncated to minSize if alwaysMin)
// Fragmenter cannot modify acquire (could livelock); thus it is unsafe to put caches on both sides
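// Illustrative wiring sketch (not part of the original file): the Fragmenter is
// typically placed in front of a narrow device, e.g.
//   ram.node := TLFragmenter(minSize = 8, maxSize = 64) := master.node
// so that, for instance, a 64-byte Get is re-issued as a series of smaller Gets
// no larger than the addressed manager supports, and the responses reassembled.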
class TLFragmenter(val minSize: Int, val maxSize: Int, val alwaysMin: Boolean = false, val earlyAck: EarlyAck.T = EarlyAck.None, val holdFirstDeny: Boolean = false, val nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule
{
require(isPow2 (maxSize), s"TLFragmenter expects pow2(maxSize), but got $maxSize")
require(isPow2 (minSize), s"TLFragmenter expects pow2(minSize), but got $minSize")
require(minSize <= maxSize, s"TLFragmenter expects min <= max, but got $minSize > $maxSize")
val fragmentBits = log2Ceil(maxSize / minSize)
val fullBits = if (earlyAck == EarlyAck.PutFulls) 1 else 0
val toggleBits = 1
val addedBits = fragmentBits + toggleBits + fullBits
def expandTransfer(x: TransferSizes, op: String) = if (!x) x else {
// validate that we can apply the fragmenter correctly
require (x.max >= minSize, s"TLFragmenter (with parent $parent) max transfer size $op(${x.max}) must be >= min transfer size (${minSize})")
TransferSizes(x.min, maxSize)
}
private def noChangeRequired = minSize == maxSize
private def shrinkTransfer(x: TransferSizes) =
if (!alwaysMin) x
else if (x.min <= minSize) TransferSizes(x.min, min(minSize, x.max))
else TransferSizes.none
private def mapManager(m: TLSlaveParameters) = m.v1copy(
supportsArithmetic = shrinkTransfer(m.supportsArithmetic),
supportsLogical = shrinkTransfer(m.supportsLogical),
supportsGet = expandTransfer(m.supportsGet, "Get"),
supportsPutFull = expandTransfer(m.supportsPutFull, "PutFull"),
    supportsPutPartial = expandTransfer(m.supportsPutPartial, "PutPartial"),
supportsHint = expandTransfer(m.supportsHint, "Hint"))
val node = new TLAdapterNode(
// We require that all the responses are mutually FIFO
// Thus we need to compact all of the masters into one big master
clientFn = { c => (if (noChangeRequired) c else c.v2copy(
masters = Seq(TLMasterParameters.v2(
name = "TLFragmenter",
sourceId = IdRange(0, if (minSize == maxSize) c.endSourceId else (c.endSourceId << addedBits)),
requestFifo = true,
emits = TLMasterToSlaveTransferSizes(
acquireT = shrinkTransfer(c.masters.map(_.emits.acquireT) .reduce(_ mincover _)),
acquireB = shrinkTransfer(c.masters.map(_.emits.acquireB) .reduce(_ mincover _)),
arithmetic = shrinkTransfer(c.masters.map(_.emits.arithmetic).reduce(_ mincover _)),
logical = shrinkTransfer(c.masters.map(_.emits.logical) .reduce(_ mincover _)),
get = shrinkTransfer(c.masters.map(_.emits.get) .reduce(_ mincover _)),
putFull = shrinkTransfer(c.masters.map(_.emits.putFull) .reduce(_ mincover _)),
putPartial = shrinkTransfer(c.masters.map(_.emits.putPartial).reduce(_ mincover _)),
hint = shrinkTransfer(c.masters.map(_.emits.hint) .reduce(_ mincover _))
)
))
))},
managerFn = { m => if (noChangeRequired) m else m.v2copy(slaves = m.slaves.map(mapManager)) }
) {
override def circuitIdentity = noChangeRequired
}
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = (Seq("TLFragmenter") ++ nameSuffix).mkString("_")
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
if (noChangeRequired) {
out <> in
} else {
// All managers must share a common FIFO domain (responses might end up interleaved)
val manager = edgeOut.manager
val managers = manager.managers
val beatBytes = manager.beatBytes
val fifoId = managers(0).fifoId
require (fifoId.isDefined && managers.map(_.fifoId == fifoId).reduce(_ && _))
require (!manager.anySupportAcquireB || !edgeOut.client.anySupportProbe,
s"TLFragmenter (with parent $parent) can't fragment a caching client's requests into a cacheable region")
require (minSize >= beatBytes, s"TLFragmenter (with parent $parent) can't support fragmenting ($minSize) to sub-beat ($beatBytes) accesses")
// We can't support devices which are cached on both sides of us
require (!edgeOut.manager.anySupportAcquireB || !edgeIn.client.anySupportProbe)
// We can't support denied because we reassemble fragments
require (!edgeOut.manager.mayDenyGet || holdFirstDeny, s"TLFragmenter (with parent $parent) can't support denials without holdFirstDeny=true")
require (!edgeOut.manager.mayDenyPut || earlyAck == EarlyAck.None)
/* The Fragmenter is a bit tricky, because there are 5 sizes in play:
* max size -- the maximum transfer size possible
* orig size -- the original pre-fragmenter size
* frag size -- the modified post-fragmenter size
* min size -- the threshold below which frag=orig
       *  beat size -- the amount transferred on any given beat
*
* The relationships are as follows:
* max >= orig >= frag
* max > min >= beat
* It IS possible that orig <= min (then frag=orig; ie: no fragmentation)
*
* The fragment# (sent via TL.source) is measured in multiples of min size.
* Meanwhile, to track the progress, counters measure in multiples of beat size.
*
* Here is an example of a bus with max=256, min=8, beat=4 and a device supporting 16.
*
* in.A out.A (frag#) out.D (frag#) in.D gen# ack#
* get64 get16 6 ackD16 6 ackD64 12 15
* ackD16 6 ackD64 14
* ackD16 6 ackD64 13
* ackD16 6 ackD64 12
* get16 4 ackD16 4 ackD64 8 11
* ackD16 4 ackD64 10
* ackD16 4 ackD64 9
* ackD16 4 ackD64 8
* get16 2 ackD16 2 ackD64 4 7
* ackD16 2 ackD64 6
* ackD16 2 ackD64 5
* ackD16 2 ackD64 4
* get16 0 ackD16 0 ackD64 0 3
* ackD16 0 ackD64 2
* ackD16 0 ackD64 1
* ackD16 0 ackD64 0
*
* get8 get8 0 ackD8 0 ackD8 0 1
* ackD8 0 ackD8 0
*
* get4 get4 0 ackD4 0 ackD4 0 0
* get1 get1 0 ackD1 0 ackD1 0 0
*
* put64 put16 6 15
* put64 put16 6 14
* put64 put16 6 13
* put64 put16 6 ack16 6 12 12
* put64 put16 4 11
* put64 put16 4 10
* put64 put16 4 9
* put64 put16 4 ack16 4 8 8
* put64 put16 2 7
* put64 put16 2 6
* put64 put16 2 5
* put64 put16 2 ack16 2 4 4
* put64 put16 0 3
* put64 put16 0 2
* put64 put16 0 1
* put64 put16 0 ack16 0 ack64 0 0
*
* put8 put8 0 1
* put8 put8 0 ack8 0 ack8 0 0
*
* put4 put4 0 ack4 0 ack4 0 0
* put1 put1 0 ack1 0 ack1 0 0
*/
val counterBits = log2Up(maxSize/beatBytes)
val maxDownSize = if (alwaysMin) minSize else min(manager.maxTransfer, maxSize)
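      // (Added note) For the example in the comment above (maxSize=256,
      // beatBytes=4, device supporting 16, alwaysMin=false) this gives
      // counterBits = log2Up(256/4) = 6 and maxDownSize = min(16, 256) = 16.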
// Consider the following waveform for two 4-beat bursts:
// ---A----A------------
// -------D-----DDD-DDDD
// Under TL rules, the second A can use the same source as the first A,
// because the source is released for reuse on the first response beat.
//
// However, if we fragment the requests, it looks like this:
// ---3210-3210---------
// -------3-----210-3210
      // ... now we've broken the rules, because fragments 2, 1 and 0 are in flight twice.
//
// This phenomenon means we can have essentially 2*maxSize/minSize-1
// fragmented transactions in flight per original transaction source.
//
// To keep the source unique, we encode the beat counter in the low
// bits of the source. To solve the overlap, we use a toggle bit.
// Whatever toggle bit the D is reassembling, A will use the opposite.
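      // (Added note) Concretely, the outgoing A source is laid out as
      //   { original source, [PutFull bit when earlyAck == PutFulls], toggle, fragment# }.
      // For example, with maxSize=64 and minSize=8 (fragmentBits = 3, no PutFull
      // bit), an original source of 3 with toggle = 1 and fragment# = 6 becomes
      //   (3 << 4) | (1 << 3) | 6 = 62,
      // and the return path recovers source 3 via `source >> addedBits` below.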
// First, handle the return path
val acknum = RegInit(0.U(counterBits.W))
val dOrig = Reg(UInt())
val dToggle = RegInit(false.B)
val dFragnum = out.d.bits.source(fragmentBits-1, 0)
val dFirst = acknum === 0.U
val dLast = dFragnum === 0.U // only for AccessAck (!Data)
val dsizeOH = UIntToOH (out.d.bits.size, log2Ceil(maxDownSize)+1)
val dsizeOH1 = UIntToOH1(out.d.bits.size, log2Up(maxDownSize))
val dHasData = edgeOut.hasData(out.d.bits)
// calculate new acknum
val acknum_fragment = dFragnum << log2Ceil(minSize/beatBytes)
val acknum_size = dsizeOH1 >> log2Ceil(beatBytes)
assert (!out.d.valid || (acknum_fragment & acknum_size) === 0.U)
val dFirst_acknum = acknum_fragment | Mux(dHasData, acknum_size, 0.U)
val ack_decrement = Mux(dHasData, 1.U, dsizeOH >> log2Ceil(beatBytes))
// calculate the original size
val dFirst_size = OH1ToUInt((dFragnum << log2Ceil(minSize)) | dsizeOH1)
when (out.d.fire) {
acknum := Mux(dFirst, dFirst_acknum, acknum - ack_decrement)
when (dFirst) {
dOrig := dFirst_size
dToggle := out.d.bits.source(fragmentBits)
}
}
// Swallow up non-data ack fragments
val doEarlyAck = earlyAck match {
case EarlyAck.AllPuts => true.B
case EarlyAck.PutFulls => out.d.bits.source(fragmentBits+1)
case EarlyAck.None => false.B
}
val drop = !dHasData && !Mux(doEarlyAck, dFirst, dLast)
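      // (Added note) e.g. when a put64 is split into four put16s, all four
      // returning AccessAcks are data-less; with earlyAck == None only the ack
      // for fragment# 0 (the last one) is forwarded to in.d, and with
      // EarlyAck.AllPuts only the first ack is forwarded.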
out.d.ready := in.d.ready || drop
in.d.valid := out.d.valid && !drop
in.d.bits := out.d.bits // pass most stuff unchanged
in.d.bits.source := out.d.bits.source >> addedBits
in.d.bits.size := Mux(dFirst, dFirst_size, dOrig)
if (edgeOut.manager.mayDenyPut) {
val r_denied = Reg(Bool())
val d_denied = (!dFirst && r_denied) || out.d.bits.denied
when (out.d.fire) { r_denied := d_denied }
in.d.bits.denied := d_denied
}
if (edgeOut.manager.mayDenyGet) {
// Take denied only from the first beat and hold that value
val d_denied = out.d.bits.denied holdUnless dFirst
when (dHasData) {
in.d.bits.denied := d_denied
in.d.bits.corrupt := d_denied || out.d.bits.corrupt
}
}
// What maximum transfer sizes do downstream devices support?
val maxArithmetics = managers.map(_.supportsArithmetic.max)
val maxLogicals = managers.map(_.supportsLogical.max)
val maxGets = managers.map(_.supportsGet.max)
val maxPutFulls = managers.map(_.supportsPutFull.max)
val maxPutPartials = managers.map(_.supportsPutPartial.max)
val maxHints = managers.map(m => if (m.supportsHint) maxDownSize else 0)
// We assume that the request is valid => size 0 is impossible
val lgMinSize = log2Ceil(minSize).U
val maxLgArithmetics = maxArithmetics.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgLogicals = maxLogicals .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgGets = maxGets .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutFulls = maxPutFulls .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgPutPartials = maxPutPartials.map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
val maxLgHints = maxHints .map(m => if (m == 0) lgMinSize else log2Ceil(m).U)
// Make the request repeatable
val repeater = Module(new Repeater(in.a.bits))
repeater.io.enq <> in.a
val in_a = repeater.io.deq
      // If this is in front of a single manager, these become constants
val find = manager.findFast(edgeIn.address(in_a.bits))
val maxLgArithmetic = Mux1H(find, maxLgArithmetics)
val maxLgLogical = Mux1H(find, maxLgLogicals)
val maxLgGet = Mux1H(find, maxLgGets)
val maxLgPutFull = Mux1H(find, maxLgPutFulls)
val maxLgPutPartial = Mux1H(find, maxLgPutPartials)
val maxLgHint = Mux1H(find, maxLgHints)
val limit = if (alwaysMin) lgMinSize else
MuxLookup(in_a.bits.opcode, lgMinSize)(Array(
TLMessages.PutFullData -> maxLgPutFull,
TLMessages.PutPartialData -> maxLgPutPartial,
TLMessages.ArithmeticData -> maxLgArithmetic,
TLMessages.LogicalData -> maxLgLogical,
TLMessages.Get -> maxLgGet,
TLMessages.Hint -> maxLgHint))
val aOrig = in_a.bits.size
val aFrag = Mux(aOrig > limit, limit, aOrig)
val aOrigOH1 = UIntToOH1(aOrig, log2Ceil(maxSize))
val aFragOH1 = UIntToOH1(aFrag, log2Up(maxDownSize))
val aHasData = edgeIn.hasData(in_a.bits)
val aMask = Mux(aHasData, 0.U, aFragOH1)
val gennum = RegInit(0.U(counterBits.W))
val aFirst = gennum === 0.U
val old_gennum1 = Mux(aFirst, aOrigOH1 >> log2Ceil(beatBytes), gennum - 1.U)
val new_gennum = ~(~old_gennum1 | (aMask >> log2Ceil(beatBytes))) // ~(~x|y) is width safe
val aFragnum = ~(~(old_gennum1 >> log2Ceil(minSize/beatBytes)) | (aFragOH1 >> log2Ceil(minSize)))
val aLast = aFragnum === 0.U
val aToggle = !Mux(aFirst, dToggle, RegEnable(dToggle, aFirst))
val aFull = if (earlyAck == EarlyAck.PutFulls) Some(in_a.bits.opcode === TLMessages.PutFullData) else None
when (out.a.fire) { gennum := new_gennum }
repeater.io.repeat := !aHasData && aFragnum =/= 0.U
out.a <> in_a
out.a.bits.address := in_a.bits.address | ~(old_gennum1 << log2Ceil(beatBytes) | ~aOrigOH1 | aFragOH1 | (minSize-1).U)
out.a.bits.source := Cat(Seq(in_a.bits.source) ++ aFull ++ Seq(aToggle.asUInt, aFragnum))
out.a.bits.size := aFrag
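      // (Added worked example, matching the table in the comment block above:
      // maxSize=256, minSize=8, beatBytes=4, device supports 16.) A get64 to
      // address 0x40 goes out as four get16s at 0x40, 0x50, 0x60 and 0x70,
      // carrying fragment#s 6, 4, 2 and 0: the address walks upward while the
      // fragment# counts down, and fragment# 0 marks the final fragment.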
// Optimize away some of the Repeater's registers
assert (!repeater.io.full || !aHasData)
out.a.bits.data := in.a.bits.data
val fullMask = ((BigInt(1) << beatBytes) - 1).U
assert (!repeater.io.full || in_a.bits.mask === fullMask)
out.a.bits.mask := Mux(repeater.io.full, fullMask, in.a.bits.mask)
out.a.bits.user.waiveAll :<= in.a.bits.user.subset(_.isData)
// Tie off unused channels
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLFragmenter
{
def apply(minSize: Int, maxSize: Int, alwaysMin: Boolean = false, earlyAck: EarlyAck.T = EarlyAck.None, holdFirstDeny: Boolean = false, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode =
{
if (minSize <= maxSize) {
val fragmenter = LazyModule(new TLFragmenter(minSize, maxSize, alwaysMin, earlyAck, holdFirstDeny, nameSuffix))
fragmenter.node
} else { TLEphemeralNode()(ValName("no_fragmenter")) }
}
def apply(wrapper: TLBusWrapper, nameSuffix: Option[String])(implicit p: Parameters): TLNode = apply(wrapper.beatBytes, wrapper.blockBytes, nameSuffix = nameSuffix)
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper, None)
}
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMFragmenter(ramBeatBytes: Int, maxSize: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("Fragmenter"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff), beatBytes = ramBeatBytes))
(ram.node
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLDelayer(0.1)
:= TLFragmenter(ramBeatBytes, maxSize, earlyAck = EarlyAck.AllPuts)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= TLFragmenter(ramBeatBytes, maxSize/2)
:= TLDelayer(0.1)
:= TLBuffer(BufferParams.flow)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMFragmenterTest(ramBeatBytes: Int, maxSize: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMFragmenter(ramBeatBytes,maxSize,txns)).module)
io.finished := dut.io.finished
dut.io.start := io.start
}
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning the [[AutoBundle]] and any unconnected [[Dangle]]s from this module
    * and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
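  // (Illustrative sketch, not from this file) A subclass with a real clock for
  // its children would typically drive these from its own ports, e.g.
  //   childClock := io.clock_in
  //   childReset := io.reset_in
  // (names are placeholders), so that implicitly-clocked children such as
  // Monitors are clocked correctly.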
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.diplomacy.{
AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry,
IdRange, RegionType, TransferSizes
}
import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions}
import freechips.rocketchip.util.{
AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase,
CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct
}
import scala.math.max
//These transfer sizes describe requests issued from masters on the A channel that will be responded by slaves on the D channel
case class TLMasterToSlaveTransferSizes(
// Supports both Acquire+Release of the following two sizes:
acquireT: TransferSizes = TransferSizes.none,
acquireB: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none)
extends TLCommonTransferSizes {
def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .intersect(rhs.acquireT),
acquireB = acquireB .intersect(rhs.acquireB),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint))
def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes(
acquireT = acquireT .mincover(rhs.acquireT),
acquireB = acquireB .mincover(rhs.acquireB),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint))
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(acquireT, "T"),
str(acquireB, "B"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
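  // (Added note) e.g. a slave supporting only Get and PutFull renders as "GF".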
// Prints out the actual information in a user readable way
def infoString = {
s"""acquireT = ${acquireT}
|acquireB = ${acquireB}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLMasterToSlaveTransferSizes {
def unknownEmits = TLMasterToSlaveTransferSizes(
acquireT = TransferSizes(1, 4096),
acquireB = TransferSizes(1, 4096),
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096))
def unknownSupports = TLMasterToSlaveTransferSizes()
}
//These transfer sizes describe requests issued from slaves on the B channel that will be responded by masters on the C channel
case class TLSlaveToMasterTransferSizes(
probe: TransferSizes = TransferSizes.none,
arithmetic: TransferSizes = TransferSizes.none,
logical: TransferSizes = TransferSizes.none,
get: TransferSizes = TransferSizes.none,
putFull: TransferSizes = TransferSizes.none,
putPartial: TransferSizes = TransferSizes.none,
hint: TransferSizes = TransferSizes.none
) extends TLCommonTransferSizes {
def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .intersect(rhs.probe),
arithmetic = arithmetic.intersect(rhs.arithmetic),
logical = logical .intersect(rhs.logical),
get = get .intersect(rhs.get),
putFull = putFull .intersect(rhs.putFull),
putPartial = putPartial.intersect(rhs.putPartial),
hint = hint .intersect(rhs.hint)
)
def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes(
probe = probe .mincover(rhs.probe),
arithmetic = arithmetic.mincover(rhs.arithmetic),
logical = logical .mincover(rhs.logical),
get = get .mincover(rhs.get),
putFull = putFull .mincover(rhs.putFull),
putPartial = putPartial.mincover(rhs.putPartial),
hint = hint .mincover(rhs.hint)
)
// Reduce rendering to a simple yes/no per field
override def toString = {
def str(x: TransferSizes, flag: String) = if (x.none) "" else flag
def flags = Vector(
str(probe, "P"),
str(arithmetic, "A"),
str(logical, "L"),
str(get, "G"),
str(putFull, "F"),
str(putPartial, "P"),
str(hint, "H"))
flags.mkString
}
// Prints out the actual information in a user readable way
def infoString = {
s"""probe = ${probe}
|arithmetic = ${arithmetic}
|logical = ${logical}
|get = ${get}
|putFull = ${putFull}
|putPartial = ${putPartial}
|hint = ${hint}
|
|""".stripMargin
}
}
object TLSlaveToMasterTransferSizes {
def unknownEmits = TLSlaveToMasterTransferSizes(
arithmetic = TransferSizes(1, 4096),
logical = TransferSizes(1, 4096),
get = TransferSizes(1, 4096),
putFull = TransferSizes(1, 4096),
putPartial = TransferSizes(1, 4096),
hint = TransferSizes(1, 4096),
probe = TransferSizes(1, 4096))
def unknownSupports = TLSlaveToMasterTransferSizes()
}
trait TLCommonTransferSizes {
def arithmetic: TransferSizes
def logical: TransferSizes
def get: TransferSizes
def putFull: TransferSizes
def putPartial: TransferSizes
def hint: TransferSizes
}
class TLSlaveParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
setName: Option[String],
val address: Seq[AddressSet],
val regionType: RegionType.T,
val executable: Boolean,
val fifoId: Option[Int],
val supports: TLMasterToSlaveTransferSizes,
val emits: TLSlaveToMasterTransferSizes,
// By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation)
val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData
// If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order
// Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo
val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData
val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck
// ReleaseAck may NEVER be denied
extends SimpleProduct
{
def sortedAddress = address.sorted
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters]
override def productPrefix = "TLSlaveParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 11
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => address
case 2 => resources
case 3 => regionType
case 4 => executable
case 5 => fifoId
case 6 => supports
case 7 => emits
case 8 => alwaysGrantsT
case 9 => mayDenyGet
case 10 => mayDenyPut
case _ => throw new IndexOutOfBoundsException(n.toString)
}
def supportsAcquireT: TransferSizes = supports.acquireT
def supportsAcquireB: TransferSizes = supports.acquireB
def supportsArithmetic: TransferSizes = supports.arithmetic
def supportsLogical: TransferSizes = supports.logical
def supportsGet: TransferSizes = supports.get
def supportsPutFull: TransferSizes = supports.putFull
def supportsPutPartial: TransferSizes = supports.putPartial
def supportsHint: TransferSizes = supports.hint
require (!address.isEmpty, "Address cannot be empty")
address.foreach { a => require (a.finite, "Address must be finite") }
address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)")
require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)")
require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)")
require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)")
require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)")
require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)")
require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT")
// Make sure that the regionType agrees with the capabilities
require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached
require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire
require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet
val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected")
val maxTransfer = List( // Largest supported transfer of all types
supportsAcquireT.max,
supportsAcquireB.max,
supportsArithmetic.max,
supportsLogical.max,
supportsGet.max,
supportsPutFull.max,
supportsPutPartial.max).max
val maxAddress = address.map(_.max).max
val minAlignment = address.map(_.alignment).min
// The device had better not support a transfer larger than its alignment
require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)")
def toResource: ResourceAddress = {
ResourceAddress(address, ResourcePermissions(
r = supportsAcquireB || supportsGet,
w = supportsAcquireT || supportsPutFull,
x = executable,
c = supportsAcquireB,
a = supportsArithmetic && supportsLogical))
}
def findTreeViolation() = nodePath.find {
case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false
case _: SinkNode[_, _, _, _, _] => false
case node => node.inputs.size != 1
}
def isTree = findTreeViolation() == None
def infoString = {
s"""Slave Name = ${name}
|Slave Address = ${address}
|supports = ${supports.infoString}
|
|""".stripMargin
}
def v1copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
new TLSlaveParameters(
setName = setName,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = emits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: Option[String] = setName,
address: Seq[AddressSet] = address,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
fifoId: Option[Int] = fifoId,
supports: TLMasterToSlaveTransferSizes = supports,
emits: TLSlaveToMasterTransferSizes = emits,
alwaysGrantsT: Boolean = alwaysGrantsT,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
@deprecated("Use v1copy instead of copy","")
def copy(
address: Seq[AddressSet] = address,
resources: Seq[Resource] = resources,
regionType: RegionType.T = regionType,
executable: Boolean = executable,
nodePath: Seq[BaseNode] = nodePath,
supportsAcquireT: TransferSizes = supports.acquireT,
supportsAcquireB: TransferSizes = supports.acquireB,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint,
mayDenyGet: Boolean = mayDenyGet,
mayDenyPut: Boolean = mayDenyPut,
alwaysGrantsT: Boolean = alwaysGrantsT,
fifoId: Option[Int] = fifoId) =
{
v1copy(
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supportsAcquireT = supportsAcquireT,
supportsAcquireB = supportsAcquireB,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
}
object TLSlaveParameters {
def v1(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
{
new TLSlaveParameters(
setName = None,
address = address,
resources = resources,
regionType = regionType,
executable = executable,
nodePath = nodePath,
supports = TLMasterToSlaveTransferSizes(
acquireT = supportsAcquireT,
acquireB = supportsAcquireB,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLSlaveToMasterTransferSizes.unknownEmits,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut,
alwaysGrantsT = alwaysGrantsT,
fifoId = fifoId)
}
def v2(
address: Seq[AddressSet],
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Seq(),
name: Option[String] = None,
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
fifoId: Option[Int] = None,
supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports,
emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits,
alwaysGrantsT: Boolean = false,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false) =
{
new TLSlaveParameters(
nodePath = nodePath,
resources = resources,
setName = name,
address = address,
regionType = regionType,
executable = executable,
fifoId = fifoId,
supports = supports,
emits = emits,
alwaysGrantsT = alwaysGrantsT,
mayDenyGet = mayDenyGet,
mayDenyPut = mayDenyPut)
}
}
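// Illustrative sketch, not part of the original file: a hypothetical memory-mapped RAM
// described with the TLSlaveParameters.v1 factory above. The base address, transfer sizes
// and fifoId are assumptions chosen only to show how the fields fit together.
object TLSlaveParametersExample {
  val hypotheticalRam = TLSlaveParameters.v1(
    address            = Seq(AddressSet(0x80000000L, 0xffff)), // one aligned 64 KiB region
    regionType         = RegionType.UNCACHED,                  // no Acquire support, so at most UNCACHED
    executable         = true,
    supportsGet        = TransferSizes(1, 64),
    supportsPutFull    = TransferSizes(1, 64),
    supportsPutPartial = TransferSizes(1, 64),                 // PutFull must cover PutPartial (see the requires above)
    fifoId             = Some(0))                              // promise FIFO ordering within this domain
}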
object TLManagerParameters {
@deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","")
def apply(
address: Seq[AddressSet],
resources: Seq[Resource] = Seq(),
regionType: RegionType.T = RegionType.GET_EFFECTS,
executable: Boolean = false,
nodePath: Seq[BaseNode] = Seq(),
supportsAcquireT: TransferSizes = TransferSizes.none,
supportsAcquireB: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none,
mayDenyGet: Boolean = false,
mayDenyPut: Boolean = false,
alwaysGrantsT: Boolean = false,
fifoId: Option[Int] = None) =
TLSlaveParameters.v1(
address,
resources,
regionType,
executable,
nodePath,
supportsAcquireT,
supportsAcquireB,
supportsArithmetic,
supportsLogical,
supportsGet,
supportsPutFull,
supportsPutPartial,
supportsHint,
mayDenyGet,
mayDenyPut,
alwaysGrantsT,
fifoId,
)
}
case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int])
{
def members = Seq(a, b, c, d)
members.collect { case Some(beatBytes) =>
require (isPow2(beatBytes), "Data channel width must be a power of 2")
}
}
object TLChannelBeatBytes{
def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes(
Some(beatBytes),
Some(beatBytes),
Some(beatBytes),
Some(beatBytes))
def apply(): TLChannelBeatBytes = TLChannelBeatBytes(
None,
None,
None,
None)
}
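// Illustrative sketch, not part of the original file: the two factories above either pin a
// single beat width onto all four data-carrying channels or leave every width undefined.
object TLChannelBeatBytesExample {
  val uniform8 = TLChannelBeatBytes(8) // a, b, c and d are all Some(8); the width must be a power of 2
  val unknown  = TLChannelBeatBytes()  // all four widths left as None
}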
class TLSlavePortParameters private(
val slaves: Seq[TLSlaveParameters],
val channelBytes: TLChannelBeatBytes,
val endSinkId: Int,
val minLatency: Int,
val responseFields: Seq[BundleFieldBase],
val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
def sortedSlaves = slaves.sortBy(_.sortedAddress.head)
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters]
override def productPrefix = "TLSlavePortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => slaves
case 1 => channelBytes
case 2 => endSinkId
case 3 => minLatency
case 4 => responseFields
case 5 => requestKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!slaves.isEmpty, "Slave ports must have slaves")
require (endSinkId >= 0, "Sink ids cannot be negative")
require (minLatency >= 0, "Minimum required latency cannot be negative")
// Using this API implies you cannot handle mixed-width busses
def beatBytes = {
channelBytes.members.foreach { width =>
require (width.isDefined && width == channelBytes.a)
}
channelBytes.a.get
}
// TODO this should be deprecated
def managers = slaves
def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = {
val relevant = slaves.filter(m => policy(m))
relevant.foreach { m =>
require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ")
}
}
// Bounds on required sizes
def maxAddress = slaves.map(_.maxAddress).max
def maxTransfer = slaves.map(_.maxTransfer).max
def mayDenyGet = slaves.exists(_.mayDenyGet)
def mayDenyPut = slaves.exists(_.mayDenyPut)
// Diplomatically determined operation sizes emitted by all outward Slaves,
// as opposed to the emits*Safe methods, which generate circuitry to check specific addresses
val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _)
// Diplomatically determined operation sizes emitted by at least one outward Slave,
// as opposed to the emits*Safe methods, which generate circuitry to check specific addresses
val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _)
// Diplomatically determined operation sizes supported by all outward Slaves,
// as opposed to the supports*Safe/Fast methods, which generate circuitry to check specific addresses
val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _)
val allSupportAcquireT = allSupportClaims.acquireT
val allSupportAcquireB = allSupportClaims.acquireB
val allSupportArithmetic = allSupportClaims.arithmetic
val allSupportLogical = allSupportClaims.logical
val allSupportGet = allSupportClaims.get
val allSupportPutFull = allSupportClaims.putFull
val allSupportPutPartial = allSupportClaims.putPartial
val allSupportHint = allSupportClaims.hint
// Diplomatically determined operation sizes supported by at least one outward Slave,
// as opposed to the supports*Safe/Fast methods, which generate circuitry to check specific addresses
val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _)
val anySupportAcquireT = !anySupportClaims.acquireT.none
val anySupportAcquireB = !anySupportClaims.acquireB.none
val anySupportArithmetic = !anySupportClaims.arithmetic.none
val anySupportLogical = !anySupportClaims.logical.none
val anySupportGet = !anySupportClaims.get.none
val anySupportPutFull = !anySupportClaims.putFull.none
val anySupportPutPartial = !anySupportClaims.putPartial.none
val anySupportHint = !anySupportClaims.hint.none
// Supporting Acquire means being routable for GrantAck
require ((endSinkId == 0) == !anySupportAcquireB)
// These return Option[TLSlaveParameters] for your convenience
def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address)))
// The safe version will check the entire address
def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _)))
// The fast version assumes the address is valid (you probably want fastProperty instead of this function)
def findFast(address: UInt) = {
val routingMask = AddressDecoder(slaves.map(_.address))
VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _)))
}
// Compute the simplest AddressSets that decide a key
def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = {
val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) =>
k -> vs.flatMap(_._2)
}
val reductionMask = AddressDecoder(groups.map(_._2))
groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) }
}
// Select a property
def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D =
Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) })
// Note: returns the actual fifoId + 1 or 0 if None
def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U)
def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B)
// Does this Port manage this ID/address?
def containsSafe(address: UInt) = findSafe(address).reduce(_ || _)
private def addressHelper(
// setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity
safe: Boolean,
// member filters out the sizes being checked based on the opcode being emitted or supported
member: TLSlaveParameters => TransferSizes,
address: UInt,
lgSize: UInt,
// range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity
range: Option[TransferSizes]): Bool = {
// trim reduces circuit complexity by intersecting checked sizes with the range argument
def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x)
// groupBy returns an unordered map; groupByIntoSeq converts back to a Seq and sorts the result for determinism
// member selects the kind of transfer being checked (the opcode you are filtering on), and each
// slave's sizes for that opcode are first trimmed to the sizes the caller cares about
// The slaves are then grouped by that trimmed transfer-size range, so instead of a list of slaves,
// supportCases is a map from transfer size to the address sets which emit or support that size;
// a single size may map to several address ranges
// Safety is a trade-off between checking all possible addresses and checking only the addresses
// that are known to have supported sizes: the safe version is a more expensive circuit but always
// gives the right answer even for an illegal address, whereas the not-safe (fast) version is a
// cheaper circuit that presumes address legality and may answer incorrectly for an illegal address
val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) =>
k -> vs.flatMap(_.address)
}
// safe produces a circuit that compares against all possible addresses,
// whereas fast presumes that the address is legal but uses an efficient address decoder
val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2))
// Simplified creates the most concise possible representation of each case's address sets based on the mask.
val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) }
simplified.map { case (s, a) =>
// s is a size: either the caller's range is exactly s (so the size check is statically true) or the operation's size must be contained in s
// We return an or-reduction of all the cases, checking whether any contains both the dynamic size and dynamic address on the wire.
((Some(s) == range).B || s.containsLg(lgSize)) &&
a.map(_.contains(address)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range)
def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range)
def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range)
def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range)
def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range)
def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range)
def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range)
def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range)
def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range)
def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range)
def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range)
def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putPartial, address, lgSize, range)
def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range)
def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range)
def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range)
def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range)
def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range)
def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range)
def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range)
def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range)
def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption
def isTree = !slaves.exists(!_.isTree)
def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString
def v1copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
def v2copy(
slaves: Seq[TLSlaveParameters] = slaves,
channelBytes: TLChannelBeatBytes = channelBytes,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
new TLSlavePortParameters(
slaves = slaves,
channelBytes = channelBytes,
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
managers: Seq[TLSlaveParameters] = slaves,
beatBytes: Int = -1,
endSinkId: Int = endSinkId,
minLatency: Int = minLatency,
responseFields: Seq[BundleFieldBase] = responseFields,
requestKeys: Seq[BundleKeyBase] = requestKeys) =
{
v1copy(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
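// Illustrative sketch, not part of the original file: inside hardware that holds a slave port
// `manager` of this type, the Safe helpers above compare against every possible address while
// the Fast helpers presume the address is already legal (a cheaper circuit). The signal names
// `addr` and `lgSize` below are hypothetical.
//   val okSafe = manager.supportsGetSafe(addr, lgSize)
//   val okFast = manager.supportsGetFast(addr, lgSize, range = Some(TransferSizes(1, 8)))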
object TLSlavePortParameters {
def v1(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
new TLSlavePortParameters(
slaves = managers,
channelBytes = TLChannelBeatBytes(beatBytes),
endSinkId = endSinkId,
minLatency = minLatency,
responseFields = responseFields,
requestKeys = requestKeys)
}
}
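// Illustrative sketch, not part of the original file: a single-slave port built with the v1
// factory above, reusing the hypothetical device from TLSlaveParametersExample. With no
// Acquire support, endSinkId stays at its default of 0.
object TLSlavePortParametersExample {
  val hypotheticalPort = TLSlavePortParameters.v1(
    managers  = Seq(TLSlaveParametersExample.hypotheticalRam),
    beatBytes = 8) // uniform 8-byte data channels; beatBytes on this port then reads back 8
}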
object TLManagerPortParameters {
@deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","")
def apply(
managers: Seq[TLSlaveParameters],
beatBytes: Int,
endSinkId: Int = 0,
minLatency: Int = 0,
responseFields: Seq[BundleFieldBase] = Nil,
requestKeys: Seq[BundleKeyBase] = Nil) =
{
TLSlavePortParameters.v1(
managers,
beatBytes,
endSinkId,
minLatency,
responseFields,
requestKeys)
}
}
class TLMasterParameters private(
val nodePath: Seq[BaseNode],
val resources: Seq[Resource],
val name: String,
val visibility: Seq[AddressSet],
val unusedRegionTypes: Set[RegionType.T],
val executesOnly: Boolean,
val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C.
val supports: TLSlaveToMasterTransferSizes,
val emits: TLMasterToSlaveTransferSizes,
val neverReleasesData: Boolean,
val sourceId: IdRange) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters]
override def productPrefix = "TLMasterParameters"
// We intentionally omit nodePath for equality testing / formatting
def productArity: Int = 10
def productElement(n: Int): Any = n match {
case 0 => name
case 1 => sourceId
case 2 => resources
case 3 => visibility
case 4 => unusedRegionTypes
case 5 => executesOnly
case 6 => requestFifo
case 7 => supports
case 8 => emits
case 9 => neverReleasesData
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!sourceId.isEmpty)
require (!visibility.isEmpty)
require (supports.putFull.contains(supports.putPartial))
// We only support these operations if we support Probe (ie: we're a cache)
require (supports.probe.contains(supports.arithmetic))
require (supports.probe.contains(supports.logical))
require (supports.probe.contains(supports.get))
require (supports.probe.contains(supports.putFull))
require (supports.probe.contains(supports.putPartial))
require (supports.probe.contains(supports.hint))
visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") }
val maxTransfer = List(
supports.probe.max,
supports.arithmetic.max,
supports.logical.max,
supports.get.max,
supports.putFull.max,
supports.putPartial.max).max
def infoString = {
s"""Master Name = ${name}
|visibility = ${visibility}
|emits = ${emits.infoString}
|sourceId = ${sourceId}
|
|""".stripMargin
}
def v1copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = this.resources,
name = name,
visibility = visibility,
unusedRegionTypes = this.unusedRegionTypes,
executesOnly = this.executesOnly,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = this.emits,
neverReleasesData = this.neverReleasesData,
sourceId = sourceId)
}
def v2copy(
nodePath: Seq[BaseNode] = nodePath,
resources: Seq[Resource] = resources,
name: String = name,
visibility: Seq[AddressSet] = visibility,
unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes,
executesOnly: Boolean = executesOnly,
requestFifo: Boolean = requestFifo,
supports: TLSlaveToMasterTransferSizes = supports,
emits: TLMasterToSlaveTransferSizes = emits,
neverReleasesData: Boolean = neverReleasesData,
sourceId: IdRange = sourceId) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
@deprecated("Use v1copy instead of copy","")
def copy(
name: String = name,
sourceId: IdRange = sourceId,
nodePath: Seq[BaseNode] = nodePath,
requestFifo: Boolean = requestFifo,
visibility: Seq[AddressSet] = visibility,
supportsProbe: TransferSizes = supports.probe,
supportsArithmetic: TransferSizes = supports.arithmetic,
supportsLogical: TransferSizes = supports.logical,
supportsGet: TransferSizes = supports.get,
supportsPutFull: TransferSizes = supports.putFull,
supportsPutPartial: TransferSizes = supports.putPartial,
supportsHint: TransferSizes = supports.hint) =
{
v1copy(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
object TLMasterParameters {
def v1(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = Nil,
name = name,
visibility = visibility,
unusedRegionTypes = Set(),
executesOnly = false,
requestFifo = requestFifo,
supports = TLSlaveToMasterTransferSizes(
probe = supportsProbe,
arithmetic = supportsArithmetic,
logical = supportsLogical,
get = supportsGet,
putFull = supportsPutFull,
putPartial = supportsPutPartial,
hint = supportsHint),
emits = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData = false,
sourceId = sourceId)
}
def v2(
nodePath: Seq[BaseNode] = Seq(),
resources: Seq[Resource] = Nil,
name: String,
visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)),
unusedRegionTypes: Set[RegionType.T] = Set(),
executesOnly: Boolean = false,
requestFifo: Boolean = false,
supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports,
emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits,
neverReleasesData: Boolean = false,
sourceId: IdRange = IdRange(0,1)) =
{
new TLMasterParameters(
nodePath = nodePath,
resources = resources,
name = name,
visibility = visibility,
unusedRegionTypes = unusedRegionTypes,
executesOnly = executesOnly,
requestFifo = requestFifo,
supports = supports,
emits = emits,
neverReleasesData = neverReleasesData,
sourceId = sourceId)
}
}
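// Illustrative sketch, not part of the original file: a hypothetical non-caching DMA master
// built with the v1 factory above. The name, sourceId range and requestFifo flag are assumptions.
object TLMasterParametersExample {
  val hypotheticalDma = TLMasterParameters.v1(
    name        = "example-dma",
    sourceId    = IdRange(0, 4), // four in-flight source ids
    requestFifo = true)          // request (but do not require) FIFO responses on channel A
}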
object TLClientParameters {
@deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","")
def apply(
name: String,
sourceId: IdRange = IdRange(0,1),
nodePath: Seq[BaseNode] = Seq(),
requestFifo: Boolean = false,
visibility: Seq[AddressSet] = Seq(AddressSet.everything),
supportsProbe: TransferSizes = TransferSizes.none,
supportsArithmetic: TransferSizes = TransferSizes.none,
supportsLogical: TransferSizes = TransferSizes.none,
supportsGet: TransferSizes = TransferSizes.none,
supportsPutFull: TransferSizes = TransferSizes.none,
supportsPutPartial: TransferSizes = TransferSizes.none,
supportsHint: TransferSizes = TransferSizes.none) =
{
TLMasterParameters.v1(
name = name,
sourceId = sourceId,
nodePath = nodePath,
requestFifo = requestFifo,
visibility = visibility,
supportsProbe = supportsProbe,
supportsArithmetic = supportsArithmetic,
supportsLogical = supportsLogical,
supportsGet = supportsGet,
supportsPutFull = supportsPutFull,
supportsPutPartial = supportsPutPartial,
supportsHint = supportsHint)
}
}
class TLMasterPortParameters private(
val masters: Seq[TLMasterParameters],
val channelBytes: TLChannelBeatBytes,
val minLatency: Int,
val echoFields: Seq[BundleFieldBase],
val requestFields: Seq[BundleFieldBase],
val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct
{
override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters]
override def productPrefix = "TLMasterPortParameters"
def productArity: Int = 6
def productElement(n: Int): Any = n match {
case 0 => masters
case 1 => channelBytes
case 2 => minLatency
case 3 => echoFields
case 4 => requestFields
case 5 => responseKeys
case _ => throw new IndexOutOfBoundsException(n.toString)
}
require (!masters.isEmpty)
require (minLatency >= 0)
def clients = masters
// Require disjoint ranges for Ids
IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) =>
require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}")
}
// Bounds on required sizes
def endSourceId = masters.map(_.sourceId.end).max
def maxTransfer = masters.map(_.maxTransfer).max
// The unused sources < endSourceId
def unusedSources: Seq[Int] = {
val usedSources = masters.map(_.sourceId).sortBy(_.start)
((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) =>
end until start
}
}
// Diplomatically determined operation sizes emitted by all inward Masters,
// as opposed to the emits* methods, which generate circuitry to check specific source ids
val allEmitClaims = masters.map(_.emits).reduce( _ intersect _)
// Diplomatically determined operation sizes emitted by at least one inward Master,
// as opposed to the emits* methods, which generate circuitry to check specific source ids
val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _)
// Diplomatically determined operation sizes supported by all inward Masters,
// as opposed to the supports* methods, which generate circuitry to check specific source ids
val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _)
val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _)
val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _)
val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _)
val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _)
val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _)
val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _)
// Diplomatically determined operation sizes supported by at least one inward Master,
// as opposed to the supports* methods, which generate circuitry to check specific source ids
val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _)
val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _)
val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _)
val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _)
val anySupportPutFull = masters.map(!_.supports.putFull.none) .reduce(_ || _)
val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _)
val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _)
// These return Option[TLMasterParameters] for your convenience
def find(id: Int) = masters.find(_.sourceId.contains(id))
// Synthesizable lookup methods
def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id)))
def contains(id: UInt) = find(id).reduce(_ || _)
def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B))
// Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters
private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = {
val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _)
// This if statement is a coarse generalization of the groupBy in the sourceIdHelper2 version;
// it handles the case where there is only one group.
if (allSame) member(masters(0)).containsLg(lgSize) else {
// Find the master associated with ID and returns whether that particular master is able to receive transaction of lgSize
Mux1H(find(id), masters.map(member(_).containsLg(lgSize)))
}
}
// Check for support of a given operation at a specific id
val supportsProbe = sourceIdHelper(_.supports.probe) _
val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _
val supportsLogical = sourceIdHelper(_.supports.logical) _
val supportsGet = sourceIdHelper(_.supports.get) _
val supportsPutFull = sourceIdHelper(_.supports.putFull) _
val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _
val supportsHint = sourceIdHelper(_.supports.hint) _
// TODO: Merge sourceIdHelper2 with sourceIdHelper
private def sourceIdHelper2(
member: TLMasterParameters => TransferSizes,
sourceId: UInt,
lgSize: UInt): Bool = {
// Because sourceIds are uniquely owned by each master, we use them to group the
// cases that have to be checked.
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) =>
k -> vs.map(_.sourceId)
}
emitCases.map { case (s, a) =>
(s.containsLg(lgSize)) &&
a.map(_.contains(sourceId)).reduce(_||_)
}.foldLeft(false.B)(_||_)
}
// Check for emit of a given operation at a specific id
def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize)
def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize)
def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize)
def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize)
def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize)
def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize)
def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize)
def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize)
def infoString = masters.map(_.infoString).mkString
def v1copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2copy(
masters: Seq[TLMasterParameters] = masters,
channelBytes: TLChannelBeatBytes = channelBytes,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
@deprecated("Use v1copy instead of copy","")
def copy(
clients: Seq[TLMasterParameters] = masters,
minLatency: Int = minLatency,
echoFields: Seq[BundleFieldBase] = echoFields,
requestFields: Seq[BundleFieldBase] = requestFields,
responseKeys: Seq[BundleKeyBase] = responseKeys) =
{
v1copy(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLClientPortParameters {
@deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","")
def apply(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
TLMasterPortParameters.v1(
clients,
minLatency,
echoFields,
requestFields,
responseKeys)
}
}
object TLMasterPortParameters {
def v1(
clients: Seq[TLMasterParameters],
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = clients,
channelBytes = TLChannelBeatBytes(),
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
def v2(
masters: Seq[TLMasterParameters],
channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(),
minLatency: Int = 0,
echoFields: Seq[BundleFieldBase] = Nil,
requestFields: Seq[BundleFieldBase] = Nil,
responseKeys: Seq[BundleKeyBase] = Nil) =
{
new TLMasterPortParameters(
masters = masters,
channelBytes = channelBytes,
minLatency = minLatency,
echoFields = echoFields,
requestFields = requestFields,
responseKeys = responseKeys)
}
}
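// Illustrative sketch, not part of the original file: a master port carrying the single
// hypothetical DMA master from TLMasterParametersExample; endSourceId on this port is then 4.
object TLMasterPortParametersExample {
  val hypotheticalClientPort = TLMasterPortParameters.v1(
    clients = Seq(TLMasterParametersExample.hypotheticalDma))
}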
case class TLBundleParameters(
addressBits: Int,
dataBits: Int,
sourceBits: Int,
sinkBits: Int,
sizeBits: Int,
echoFields: Seq[BundleFieldBase],
requestFields: Seq[BundleFieldBase],
responseFields: Seq[BundleFieldBase],
hasBCE: Boolean)
{
// Chisel has issues with 0-width wires
require (addressBits >= 1)
require (dataBits >= 8)
require (sourceBits >= 1)
require (sinkBits >= 1)
require (sizeBits >= 1)
require (isPow2(dataBits))
echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") }
val addrLoBits = log2Up(dataBits/8)
// Used to uniquify bus IP names
def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u")
def union(x: TLBundleParameters) =
TLBundleParameters(
max(addressBits, x.addressBits),
max(dataBits, x.dataBits),
max(sourceBits, x.sourceBits),
max(sinkBits, x.sinkBits),
max(sizeBits, x.sizeBits),
echoFields = BundleField.union(echoFields ++ x.echoFields),
requestFields = BundleField.union(requestFields ++ x.requestFields),
responseFields = BundleField.union(responseFields ++ x.responseFields),
hasBCE || x.hasBCE)
}
object TLBundleParameters
{
val emptyBundleParams = TLBundleParameters(
addressBits = 1,
dataBits = 8,
sourceBits = 1,
sinkBits = 1,
sizeBits = 1,
echoFields = Nil,
requestFields = Nil,
responseFields = Nil,
hasBCE = false)
def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y))
def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) =
new TLBundleParameters(
addressBits = log2Up(slave.maxAddress + 1),
dataBits = slave.beatBytes * 8,
sourceBits = log2Up(master.endSourceId),
sinkBits = log2Up(slave.endSinkId),
sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1),
echoFields = master.echoFields,
requestFields = BundleField.accept(master.requestFields, slave.requestKeys),
responseFields = BundleField.accept(slave.responseFields, master.responseKeys),
hasBCE = master.anySupportProbe && slave.anySupportAcquireB)
}
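// Illustrative sketch, not part of the original file: pairing the hypothetical master and slave
// ports sketched above derives the physical field widths, e.g. addressBits from the largest
// slave address, dataBits from beatBytes, and hasBCE only when both sides can take part in the
// Acquire/Probe protocol (false here, so shortName ends in "u" for uncached).
//   val exampleBundle = TLBundleParameters(
//     TLMasterPortParametersExample.hypotheticalClientPort,
//     TLSlavePortParametersExample.hypotheticalPort)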
case class TLEdgeParameters(
master: TLMasterPortParameters,
slave: TLSlavePortParameters,
params: Parameters,
sourceInfo: SourceInfo) extends FormatEdge
{
// legacy names:
def manager = slave
def client = master
val maxTransfer = max(master.maxTransfer, slave.maxTransfer)
val maxLgSize = log2Ceil(maxTransfer)
// Sanity check the link...
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})")
def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims)
val bundle = TLBundleParameters(master, slave)
def formatEdge = master.infoString + "\n" + slave.infoString
}
case class TLCreditedDelay(
a: CreditedDelay,
b: CreditedDelay,
c: CreditedDelay,
d: CreditedDelay,
e: CreditedDelay)
{
def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay(
a = a + that.a,
b = b + that.b,
c = c + that.c,
d = d + that.d,
e = e + that.e)
override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})"
}
object TLCreditedDelay {
def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay)
}
case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val delay = client.delay + manager.delay
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters)
case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base))
def formatEdge = client.infoString + "\n" + manager.infoString
}
case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString}
case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString}
case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge
{
val bundle = TLBundleParameters(client.base, manager.base)
def formatEdge = client.infoString + "\n" + manager.infoString
}
// To be unified, devices must agree on all of these terms
case class ManagerUnificationKey(
resources: Seq[Resource],
regionType: RegionType.T,
executable: Boolean,
supportsAcquireT: TransferSizes,
supportsAcquireB: TransferSizes,
supportsArithmetic: TransferSizes,
supportsLogical: TransferSizes,
supportsGet: TransferSizes,
supportsPutFull: TransferSizes,
supportsPutPartial: TransferSizes,
supportsHint: TransferSizes)
object ManagerUnificationKey
{
def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey(
resources = x.resources,
regionType = x.regionType,
executable = x.executable,
supportsAcquireT = x.supportsAcquireT,
supportsAcquireB = x.supportsAcquireB,
supportsArithmetic = x.supportsArithmetic,
supportsLogical = x.supportsLogical,
supportsGet = x.supportsGet,
supportsPutFull = x.supportsPutFull,
supportsPutPartial = x.supportsPutPartial,
supportsHint = x.supportsHint)
}
object ManagerUnification
{
def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = {
slaves.groupBy(ManagerUnificationKey.apply).values.map { seq =>
val agree = seq.forall(_.fifoId == seq.head.fifoId)
seq(0).v1copy(
address = AddressSet.unify(seq.flatMap(_.address)),
fifoId = if (agree) seq(0).fifoId else None)
}.toList
}
}
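// Illustrative sketch, not part of the original file: two hypothetical slaves slaveA and slaveB
// that agree on every ManagerUnificationKey field but cover adjacent address ranges collapse
// into one TLSlaveParameters whose address list is AddressSet.unify of both ranges; if their
// fifoId fields disagree, the merged entry drops the fifoId (None).
//   val merged = ManagerUnification(Seq(slaveA, slaveB))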
case class TLBufferParams(
a: BufferParams = BufferParams.none,
b: BufferParams = BufferParams.none,
c: BufferParams = BufferParams.none,
d: BufferParams = BufferParams.none,
e: BufferParams = BufferParams.none
) extends DirectedBuffers[TLBufferParams] {
def copyIn(x: BufferParams) = this.copy(b = x, d = x)
def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x)
def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x)
}
/** Pretty printing of TL source id maps */
class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] {
private val tlDigits = String.valueOf(tl.endSourceId-1).length()
protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s"
private val sorted = tl.masters.sortBy(_.sourceId)
val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c =>
TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo)
}
}
case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean)
extends IdMapEntry
{
val from = tlId
val to = tlId
val maxTransactionsInFlight = Some(tlId.size)
}
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
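// Illustrative note, not part of the original source: HalfEdges order lexicographically by
// (serial, index), so e.g. HalfEdge(0, 3) < HalfEdge(1, 0) and HalfEdge(1, 0) < HalfEdge(1, 2).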
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
* nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all node are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
* Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
* Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
* the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
* Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to the protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
* It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
* Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
* describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
* Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
* Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to the protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
* interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
* - line `─`: source is processed by a function and the result is passed on to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
      *   `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
   * connections, which need to be resolved to determine how many actual edges they correspond to. We also
   * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
   * edge parameters and later build up correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
      // resolveStar relies on the node subclass to implement the algorithm for this.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
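  // Editorial sketch (not part of the original source): once every binding's edge count
  // is known, the scanLeft above turns counts into ranges. For example, resolved outward
  // counts of Seq(1, 2, 1) give
  //   oSum         == Seq(0, 1, 3, 4)
  //   oPortMapping == Seq((0, 1), (1, 3), (3, 4))
  // so the second binding operator owns edges 1 and 2.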
  /** Sequence of outward ports.
   *
   * This should be called after all star bindings are resolved.
   *
   * Each element is: `j` Port index of this binding in the Node's [[iPortMapping]] on the other side of the binding.
   * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
   * connection was made in the source code.
   */
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this port index range of this node in the other side of node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
   * If you need to access the edges of a foreign Node, use this method (`in`/`out` create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
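  // Editorial sketch (not part of the original source; `someOtherNode` is hypothetical):
  // reading negotiated edge parameters of a foreign node without materializing bundles.
  //   val negotiated = someOtherNode.edges
  //   negotiated.in.foreach  { ei => /* inspect inward edge parameters */ }
  //   negotiated.out.foreach { eo => /* inspect outward edge parameters */ }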
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
    // TODO: unconnected forwarded diplomatic signals are tied to DontCare for compatibility.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
    // TODO: unconnected forwarded diplomatic signals are tied to DontCare for compatibility.
    // In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
   * Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
   * [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
   * Accessors to the result of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
   * [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
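  // Editorial sketch (not part of the original source; `node` is a hypothetical
  // adapter-style node): typical use of `in`/`out` inside a LazyModuleImp, pairing each
  // negotiated bundle with its edge.
  //   lazy val module = new LazyModuleImp(this) {
  //     (node.in zip node.out).foreach { case ((i, edgeIn), (o, edgeOut)) =>
  //       o <> i // simply forward traffic from the inward side to the outward side
  //     }
  //   }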
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
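  // Editorial sketch (not part of the original source): the flip above means the two
  // sides of one connection record complementary binding kinds. For `N :*= foo`, the
  // inward side N records [[BIND_STAR]] while the outward side foo records
  // [[BIND_QUERY]], which is exactly what the cardinality counting above relies on.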
  /** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
        // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
        // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
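  // Editorial sketch (not part of the original source): assuming beatBytes = 8, a
  // 64-byte (lgSize = 6) PutFullData yields
  //   numBeats1 = ((1 << 6) - 1) >> 3 = 7   and   numBeats = 8,
  // while any message without a data payload reports numBeats1 = 0 and numBeats = 1.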
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
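  // Editorial sketch (not part of the original source; `edge` and `tl` are hypothetical):
  // tracking message boundaries on a multi-beat channel with the helpers above.
  //   val (d_first, d_last, d_done) = edge.firstlast(tl.d)
  //   when (tl.d.fire && d_last) { /* entire multi-beat D response received */ }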
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
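  // Editorial sketch (not part of the original source): per the tables above, a Get
  // never needs T permissions, a PutFullData always does, and an AcquireBlock needs
  // them only when requesting write permission (NtoT or BtoT).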
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
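  // Editorial sketch (not part of the original source; `edge` and `tl` are hypothetical):
  // counting outstanding transactions, e.g. to gate a power-down or flush request.
  //   val (inflight, _) = edge.inFlight(tl)
  //   val quiescent = inflight === 0.U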
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
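  // Editorial sketch (not part of the original source; `edge`, `out`, `addr`, and
  // `wantRead` are hypothetical): driving the A channel with a constructed Get.
  //   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
  //   out.a.valid := wantRead && legal
  //   out.a.bits  := get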
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLFragmenter_BootROM( // @[Fragmenter.scala:92:9]
input clock, // @[Fragmenter.scala:92:9]
input reset, // @[Fragmenter.scala:92:9]
output auto_anon_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_anon_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [16:0] auto_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [7:0] auto_anon_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_anon_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [11:0] auto_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [16:0] auto_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [11:0] auto_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_out_d_bits_data // @[LazyModuleImp.scala:107:25]
);
wire _repeater_io_full; // @[Fragmenter.scala:274:30]
wire [2:0] _repeater_io_deq_bits_opcode; // @[Fragmenter.scala:274:30]
wire [2:0] _repeater_io_deq_bits_size; // @[Fragmenter.scala:274:30]
wire [7:0] _repeater_io_deq_bits_source; // @[Fragmenter.scala:274:30]
wire [16:0] _repeater_io_deq_bits_address; // @[Fragmenter.scala:274:30]
wire [7:0] _repeater_io_deq_bits_mask; // @[Fragmenter.scala:274:30]
wire auto_anon_in_a_valid_0 = auto_anon_in_a_valid; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_in_a_bits_opcode_0 = auto_anon_in_a_bits_opcode; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_in_a_bits_param_0 = auto_anon_in_a_bits_param; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_in_a_bits_size_0 = auto_anon_in_a_bits_size; // @[Fragmenter.scala:92:9]
wire [7:0] auto_anon_in_a_bits_source_0 = auto_anon_in_a_bits_source; // @[Fragmenter.scala:92:9]
wire [16:0] auto_anon_in_a_bits_address_0 = auto_anon_in_a_bits_address; // @[Fragmenter.scala:92:9]
wire [7:0] auto_anon_in_a_bits_mask_0 = auto_anon_in_a_bits_mask; // @[Fragmenter.scala:92:9]
wire [63:0] auto_anon_in_a_bits_data_0 = auto_anon_in_a_bits_data; // @[Fragmenter.scala:92:9]
wire auto_anon_in_a_bits_corrupt_0 = auto_anon_in_a_bits_corrupt; // @[Fragmenter.scala:92:9]
wire auto_anon_in_d_ready_0 = auto_anon_in_d_ready; // @[Fragmenter.scala:92:9]
wire auto_anon_out_a_ready_0 = auto_anon_out_a_ready; // @[Fragmenter.scala:92:9]
wire auto_anon_out_d_valid_0 = auto_anon_out_d_valid; // @[Fragmenter.scala:92:9]
wire [1:0] auto_anon_out_d_bits_size_0 = auto_anon_out_d_bits_size; // @[Fragmenter.scala:92:9]
wire [11:0] auto_anon_out_d_bits_source_0 = auto_anon_out_d_bits_source; // @[Fragmenter.scala:92:9]
wire [63:0] auto_anon_out_d_bits_data_0 = auto_anon_out_d_bits_data; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_in_d_bits_opcode = 3'h1; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_out_d_bits_opcode = 3'h1; // @[Fragmenter.scala:92:9]
wire [2:0] anonIn_d_bits_opcode = 3'h1; // @[MixedNode.scala:551:17]
wire [2:0] anonOut_d_bits_opcode = 3'h1; // @[MixedNode.scala:542:17]
wire [1:0] auto_anon_in_d_bits_param = 2'h0; // @[Fragmenter.scala:92:9]
wire [1:0] auto_anon_out_d_bits_param = 2'h0; // @[Fragmenter.scala:92:9]
wire [1:0] anonIn_d_bits_param = 2'h0; // @[MixedNode.scala:551:17]
wire [1:0] anonOut_d_bits_param = 2'h0; // @[MixedNode.scala:542:17]
wire auto_anon_in_d_bits_sink = 1'h0; // @[Fragmenter.scala:92:9]
wire auto_anon_in_d_bits_denied = 1'h0; // @[Fragmenter.scala:92:9]
wire auto_anon_in_d_bits_corrupt = 1'h0; // @[Fragmenter.scala:92:9]
wire auto_anon_out_d_bits_sink = 1'h0; // @[Fragmenter.scala:92:9]
wire auto_anon_out_d_bits_denied = 1'h0; // @[Fragmenter.scala:92:9]
wire auto_anon_out_d_bits_corrupt = 1'h0; // @[Fragmenter.scala:92:9]
wire anonIn_d_bits_sink = 1'h0; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_denied = 1'h0; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_corrupt = 1'h0; // @[MixedNode.scala:551:17]
wire anonOut_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17]
wire anonOut_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17]
wire anonOut_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17]
wire acknum_size = 1'h0; // @[Fragmenter.scala:213:36]
wire _dFirst_acknum_T = 1'h0; // @[Fragmenter.scala:215:50]
wire _drop_T = 1'h0; // @[Fragmenter.scala:234:20]
wire drop = 1'h0; // @[Fragmenter.scala:234:30]
wire _new_gennum_T_1 = 1'h0; // @[Fragmenter.scala:306:50]
wire _aFragnum_T_2 = 1'h0; // @[Fragmenter.scala:307:84]
wire dHasData_opdata = 1'h1; // @[Edges.scala:106:36]
wire ack_decrement = 1'h1; // @[Fragmenter.scala:216:32]
wire _anonIn_d_valid_T = 1'h1; // @[Fragmenter.scala:236:39]
wire _find_T_4 = 1'h1; // @[Parameters.scala:137:59]
wire find_0 = 1'h1; // @[Parameters.scala:616:12]
wire _repeater_io_repeat_T = 1'h1; // @[Fragmenter.scala:314:31]
wire [1:0] _limit_T_1 = 2'h3; // @[Fragmenter.scala:288:49]
wire [1:0] _limit_T_3 = 2'h3; // @[Fragmenter.scala:288:49]
wire [1:0] _limit_T_5 = 2'h3; // @[Fragmenter.scala:288:49]
wire [1:0] _limit_T_7 = 2'h3; // @[Fragmenter.scala:288:49]
wire [1:0] _limit_T_9 = 2'h3; // @[Fragmenter.scala:288:49]
wire [1:0] limit = 2'h3; // @[Fragmenter.scala:288:49]
wire [17:0] _find_T_2 = 18'h0; // @[Parameters.scala:137:46]
wire [17:0] _find_T_3 = 18'h0; // @[Parameters.scala:137:46]
wire anonIn_a_ready; // @[MixedNode.scala:551:17]
wire anonIn_a_valid = auto_anon_in_a_valid_0; // @[Fragmenter.scala:92:9]
wire [2:0] anonIn_a_bits_opcode = auto_anon_in_a_bits_opcode_0; // @[Fragmenter.scala:92:9]
wire [2:0] anonIn_a_bits_param = auto_anon_in_a_bits_param_0; // @[Fragmenter.scala:92:9]
wire [2:0] anonIn_a_bits_size = auto_anon_in_a_bits_size_0; // @[Fragmenter.scala:92:9]
wire [7:0] anonIn_a_bits_source = auto_anon_in_a_bits_source_0; // @[Fragmenter.scala:92:9]
wire [16:0] anonIn_a_bits_address = auto_anon_in_a_bits_address_0; // @[Fragmenter.scala:92:9]
wire [7:0] anonIn_a_bits_mask = auto_anon_in_a_bits_mask_0; // @[Fragmenter.scala:92:9]
wire [63:0] anonIn_a_bits_data = auto_anon_in_a_bits_data_0; // @[Fragmenter.scala:92:9]
wire anonIn_a_bits_corrupt = auto_anon_in_a_bits_corrupt_0; // @[Fragmenter.scala:92:9]
wire anonIn_d_ready = auto_anon_in_d_ready_0; // @[Fragmenter.scala:92:9]
wire anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [7:0] anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire [63:0] anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire anonOut_a_ready = auto_anon_out_a_ready_0; // @[Fragmenter.scala:92:9]
wire anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [1:0] anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [11:0] anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [16:0] anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire anonOut_d_ready; // @[MixedNode.scala:542:17]
wire anonOut_d_valid = auto_anon_out_d_valid_0; // @[Fragmenter.scala:92:9]
wire [1:0] anonOut_d_bits_size = auto_anon_out_d_bits_size_0; // @[Fragmenter.scala:92:9]
wire [11:0] anonOut_d_bits_source = auto_anon_out_d_bits_source_0; // @[Fragmenter.scala:92:9]
wire [63:0] anonOut_d_bits_data = auto_anon_out_d_bits_data_0; // @[Fragmenter.scala:92:9]
wire auto_anon_in_a_ready_0; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_in_d_bits_size_0; // @[Fragmenter.scala:92:9]
wire [7:0] auto_anon_in_d_bits_source_0; // @[Fragmenter.scala:92:9]
wire [63:0] auto_anon_in_d_bits_data_0; // @[Fragmenter.scala:92:9]
wire auto_anon_in_d_valid_0; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_out_a_bits_opcode_0; // @[Fragmenter.scala:92:9]
wire [2:0] auto_anon_out_a_bits_param_0; // @[Fragmenter.scala:92:9]
wire [1:0] auto_anon_out_a_bits_size_0; // @[Fragmenter.scala:92:9]
wire [11:0] auto_anon_out_a_bits_source_0; // @[Fragmenter.scala:92:9]
wire [16:0] auto_anon_out_a_bits_address_0; // @[Fragmenter.scala:92:9]
wire [7:0] auto_anon_out_a_bits_mask_0; // @[Fragmenter.scala:92:9]
wire [63:0] auto_anon_out_a_bits_data_0; // @[Fragmenter.scala:92:9]
wire auto_anon_out_a_bits_corrupt_0; // @[Fragmenter.scala:92:9]
wire auto_anon_out_a_valid_0; // @[Fragmenter.scala:92:9]
wire auto_anon_out_d_ready_0; // @[Fragmenter.scala:92:9]
assign auto_anon_in_a_ready_0 = anonIn_a_ready; // @[Fragmenter.scala:92:9]
assign anonOut_a_bits_data = anonIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17]
wire _anonOut_d_ready_T = anonIn_d_ready; // @[Fragmenter.scala:235:35]
wire _anonIn_d_valid_T_1; // @[Fragmenter.scala:236:36]
assign auto_anon_in_d_valid_0 = anonIn_d_valid; // @[Fragmenter.scala:92:9]
wire [2:0] _anonIn_d_bits_size_T; // @[Fragmenter.scala:239:32]
assign auto_anon_in_d_bits_size_0 = anonIn_d_bits_size; // @[Fragmenter.scala:92:9]
wire [7:0] _anonIn_d_bits_source_T; // @[Fragmenter.scala:238:47]
assign auto_anon_in_d_bits_source_0 = anonIn_d_bits_source; // @[Fragmenter.scala:92:9]
assign auto_anon_in_d_bits_data_0 = anonIn_d_bits_data; // @[Fragmenter.scala:92:9]
assign auto_anon_out_a_valid_0 = anonOut_a_valid; // @[Fragmenter.scala:92:9]
assign auto_anon_out_a_bits_opcode_0 = anonOut_a_bits_opcode; // @[Fragmenter.scala:92:9]
assign auto_anon_out_a_bits_param_0 = anonOut_a_bits_param; // @[Fragmenter.scala:92:9]
assign auto_anon_out_a_bits_size_0 = anonOut_a_bits_size; // @[Fragmenter.scala:92:9]
wire [11:0] _anonOut_a_bits_source_T; // @[Fragmenter.scala:317:33]
assign auto_anon_out_a_bits_source_0 = anonOut_a_bits_source; // @[Fragmenter.scala:92:9]
wire [16:0] _anonOut_a_bits_address_T_6; // @[Fragmenter.scala:316:49]
assign auto_anon_out_a_bits_address_0 = anonOut_a_bits_address; // @[Fragmenter.scala:92:9]
wire [7:0] _anonOut_a_bits_mask_T; // @[Fragmenter.scala:325:31]
assign auto_anon_out_a_bits_mask_0 = anonOut_a_bits_mask; // @[Fragmenter.scala:92:9]
assign auto_anon_out_a_bits_data_0 = anonOut_a_bits_data; // @[Fragmenter.scala:92:9]
assign auto_anon_out_a_bits_corrupt_0 = anonOut_a_bits_corrupt; // @[Fragmenter.scala:92:9]
assign auto_anon_out_d_ready_0 = anonOut_d_ready; // @[Fragmenter.scala:92:9]
assign _anonIn_d_valid_T_1 = anonOut_d_valid; // @[Fragmenter.scala:236:36]
wire [1:0] dsizeOH_shiftAmount = anonOut_d_bits_size; // @[OneHot.scala:64:49]
assign anonIn_d_bits_data = anonOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17]
reg [2:0] acknum; // @[Fragmenter.scala:201:29]
reg [2:0] dOrig; // @[Fragmenter.scala:202:24]
reg dToggle; // @[Fragmenter.scala:203:30]
wire [2:0] dFragnum = anonOut_d_bits_source[2:0]; // @[Fragmenter.scala:204:41]
wire [2:0] acknum_fragment = dFragnum; // @[Fragmenter.scala:204:41, :212:40]
wire dFirst = acknum == 3'h0; // @[Fragmenter.scala:201:29, :205:29]
wire dLast = dFragnum == 3'h0; // @[Fragmenter.scala:204:41, :206:30]
wire _drop_T_1 = dLast; // @[Fragmenter.scala:206:30, :234:37]
wire [3:0] _dsizeOH_T = 4'h1 << dsizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [3:0] dsizeOH = _dsizeOH_T; // @[OneHot.scala:65:{12,27}]
wire [5:0] _dsizeOH1_T = 6'h7 << anonOut_d_bits_size; // @[package.scala:243:71]
wire [2:0] _dsizeOH1_T_1 = _dsizeOH1_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] dsizeOH1 = ~_dsizeOH1_T_1; // @[package.scala:243:{46,76}]
wire [2:0] dFirst_acknum = acknum_fragment; // @[Fragmenter.scala:212:40, :215:45]
wire _ack_decrement_T = dsizeOH[3]; // @[OneHot.scala:65:27]
wire [5:0] _dFirst_size_T = {dFragnum, 3'h0}; // @[Fragmenter.scala:204:41, :218:47]
wire [5:0] _dFirst_size_T_1 = {_dFirst_size_T[5:3], _dFirst_size_T[2:0] | dsizeOH1}; // @[package.scala:243:46]
wire [6:0] _dFirst_size_T_2 = {_dFirst_size_T_1, 1'h0}; // @[package.scala:241:35]
wire [6:0] _dFirst_size_T_3 = {_dFirst_size_T_2[6:1], 1'h1}; // @[package.scala:241:{35,40}]
wire [6:0] _dFirst_size_T_4 = {1'h0, _dFirst_size_T_1}; // @[package.scala:241:53]
wire [6:0] _dFirst_size_T_5 = ~_dFirst_size_T_4; // @[package.scala:241:{49,53}]
wire [6:0] _dFirst_size_T_6 = _dFirst_size_T_3 & _dFirst_size_T_5; // @[package.scala:241:{40,47,49}]
wire [2:0] dFirst_size_hi = _dFirst_size_T_6[6:4]; // @[OneHot.scala:30:18]
wire [3:0] dFirst_size_lo = _dFirst_size_T_6[3:0]; // @[OneHot.scala:31:18]
wire _dFirst_size_T_7 = |dFirst_size_hi; // @[OneHot.scala:30:18, :32:14]
wire [3:0] _dFirst_size_T_8 = {1'h0, dFirst_size_hi} | dFirst_size_lo; // @[OneHot.scala:30:18, :31:18, :32:28]
wire [1:0] dFirst_size_hi_1 = _dFirst_size_T_8[3:2]; // @[OneHot.scala:30:18, :32:28]
wire [1:0] dFirst_size_lo_1 = _dFirst_size_T_8[1:0]; // @[OneHot.scala:31:18, :32:28]
wire _dFirst_size_T_9 = |dFirst_size_hi_1; // @[OneHot.scala:30:18, :32:14]
wire [1:0] _dFirst_size_T_10 = dFirst_size_hi_1 | dFirst_size_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28]
wire _dFirst_size_T_11 = _dFirst_size_T_10[1]; // @[OneHot.scala:32:28]
wire [1:0] _dFirst_size_T_12 = {_dFirst_size_T_9, _dFirst_size_T_11}; // @[OneHot.scala:32:{10,14}]
wire [2:0] dFirst_size = {_dFirst_size_T_7, _dFirst_size_T_12}; // @[OneHot.scala:32:{10,14}]
wire [3:0] _acknum_T = {1'h0, acknum} - 4'h1; // @[Fragmenter.scala:201:29, :221:55]
wire [2:0] _acknum_T_1 = _acknum_T[2:0]; // @[Fragmenter.scala:221:55]
wire [2:0] _acknum_T_2 = dFirst ? dFirst_acknum : _acknum_T_1; // @[Fragmenter.scala:205:29, :215:45, :221:{24,55}]
wire _dToggle_T = anonOut_d_bits_source[3]; // @[Fragmenter.scala:224:41]
wire _drop_T_2 = ~_drop_T_1; // @[Fragmenter.scala:234:{33,37}]
assign anonOut_d_ready = _anonOut_d_ready_T; // @[Fragmenter.scala:235:35]
assign anonIn_d_valid = _anonIn_d_valid_T_1; // @[Fragmenter.scala:236:36]
assign _anonIn_d_bits_source_T = anonOut_d_bits_source[11:4]; // @[Fragmenter.scala:238:47]
assign anonIn_d_bits_source = _anonIn_d_bits_source_T; // @[Fragmenter.scala:238:47]
assign _anonIn_d_bits_size_T = dFirst ? dFirst_size : dOrig; // @[OneHot.scala:32:10]
assign anonIn_d_bits_size = _anonIn_d_bits_size_T; // @[Fragmenter.scala:239:32]
wire [16:0] _find_T; // @[Parameters.scala:137:31]
wire [17:0] _find_T_1 = {1'h0, _find_T}; // @[Parameters.scala:137:{31,41}]
wire _limit_T = _repeater_io_deq_bits_opcode == 3'h0; // @[Fragmenter.scala:274:30, :288:49]
wire _limit_T_2 = _repeater_io_deq_bits_opcode == 3'h1; // @[Fragmenter.scala:274:30, :288:49]
wire _limit_T_4 = _repeater_io_deq_bits_opcode == 3'h2; // @[Fragmenter.scala:274:30, :288:49]
wire _limit_T_6 = _repeater_io_deq_bits_opcode == 3'h3; // @[Fragmenter.scala:274:30, :288:49]
wire _limit_T_8 = _repeater_io_deq_bits_opcode == 3'h4; // @[Fragmenter.scala:274:30, :288:49]
wire _limit_T_10 = _repeater_io_deq_bits_opcode == 3'h5; // @[Fragmenter.scala:274:30, :288:49]
wire _aFrag_T = _repeater_io_deq_bits_size[2]; // @[Fragmenter.scala:274:30, :297:31]
wire [2:0] aFrag = _aFrag_T ? 3'h3 : _repeater_io_deq_bits_size; // @[Fragmenter.scala:274:30, :297:{24,31}]
wire [12:0] _aOrigOH1_T = 13'h3F << _repeater_io_deq_bits_size; // @[package.scala:243:71]
wire [5:0] _aOrigOH1_T_1 = _aOrigOH1_T[5:0]; // @[package.scala:243:{71,76}]
wire [5:0] aOrigOH1 = ~_aOrigOH1_T_1; // @[package.scala:243:{46,76}]
wire [9:0] _aFragOH1_T = 10'h7 << aFrag; // @[package.scala:243:71]
wire [2:0] _aFragOH1_T_1 = _aFragOH1_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] aFragOH1 = ~_aFragOH1_T_1; // @[package.scala:243:{46,76}]
wire [2:0] aMask = aFragOH1; // @[package.scala:243:46]
wire _aHasData_opdata_T = _repeater_io_deq_bits_opcode[2]; // @[Fragmenter.scala:274:30]
wire aHasData_opdata = ~_aHasData_opdata_T; // @[Edges.scala:92:{28,37}]
reg [2:0] gennum; // @[Fragmenter.scala:303:29]
wire aFirst = gennum == 3'h0; // @[Fragmenter.scala:303:29, :304:29]
wire [2:0] _old_gennum1_T = aOrigOH1[5:3]; // @[package.scala:243:46]
wire [3:0] _old_gennum1_T_1 = {1'h0, gennum} - 4'h1; // @[Fragmenter.scala:303:29, :305:79]
wire [2:0] _old_gennum1_T_2 = _old_gennum1_T_1[2:0]; // @[Fragmenter.scala:305:79]
wire [2:0] old_gennum1 = aFirst ? _old_gennum1_T : _old_gennum1_T_2; // @[Fragmenter.scala:304:29, :305:{30,48,79}]
wire [2:0] _aFragnum_T = old_gennum1; // @[Fragmenter.scala:305:30, :307:40]
wire [2:0] _new_gennum_T = ~old_gennum1; // @[Fragmenter.scala:305:30, :306:28]
wire [2:0] _new_gennum_T_2 = _new_gennum_T; // @[Fragmenter.scala:306:{28,41}]
wire [2:0] new_gennum = ~_new_gennum_T_2; // @[Fragmenter.scala:306:{26,41}]
wire [2:0] _aFragnum_T_1 = ~_aFragnum_T; // @[Fragmenter.scala:307:{26,40}]
wire [2:0] _aFragnum_T_3 = _aFragnum_T_1; // @[Fragmenter.scala:307:{26,72}]
wire [2:0] aFragnum = ~_aFragnum_T_3; // @[Fragmenter.scala:307:{24,72}]
wire aLast = ~(|aFragnum); // @[Fragmenter.scala:307:24, :308:30]
reg aToggle_r; // @[Fragmenter.scala:309:54]
wire _aToggle_T = aFirst ? dToggle : aToggle_r; // @[Fragmenter.scala:203:30, :304:29, :309:{27,54}]
wire aToggle = ~_aToggle_T; // @[Fragmenter.scala:309:{23,27}]
wire _repeater_io_repeat_T_1 = |aFragnum; // @[Fragmenter.scala:307:24, :308:30, :314:53]
wire _repeater_io_repeat_T_2 = _repeater_io_repeat_T_1; // @[Fragmenter.scala:314:{41,53}]
wire [5:0] _anonOut_a_bits_address_T = {old_gennum1, 3'h0}; // @[Fragmenter.scala:305:30, :316:65]
wire [5:0] _anonOut_a_bits_address_T_1 = ~aOrigOH1; // @[package.scala:243:46]
wire [5:0] _anonOut_a_bits_address_T_2 = _anonOut_a_bits_address_T | _anonOut_a_bits_address_T_1; // @[Fragmenter.scala:316:{65,88,90}]
wire [5:0] _anonOut_a_bits_address_T_3 = {_anonOut_a_bits_address_T_2[5:3], _anonOut_a_bits_address_T_2[2:0] | aFragOH1}; // @[package.scala:243:46]
wire [5:0] _anonOut_a_bits_address_T_4 = {_anonOut_a_bits_address_T_3[5:3], 3'h7}; // @[Fragmenter.scala:316:{100,111}]
wire [5:0] _anonOut_a_bits_address_T_5 = ~_anonOut_a_bits_address_T_4; // @[Fragmenter.scala:316:{51,111}]
assign _anonOut_a_bits_address_T_6 = {_repeater_io_deq_bits_address[16:6], _repeater_io_deq_bits_address[5:0] | _anonOut_a_bits_address_T_5}; // @[Fragmenter.scala:274:30, :316:{49,51}]
assign anonOut_a_bits_address = _anonOut_a_bits_address_T_6; // @[Fragmenter.scala:316:49]
wire [8:0] anonOut_a_bits_source_hi = {_repeater_io_deq_bits_source, aToggle}; // @[Fragmenter.scala:274:30, :309:23, :317:33]
assign _anonOut_a_bits_source_T = {anonOut_a_bits_source_hi, aFragnum}; // @[Fragmenter.scala:307:24, :317:33]
assign anonOut_a_bits_source = _anonOut_a_bits_source_T; // @[Fragmenter.scala:317:33]
assign anonOut_a_bits_size = aFrag[1:0]; // @[Fragmenter.scala:297:24, :318:25] |
Generate the Verilog code corresponding to the following Chisel files.
File Breakpoint.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.rocket
import chisel3._
import chisel3.util.{Cat}
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.tile.{CoreBundle, HasCoreParameters}
import freechips.rocketchip.util._
class BPControl(implicit p: Parameters) extends CoreBundle()(p) {
val ttype = UInt(4.W)
val dmode = Bool()
val maskmax = UInt(6.W)
val reserved = UInt((xLen - (if (coreParams.useBPWatch) 26 else 24)).W)
val action = UInt((if (coreParams.useBPWatch) 3 else 1).W)
val chain = Bool()
val zero = UInt(2.W)
val tmatch = UInt(2.W)
val m = Bool()
val h = Bool()
val s = Bool()
val u = Bool()
val x = Bool()
val w = Bool()
val r = Bool()
def tType = 2
def maskMax = 4
def enabled(mstatus: MStatus) = !mstatus.debug && Cat(m, h, s, u)(mstatus.prv)
}
class TExtra(implicit p: Parameters) extends CoreBundle()(p) {
def mvalueBits: Int = if (xLen == 32) coreParams.mcontextWidth min 6 else coreParams.mcontextWidth min 13
def svalueBits: Int = if (xLen == 32) coreParams.scontextWidth min 16 else coreParams.scontextWidth min 34
def mselectPos: Int = if (xLen == 32) 25 else 50
def mvaluePos : Int = mselectPos + 1
def sselectPos: Int = 0
def svaluePos : Int = 2
val mvalue = UInt(mvalueBits.W)
val mselect = Bool()
val pad2 = UInt((mselectPos - svalueBits - 2).W)
val svalue = UInt(svalueBits.W)
val pad1 = UInt(1.W)
val sselect = Bool()
}
class BP(implicit p: Parameters) extends CoreBundle()(p) {
val control = new BPControl
val address = UInt(vaddrBits.W)
val textra = new TExtra
def contextMatch(mcontext: UInt, scontext: UInt) =
(if (coreParams.mcontextWidth > 0) (!textra.mselect || (mcontext(textra.mvalueBits-1,0) === textra.mvalue)) else true.B) &&
(if (coreParams.scontextWidth > 0) (!textra.sselect || (scontext(textra.svalueBits-1,0) === textra.svalue)) else true.B)
def mask(dummy: Int = 0) =
(0 until control.maskMax-1).scanLeft(control.tmatch(0))((m, i) => m && address(i)).asUInt
def pow2AddressMatch(x: UInt) =
(~x | mask()) === (~address | mask())
def rangeAddressMatch(x: UInt) =
(x >= address) ^ control.tmatch(0)
def addressMatch(x: UInt) =
Mux(control.tmatch(1), rangeAddressMatch(x), pow2AddressMatch(x))
}
class BPWatch (val n: Int) extends Bundle() {
val valid = Vec(n, Bool())
val rvalid = Vec(n, Bool())
val wvalid = Vec(n, Bool())
val ivalid = Vec(n, Bool())
val action = UInt(3.W)
}
class BreakpointUnit(n: Int)(implicit val p: Parameters) extends Module with HasCoreParameters {
val io = IO(new Bundle {
val status = Input(new MStatus())
val bp = Input(Vec(n, new BP))
val pc = Input(UInt(vaddrBits.W))
val ea = Input(UInt(vaddrBits.W))
val mcontext = Input(UInt(coreParams.mcontextWidth.W))
val scontext = Input(UInt(coreParams.scontextWidth.W))
val xcpt_if = Output(Bool())
val xcpt_ld = Output(Bool())
val xcpt_st = Output(Bool())
val debug_if = Output(Bool())
val debug_ld = Output(Bool())
val debug_st = Output(Bool())
val bpwatch = Output(Vec(n, new BPWatch(1)))
})
io.xcpt_if := false.B
io.xcpt_ld := false.B
io.xcpt_st := false.B
io.debug_if := false.B
io.debug_ld := false.B
io.debug_st := false.B
(io.bpwatch zip io.bp).foldLeft((true.B, true.B, true.B)) { case ((ri, wi, xi), (bpw, bp)) =>
val en = bp.control.enabled(io.status)
val cx = bp.contextMatch(io.mcontext, io.scontext)
val r = en && bp.control.r && bp.addressMatch(io.ea) && cx
val w = en && bp.control.w && bp.addressMatch(io.ea) && cx
val x = en && bp.control.x && bp.addressMatch(io.pc) && cx
val end = !bp.control.chain
val action = bp.control.action
bpw.action := action
bpw.valid(0) := false.B
bpw.rvalid(0) := false.B
bpw.wvalid(0) := false.B
bpw.ivalid(0) := false.B
when (end && r && ri) { io.xcpt_ld := (action === 0.U); io.debug_ld := (action === 1.U); bpw.valid(0) := true.B; bpw.rvalid(0) := true.B }
when (end && w && wi) { io.xcpt_st := (action === 0.U); io.debug_st := (action === 1.U); bpw.valid(0) := true.B; bpw.wvalid(0) := true.B }
when (end && x && xi) { io.xcpt_if := (action === 0.U); io.debug_if := (action === 1.U); bpw.valid(0) := true.B; bpw.ivalid(0) := true.B }
(end || r, end || w, end || x)
}
}
| module BreakpointUnit( // @[Breakpoint.scala:79:7]
input io_status_debug, // @[Breakpoint.scala:80:14]
input [1:0] io_status_prv, // @[Breakpoint.scala:80:14]
input io_bp_0_control_action, // @[Breakpoint.scala:80:14]
input [1:0] io_bp_0_control_tmatch, // @[Breakpoint.scala:80:14]
input io_bp_0_control_m, // @[Breakpoint.scala:80:14]
input io_bp_0_control_s, // @[Breakpoint.scala:80:14]
input io_bp_0_control_u, // @[Breakpoint.scala:80:14]
input io_bp_0_control_x, // @[Breakpoint.scala:80:14]
input io_bp_0_control_w, // @[Breakpoint.scala:80:14]
input io_bp_0_control_r, // @[Breakpoint.scala:80:14]
input [38:0] io_bp_0_address, // @[Breakpoint.scala:80:14]
input [38:0] io_pc, // @[Breakpoint.scala:80:14]
input [38:0] io_ea, // @[Breakpoint.scala:80:14]
output io_xcpt_if, // @[Breakpoint.scala:80:14]
output io_xcpt_ld, // @[Breakpoint.scala:80:14]
output io_xcpt_st, // @[Breakpoint.scala:80:14]
output io_debug_if, // @[Breakpoint.scala:80:14]
output io_debug_ld, // @[Breakpoint.scala:80:14]
output io_debug_st // @[Breakpoint.scala:80:14]
);
wire [3:0] _en_T_2 = {io_bp_0_control_m, 1'h0, io_bp_0_control_s, io_bp_0_control_u} >> io_status_prv; // @[Breakpoint.scala:30:{56,68}]
wire en = ~io_status_debug & _en_T_2[0]; // @[Breakpoint.scala:30:{35,50,68}]
wire _w_T_2 = io_ea >= io_bp_0_address; // @[Breakpoint.scala:65:8]
wire [38:0] _w_T_5 = ~io_ea; // @[Breakpoint.scala:62:6]
wire _r_T_8 = io_bp_0_control_tmatch[0] & io_bp_0_address[0]; // @[Breakpoint.scala:59:{73,83}, :65:36]
wire _r_T_10 = _r_T_8 & io_bp_0_address[1]; // @[Breakpoint.scala:59:{73,83}]
wire [38:0] _x_T_15 = ~io_bp_0_address; // @[Breakpoint.scala:62:24]
wire _r_T_18 = io_bp_0_control_tmatch[0] & io_bp_0_address[0]; // @[Breakpoint.scala:59:{73,83}, :65:36]
wire _r_T_20 = _r_T_18 & io_bp_0_address[1]; // @[Breakpoint.scala:59:{73,83}]
wire r = en & io_bp_0_control_r & (io_bp_0_control_tmatch[1] ? _w_T_2 ^ io_bp_0_control_tmatch[0] : {_w_T_5[38:4], _w_T_5[3:0] | {_r_T_10 & io_bp_0_address[2], _r_T_10, _r_T_8, io_bp_0_control_tmatch[0]}} == {_x_T_15[38:4], _x_T_15[3:0] | {_r_T_20 & io_bp_0_address[2], _r_T_20, _r_T_18, io_bp_0_control_tmatch[0]}}); // @[package.scala:45:27]
wire _w_T_8 = io_bp_0_control_tmatch[0] & io_bp_0_address[0]; // @[Breakpoint.scala:59:{73,83}, :65:36]
wire _w_T_10 = _w_T_8 & io_bp_0_address[1]; // @[Breakpoint.scala:59:{73,83}]
wire _w_T_18 = io_bp_0_control_tmatch[0] & io_bp_0_address[0]; // @[Breakpoint.scala:59:{73,83}, :65:36]
wire _w_T_20 = _w_T_18 & io_bp_0_address[1]; // @[Breakpoint.scala:59:{73,83}]
wire w = en & io_bp_0_control_w & (io_bp_0_control_tmatch[1] ? _w_T_2 ^ io_bp_0_control_tmatch[0] : {_w_T_5[38:4], _w_T_5[3:0] | {_w_T_10 & io_bp_0_address[2], _w_T_10, _w_T_8, io_bp_0_control_tmatch[0]}} == {_x_T_15[38:4], _x_T_15[3:0] | {_w_T_20 & io_bp_0_address[2], _w_T_20, _w_T_18, io_bp_0_control_tmatch[0]}}); // @[package.scala:45:27]
wire [38:0] _x_T_5 = ~io_pc; // @[Breakpoint.scala:62:6]
wire _x_T_8 = io_bp_0_control_tmatch[0] & io_bp_0_address[0]; // @[Breakpoint.scala:59:{73,83}, :65:36]
wire _x_T_10 = _x_T_8 & io_bp_0_address[1]; // @[Breakpoint.scala:59:{73,83}]
wire _x_T_18 = io_bp_0_control_tmatch[0] & io_bp_0_address[0]; // @[Breakpoint.scala:59:{73,83}, :65:36]
wire _x_T_20 = _x_T_18 & io_bp_0_address[1]; // @[Breakpoint.scala:59:{73,83}]
wire x = en & io_bp_0_control_x & (io_bp_0_control_tmatch[1] ? io_pc >= io_bp_0_address ^ io_bp_0_control_tmatch[0] : {_x_T_5[38:4], _x_T_5[3:0] | {_x_T_10 & io_bp_0_address[2], _x_T_10, _x_T_8, io_bp_0_control_tmatch[0]}} == {_x_T_15[38:4], _x_T_15[3:0] | {_x_T_20 & io_bp_0_address[2], _x_T_20, _x_T_18, io_bp_0_control_tmatch[0]}}); // @[package.scala:45:27]
assign io_xcpt_if = x & ~io_bp_0_control_action; // @[Breakpoint.scala:79:7, :96:14, :108:{16,32}, :120:{27,40,51}]
assign io_xcpt_ld = r & ~io_bp_0_control_action; // @[Breakpoint.scala:79:7, :97:14, :106:{16,32}, :118:{27,40,51}]
assign io_xcpt_st = w & ~io_bp_0_control_action; // @[Breakpoint.scala:79:7, :98:14, :107:{16,32}, :119:{27,40,51}]
assign io_debug_if = x & io_bp_0_control_action; // @[Breakpoint.scala:79:7, :99:15, :108:{16,32}, :120:{27,73}]
assign io_debug_ld = r & io_bp_0_control_action; // @[Breakpoint.scala:79:7, :100:15, :106:{16,32}, :118:{27,73}]
assign io_debug_st = w & io_bp_0_control_action; // @[Breakpoint.scala:79:7, :101:15, :107:{16,32}, :119:{27,73}]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
  * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
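// NOTE (editor's sketch, not part of the upstream file): a minimal example of how the
// AsyncResetShiftReg companion object above is typically applied. The module name,
// widths, and the "pipe" name hint are illustrative assumptions.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(4.W))
    val out = Output(UInt(4.W))
  })
  // Delay io.in by three asynchronously-reset stages (reset value 0), suggesting the
  // instantiated register names "pipe_0" .. "pipe_2".
  io.out := AsyncResetShiftReg(io.in, depth = 3, init = 0, name = Some("pipe"))
}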
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
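// NOTE (editor's sketch, not part of the upstream file): typical use of the companion
// object above to bring a single asynchronous Bool into the local clock domain through
// a 3-deep synchronizer chain. Port and module names are illustrative assumptions.
class AsyncSyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val synced   = Output(Bool())
  })
  io.synced := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0, name = Some("sync"))
}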
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unnecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
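// NOTE (editor's sketch, not part of the upstream file): ClockCrossingReg used as a
// single-deep, enable-gated capture register, in the same way AsyncQueueSink latches its
// dequeue data. Width and names are illustrative assumptions.
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val d  = Input(UInt(16.W))
    val en = Input(Bool())
    val q  = Output(UInt(16.W))
  })
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = false, name = Some("capture"))
}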
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_70( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
output io_q // @[ShiftReg.scala:36:14]
);
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire io_d = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire _output_T_1 = 1'h1; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_110 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftRegisterPriorityQueue.scala:
package compressacc
import chisel3._
import chisel3.util._
import chisel3.util._
// TODO : support enq & deq at the same cycle
class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle {
val output_prev = KeyValue(keyWidth, value)
val output_nxt = KeyValue(keyWidth, value)
val input_prev = Flipped(KeyValue(keyWidth, value))
val input_nxt = Flipped(KeyValue(keyWidth, value))
val cmd = Flipped(Valid(UInt(1.W)))
val insert_here = Input(Bool())
val cur_input_keyval = Flipped(KeyValue(keyWidth, value))
val cur_output_keyval = KeyValue(keyWidth, value)
}
class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module {
val io = IO(new PriorityQueueStageIO(keyWidth, value))
dontTouch(io)
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val MAX_VALUE = (1 << keyWidth) - 1
val key_reg = RegInit(MAX_VALUE.U(keyWidth.W))
val value_reg = Reg(value)
io.output_prev.key := key_reg
io.output_prev.value := value_reg
io.output_nxt.key := key_reg
io.output_nxt.value := value_reg
io.cur_output_keyval.key := key_reg
io.cur_output_keyval.value := value_reg
when (io.cmd.valid) {
switch (io.cmd.bits) {
is (CMD_DEQ) {
key_reg := io.input_nxt.key
value_reg := io.input_nxt.value
}
is (CMD_ENQ) {
when (io.insert_here) {
key_reg := io.cur_input_keyval.key
value_reg := io.cur_input_keyval.value
} .elsewhen (key_reg >= io.cur_input_keyval.key) {
key_reg := io.input_prev.key
value_reg := io.input_prev.value
} .otherwise {
// do nothing
}
}
}
}
}
object PriorityQueueStage {
def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v)
}
// TODO
// - This design is not scalable as the enqueued keyval is broadcast to all the stages
// - Add pipeline registers later
class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle {
val cnt_bits = log2Ceil(queSize+1)
val counter = Output(UInt(cnt_bits.W))
val enq = Flipped(Decoupled(KeyValue(keyWidth, value)))
val deq = Decoupled(KeyValue(keyWidth, value))
}
class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module {
val keyWidthInternal = keyWidth + 1
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value))
dontTouch(io)
val MAX_VALUE = ((1 << keyWidthInternal) - 1).U
val cnt_bits = log2Ceil(queSize+1)
  // do not consider cases where we are inserting more entries than the queSize
val counter = RegInit(0.U(cnt_bits.W))
io.counter := counter
val full = (counter === queSize.U)
val empty = (counter === 0.U)
io.deq.valid := !empty
io.enq.ready := !full
when (io.enq.fire) {
counter := counter + 1.U
}
when (io.deq.fire) {
counter := counter - 1.U
}
val cmd_valid = io.enq.valid || io.deq.ready
val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ)
assert(!(io.enq.valid && io.deq.ready))
val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value)))
for (i <- 0 until (queSize - 1)) {
stages(i+1).io.input_prev <> stages(i).io.output_nxt
stages(i).io.input_nxt <> stages(i+1).io.output_prev
}
stages(queSize-1).io.input_nxt.key := MAX_VALUE
// stages(queSize-1).io.input_nxt.value :=
stages(queSize-1).io.input_nxt.value.symbol := 0.U
// stages(queSize-1).io.input_nxt.value.child(0) := 0.U
// stages(queSize-1).io.input_nxt.value.child(1) := 0.U
stages(0).io.input_prev.key := io.enq.bits.key
stages(0).io.input_prev.value <> io.enq.bits.value
for (i <- 0 until queSize) {
stages(i).io.cmd.valid := cmd_valid
stages(i).io.cmd.bits := cmd
stages(i).io.cur_input_keyval <> io.enq.bits
}
val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B)))
for (i <- 0 until queSize) {
is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key)
}
val is_large_or_equal_cat = Wire(UInt(queSize.W))
is_large_or_equal_cat := Cat(is_large_or_equal.reverse)
val insert_here_idx = PriorityEncoder(is_large_or_equal_cat)
for (i <- 0 until queSize) {
when (i.U === insert_here_idx) {
stages(i).io.insert_here := true.B
} .otherwise {
stages(i).io.insert_here := false.B
}
}
io.deq.bits <> stages(0).io.output_prev
}
| module PriorityQueueStage_40( // @[ShiftRegisterPriorityQueue.scala:21:7]
input clock, // @[ShiftRegisterPriorityQueue.scala:21:7]
input reset, // @[ShiftRegisterPriorityQueue.scala:21:7]
output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14]
);
wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24]
assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22]
assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
wire _T_2 = key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30]
always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24]
else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
  end // always @(posedge)
assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
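// NOTE (editor's sketch, not part of the upstream file): ShiftRegInit, described in the
// comment above, used for a simple two-cycle delay with named, reset-to-zero registers
// ("dly_0", "dly_1"). Width and names are illustrative assumptions.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  io.out := ShiftRegInit(io.in, n = 2, init = 0.U(8.W), name = Some("dly"))
}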
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
  * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File AsyncQueue.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
case class AsyncQueueParams(
depth: Int = 8,
sync: Int = 3,
safe: Boolean = true,
// If safe is true, then effort is made to resynchronize the crossing indices when either side is reset.
// This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty.
narrow: Boolean = false)
// If narrow is true then the read mux is moved to the source side of the crossing.
// This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing,
// at the expense of a combinational path from the sink to the source and back to the sink.
{
require (depth > 0 && isPow2(depth))
require (sync >= 2)
val bits = log2Ceil(depth)
val wires = if (narrow) 1 else depth
}
object AsyncQueueParams {
// When there is only one entry, we don't need narrow.
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false)
}
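// NOTE (editor's sketch, not part of the upstream file): illustrative parameter choices
// showing the trade-off documented above. With narrow = true only one data word crosses
// the boundary (wires == 1); otherwise all `depth` entries are exposed as wires.
object AsyncQueueParamsExamples {
  val wide   = AsyncQueueParams(depth = 8, sync = 3, safe = true,  narrow = false) // wires == 8
  val narrow = AsyncQueueParams(depth = 4, sync = 2, safe = false, narrow = true)  // wires == 1
}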
class AsyncBundleSafety extends Bundle {
val ridx_valid = Input (Bool())
val widx_valid = Output(Bool())
val source_reset_n = Output(Bool())
val sink_reset_n = Input (Bool())
}
class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle {
// Data-path synchronization
val mem = Output(Vec(params.wires, gen))
val ridx = Input (UInt((params.bits+1).W))
val widx = Output(UInt((params.bits+1).W))
val index = params.narrow.option(Input(UInt(params.bits.W)))
// Signals used to self-stabilize a safe AsyncQueue
val safe = params.safe.option(new AsyncBundleSafety)
}
object GrayCounter {
def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = {
val incremented = Wire(UInt(bits.W))
val binary = RegNext(next=incremented, init=0.U).suggestName(name)
incremented := Mux(clear, 0.U, binary + increment.asUInt)
incremented ^ (incremented >> 1)
}
}
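// NOTE (editor's sketch, not part of the upstream file): the same binary-to-Gray mapping
// used above ("incremented ^ (incremented >> 1)"), written over plain Scala Ints so the
// one-bit-per-increment property is easy to check.
object GrayCounterExample {
  def binToGray(b: Int): Int = b ^ (b >>> 1)
  // (0 until 8).map(binToGray) == Seq(0, 1, 3, 2, 6, 7, 5, 4)
}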
class AsyncValidSync(sync: Int, desc: String) extends RawModule {
val io = IO(new Bundle {
val in = Input(Bool())
val out = Output(Bool())
})
val clock = IO(Input(Clock()))
val reset = IO(Input(AsyncReset()))
withClockAndReset(clock, reset){
io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc))
}
}
class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSource_${gen.typeName}"
val io = IO(new Bundle {
// These come from the source domain
val enq = Flipped(Decoupled(gen))
// These cross to the sink clock domain
val async = new AsyncBundle(gen, params)
})
val bits = params.bits
val sink_ready = WireInit(true.B)
val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all.
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin"))
val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray"))
val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U)
val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1))
when (io.enq.fire) { mem(index) := io.enq.bits }
val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg"))
io.enq.ready := ready_reg && sink_ready
val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray"))
io.async.widx := widx_reg
io.async.index match {
case Some(index) => io.async.mem(0) := mem(index)
case None => io.async.mem := mem
}
io.async.safe.foreach { sio =>
val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0"))
val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1"))
val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend"))
val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid"))
source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset
sink_valid .reset := reset.asAsyncReset
source_valid_0.clock := clock
source_valid_1.clock := clock
sink_extend .clock := clock
sink_valid .clock := clock
source_valid_0.io.in := true.B
source_valid_1.io.in := source_valid_0.io.out
sio.widx_valid := source_valid_1.io.out
sink_extend.io.in := sio.ridx_valid
sink_valid.io.in := sink_extend.io.out
sink_ready := sink_valid.io.out
sio.source_reset_n := !reset.asBool
// Assert that if there is stuff in the queue, then reset cannot happen
// Impossible to write because dequeue can occur on the receiving side,
// then reset allowed to happen, but write side cannot know that dequeue
// occurred.
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
// assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected")
// assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty")
}
}
class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module {
override def desiredName = s"AsyncQueueSink_${gen.typeName}"
val io = IO(new Bundle {
// These come from the sink domain
val deq = Decoupled(gen)
// These cross to the source clock domain
val async = Flipped(new AsyncBundle(gen, params))
})
val bits = params.bits
val source_ready = WireInit(true.B)
val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin"))
val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray"))
val valid = source_ready && ridx =/= widx
// The mux is safe because timing analysis ensures ridx has reached the register
// On an ASIC, changes to the unread location cannot affect the selected value
// On an FPGA, only one input changes at a time => mem updates don't cause glitches
  // The register only latches when the selected value is not being written
val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1))
io.async.index.foreach { _ := index }
// This register does not NEED to be reset, as its contents will not
// be considered unless the asynchronously reset deq valid register is set.
// It is possible that bits latches when the source domain is reset / has power cut
// This is safe, because isolation gates brought mem low before the zeroed widx reached us
val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index)
io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg"))
val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg"))
io.deq.valid := valid_reg && source_ready
val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray"))
io.async.ridx := ridx_reg
io.async.safe.foreach { sio =>
val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0"))
val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1"))
val source_extend = Module(new AsyncValidSync(params.sync, "source_extend"))
val source_valid = Module(new AsyncValidSync(params.sync, "source_valid"))
sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset
source_valid .reset := reset.asAsyncReset
sink_valid_0 .clock := clock
sink_valid_1 .clock := clock
source_extend.clock := clock
source_valid .clock := clock
sink_valid_0.io.in := true.B
sink_valid_1.io.in := sink_valid_0.io.out
sio.ridx_valid := sink_valid_1.io.out
source_extend.io.in := sio.widx_valid
source_valid.io.in := source_extend.io.out
source_ready := source_valid.io.out
sio.sink_reset_n := !reset.asBool
// TODO: write some sort of sanity check assertion for users
// that denote don't reset when there is activity
//
// val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool
// val reset_and_extend_prev = RegNext(reset_and_extend, true.B)
// val reset_rise = !reset_and_extend_prev && reset_and_extend
// val prev_idx_match = AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0)
// assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty")
}
}
object FromAsyncBundle
{
// Sometimes it makes sense for the sink to have different sync than the source
def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync)
def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = {
val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync)))
sink.io.async <> x
sink.io.deq
}
}
object ToAsyncBundle
{
def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = {
val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params))
source.io.enq <> x
source.io.async
}
}
class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] {
val io = IO(new CrossingIO(gen))
val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) }
val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) }
source.io.enq <> io.enq
io.deq <> sink.io.deq
sink.io.async <> source.io.async
}
| module AsyncValidSync_261( // @[AsyncQueue.scala:58:7]
input io_in, // @[AsyncQueue.scala:59:14]
output io_out, // @[AsyncQueue.scala:59:14]
input clock, // @[AsyncQueue.scala:63:17]
input reset // @[AsyncQueue.scala:64:17]
);
wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
wire io_out_0; // @[AsyncQueue.scala:58:7]
assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
AsyncResetSynchronizerShiftReg_w1_d3_i0_278 io_out_sink_valid_1 ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (io_in_0), // @[AsyncQueue.scala:58:7]
.io_q (_io_out_WIRE)
); // @[ShiftReg.scala:45:23]
assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
import Util._
/**
* A Tile is a purely combinational 2D array of passThrough PEs.
  * a, b, d, and control signals are broadcast across the entire array and are passed through to the Tile's outputs
  * @param inputType The data type of each PE's 'a' input
* @param rows Number of PEs on each row
* @param columns Number of PEs on each column
*/
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val in_a = Input(Vec(rows, inputType))
val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
val in_d = Input(Vec(columns, outputType))
val in_control = Input(Vec(columns, new PEControl(accType)))
val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val in_last = Input(Vec(columns, Bool()))
val out_a = Output(Vec(rows, inputType))
val out_c = Output(Vec(columns, outputType))
val out_b = Output(Vec(columns, outputType))
val out_control = Output(Vec(columns, new PEControl(accType)))
val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val out_last = Output(Vec(columns, Bool()))
val in_valid = Input(Vec(columns, Bool()))
val out_valid = Output(Vec(columns, Bool()))
val bad_dataflow = Output(Bool())
})
import ev._
val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
val tileT = tile.transpose
// TODO: abstract hori/vert broadcast, all these connections look the same
// Broadcast 'a' horizontally across the Tile
for (r <- 0 until rows) {
tile(r).foldLeft(io.in_a(r)) {
case (in_a, pe) =>
pe.io.in_a := in_a
pe.io.out_a
}
}
// Broadcast 'b' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_b(c)) {
case (in_b, pe) =>
pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
pe.io.out_b
}
}
// Broadcast 'd' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_d(c)) {
case (in_d, pe) =>
pe.io.in_d := in_d
pe.io.out_c
}
}
// Broadcast 'control' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_control(c)) {
case (in_ctrl, pe) =>
pe.io.in_control := in_ctrl
pe.io.out_control
}
}
// Broadcast 'garbage' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_valid(c)) {
case (v, pe) =>
pe.io.in_valid := v
pe.io.out_valid
}
}
// Broadcast 'id' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_id(c)) {
case (id, pe) =>
pe.io.in_id := id
pe.io.out_id
}
}
// Broadcast 'last' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_last(c)) {
case (last, pe) =>
pe.io.in_last := last
pe.io.out_last
}
}
// Drive the Tile's bottom IO
for (c <- 0 until columns) {
io.out_c(c) := tile(rows-1)(c).io.out_c
io.out_control(c) := tile(rows-1)(c).io.out_control
io.out_id(c) := tile(rows-1)(c).io.out_id
io.out_last(c) := tile(rows-1)(c).io.out_last
io.out_valid(c) := tile(rows-1)(c).io.out_valid
io.out_b(c) := {
if (tree_reduction) {
val prods = tileT(c).map(_.io.out_b)
accumulateTree(prods :+ io.in_b(c))
} else {
tile(rows - 1)(c).io.out_b
}
}
}
io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)
// Drive the Tile's right IO
for (r <- 0 until rows) {
io.out_a(r) := tile(r)(columns-1).io.out_a
}
}
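// NOTE (editor's sketch, not part of the upstream gemmini source): the foldLeft pattern
// used above to broadcast a signal through a row or column of PEs, reduced to a chain of
// simple pass-through modules. Module and port names are illustrative assumptions.
class PassThroughStage extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  io.out := io.in
}
class FoldChainExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(8.W))
    val out = Output(UInt(8.W))
  })
  private val stages = Seq.fill(4)(Module(new PassThroughStage))
  // Thread the input through every stage, exactly as the Tile threads 'a' across a row.
  io.out := stages.foldLeft(io.in) { case (prev, st) => st.io.in := prev; st.io.out }
}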
| module Tile_239( // @[Tile.scala:16:7]
input clock, // @[Tile.scala:16:7]
input reset, // @[Tile.scala:16:7]
input [7:0] io_in_a_0, // @[Tile.scala:17:14]
input [19:0] io_in_b_0, // @[Tile.scala:17:14]
input [19:0] io_in_d_0, // @[Tile.scala:17:14]
input io_in_control_0_dataflow, // @[Tile.scala:17:14]
input io_in_control_0_propagate, // @[Tile.scala:17:14]
input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14]
input [2:0] io_in_id_0, // @[Tile.scala:17:14]
input io_in_last_0, // @[Tile.scala:17:14]
output [19:0] io_out_c_0, // @[Tile.scala:17:14]
output [19:0] io_out_b_0, // @[Tile.scala:17:14]
output io_out_control_0_dataflow, // @[Tile.scala:17:14]
output io_out_control_0_propagate, // @[Tile.scala:17:14]
output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14]
output [2:0] io_out_id_0, // @[Tile.scala:17:14]
output io_out_last_0, // @[Tile.scala:17:14]
input io_in_valid_0, // @[Tile.scala:17:14]
output io_out_valid_0, // @[Tile.scala:17:14]
output io_bad_dataflow // @[Tile.scala:17:14]
);
wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
wire [7:0] io_out_a_0; // @[Tile.scala:16:7]
wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
wire io_out_control_0_propagate_0; // @[Tile.scala:16:7]
wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7]
wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7]
wire io_out_last_0_0; // @[Tile.scala:16:7]
wire io_out_valid_0_0; // @[Tile.scala:16:7]
wire io_bad_dataflow_0; // @[Tile.scala:16:7]
PE_495 tile_0_0 ( // @[Tile.scala:42:44]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0_0), // @[Tile.scala:16:7]
.io_in_b (io_in_b_0_0), // @[Tile.scala:16:7]
.io_in_d (io_in_d_0_0), // @[Tile.scala:16:7]
.io_out_a (io_out_a_0),
.io_out_b (io_out_b_0_0),
.io_out_c (io_out_c_0_0),
.io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
.io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
.io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7]
.io_out_control_dataflow (io_out_control_0_dataflow_0),
.io_out_control_propagate (io_out_control_0_propagate_0),
.io_out_control_shift (io_out_control_0_shift_0),
.io_in_id (io_in_id_0_0), // @[Tile.scala:16:7]
.io_out_id (io_out_id_0_0),
.io_in_last (io_in_last_0_0), // @[Tile.scala:16:7]
.io_out_last (io_out_last_0_0),
.io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7]
.io_out_valid (io_out_valid_0_0),
.io_bad_dataflow (io_bad_dataflow_0)
); // @[Tile.scala:42:44]
assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
assign io_bad_dataflow = io_bad_dataflow_0; // @[Tile.scala:16:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File util.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Utility Functions
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
package boom.v4.util
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.Instructions._
import freechips.rocketchip.rocket._
import freechips.rocketchip.util.{Str}
import org.chipsalliance.cde.config.{Parameters}
import freechips.rocketchip.tile.{TileKey}
import boom.v4.common.{MicroOp}
import boom.v4.exu.{BrUpdateInfo}
/**
 * Object to XOR-fold an input register of fullLength bits down to compressedLength bits.
*/
object Fold
{
def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = {
val clen = compressedLength
val hlen = fullLength
if (hlen <= clen) {
input
} else {
var res = 0.U(clen.W)
var remaining = input.asUInt
for (i <- 0 to hlen-1 by clen) {
val len = if (i + clen > hlen ) (hlen - i) else clen
require(len > 0)
res = res(clen-1,0) ^ remaining(len-1,0)
remaining = remaining >> len.U
}
res
}
}
}
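// Illustrative usage sketch (assumes a 16-bit wire `x`): XOR-fold a 16-bit
// history value down to 4 bits by XORing its four 4-bit slices together.
//   val x      = "ha5c3".U(16.W)
//   val folded = Fold(x, 4, 16)   // 0x3 ^ 0xC ^ 0x5 ^ 0xA = 0x0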
/**
* Object to check if MicroOp was killed due to a branch mispredict.
* Uses "Fast" branch masks
*/
object IsKilledByBranch
{
def apply(brupdate: BrUpdateInfo, flush: Bool, uop: MicroOp): Bool = {
return apply(brupdate, flush, uop.br_mask)
}
def apply(brupdate: BrUpdateInfo, flush: Bool, uop_mask: UInt): Bool = {
return maskMatch(brupdate.b1.mispredict_mask, uop_mask) || flush
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: T): Bool = {
return apply(brupdate, flush, bundle.uop)
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Bool = {
return apply(brupdate, flush, bundle.bits)
}
}
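// Illustrative usage sketch (inside a module with io.brupdate / io.flush inputs;
// r_valid and r_uop are hypothetical registers holding an in-flight uop):
// keep the entry only while it is not squashed, and update its branch mask.
//   r_valid := r_valid && !IsKilledByBranch(io.brupdate, io.flush, r_uop)
//   r_uop   := UpdateBrMask(io.brupdate, r_uop)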
/**
* Object to return new MicroOp with a new BR mask given a MicroOp mask
* and old BR mask.
*/
object GetNewUopAndBrMask
{
def apply(uop: MicroOp, brupdate: BrUpdateInfo)
(implicit p: Parameters): MicroOp = {
val newuop = WireInit(uop)
newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask
newuop
}
}
/**
* Object to return a BR mask given a MicroOp mask and old BR mask.
*/
object GetNewBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = {
return uop.br_mask & ~brupdate.b1.resolve_mask
}
def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = {
return br_mask & ~brupdate.b1.resolve_mask
}
}
object UpdateBrMask
{
def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = {
val out = WireInit(uop)
out.br_mask := GetNewBrMask(brupdate, uop)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = {
val out = WireInit(bundle)
out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask)
out
}
def apply[T <: boom.v4.common.HasBoomUOP](brupdate: BrUpdateInfo, flush: Bool, bundle: Valid[T]): Valid[T] = {
val out = WireInit(bundle)
out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask)
out.valid := bundle.valid && !IsKilledByBranch(brupdate, flush, bundle.bits.uop.br_mask)
out
}
}
/**
* Object to check if at least 1 bit matches in two masks
*/
object maskMatch
{
def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U
}
/**
* Object to clear one bit in a mask given an index
*/
object clearMaskBit
{
def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0)
}
/**
* Object to shift a register over by one bit and concat a new one
*/
object PerformShiftRegister
{
def apply(reg_val: UInt, new_bit: Bool): UInt = {
reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt
reg_val
}
}
/**
* Object to shift a register over by one bit, wrapping the top bit around to the bottom
* (XOR'ed with a new-bit), and evicting a bit at index HLEN.
* This is used to simulate a longer HLEN-width shift register that is folded
* down to a compressed CLEN.
*/
object PerformCircularShiftRegister
{
def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = {
val carry = csr(clen-1)
val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U)
newval
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapAdd
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, amt: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + amt)(log2Ceil(n)-1,0)
} else {
val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt)
Mux(sum >= n.U,
sum - n.U,
sum)
}
}
}
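// Illustrative usage sketch: advance a pointer by a variable amount over a
// 12-entry (non-power-of-two) structure. With value = 10.U and amt = 5.U the
// sum 15 exceeds 11, so the result wraps to 15 - 12 = 3.
//   val next = WrapAdd(value, amt, 12)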
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapSub
{
// "n" is the number of increments, so we wrap to n-1.
def apply(value: UInt, amt: Int, n: Int): UInt = {
if (isPow2(n)) {
(value - amt.U)(log2Ceil(n)-1,0)
} else {
val v = Cat(0.U(1.W), value)
val b = Cat(0.U(1.W), amt.U)
Mux(value >= amt.U,
value - amt.U,
n.U - amt.U + value)
}
}
}
/**
* Object to increment an input value, wrapping it if
* necessary.
*/
object WrapInc
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value + 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === (n-1).U)
Mux(wrap, 0.U, value + 1.U)
}
}
}
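// Illustrative usage sketch (deq_ptr is a hypothetical pointer register):
// step a dequeue pointer over a 6-entry queue; it counts 0..5 and then wraps.
//   val next_deq_ptr = WrapInc(deq_ptr, 6)   // Mux(deq_ptr === 5.U, 0.U, deq_ptr + 1.U)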
/**
* Object to decrement an input value, wrapping it if
* necessary.
*/
object WrapDec
{
// "n" is the number of increments, so we wrap at n-1.
def apply(value: UInt, n: Int): UInt = {
if (isPow2(n)) {
(value - 1.U)(log2Ceil(n)-1,0)
} else {
val wrap = (value === 0.U)
Mux(wrap, (n-1).U, value - 1.U)
}
}
}
/**
* Object to mask off lower bits of a PC to align to a "b"
* Byte boundary.
*/
object AlignPCToBoundary
{
def apply(pc: UInt, b: Int): UInt = {
    // Invert twice so that a pc wider than b keeps its upper bits
    // (a plain mask of width size(b) would clear all bits above size(b)).
~(~pc | (b-1).U)
}
}
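// Illustrative usage sketch: align a pc down to a 64-byte fetch boundary;
// the low log2(64) = 6 bits are cleared and the upper bits are kept.
//   val aligned_pc = AlignPCToBoundary(pc, 64)   // e.g. 0x80001a54 -> 0x80001a40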
/**
* Object to rotate a signal left by one
*/
object RotateL1
{
def apply(signal: UInt): UInt = {
val w = signal.getWidth
val out = Cat(signal(w-2,0), signal(w-1))
return out
}
}
/**
* Object to sext a value to a particular length.
*/
object Sext
{
def apply(x: UInt, length: Int): UInt = {
if (x.getWidth == length) return x
else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x)
}
}
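// Illustrative usage sketch: sign-extend a 12-bit immediate to 64 bits by
// replicating bit 11.
//   val imm12 = "hfff".U(12.W)      // -1 in two's complement
//   val imm64 = Sext(imm12, 64)     // 64'hffff_ffff_ffff_ffff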
/**
* Object to translate from BOOM's special "packed immediate" to a 32b signed immediate
* Asking for U-type gives it shifted up 12 bits.
*/
object ImmGen
{
import boom.v4.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U, IS_N}
def apply(i: UInt, isel: UInt): UInt = {
val ip = Mux(isel === IS_N, 0.U(LONGEST_IMM_SZ.W), i)
val sign = ip(LONGEST_IMM_SZ-1).asSInt
val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign)
val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign)
val i11 = Mux(isel === IS_U, 0.S,
Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign))
val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt)
val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt)
val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S)
return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0)
}
}
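// Illustrative usage sketch (`uop` is a hypothetical MicroOp wire; imm_packed
// and imm_sel are its fields): a typical call site just feeds the packed
// immediate and its selector through the generator.
//   val imm = ImmGen(uop.imm_packed, uop.imm_sel)   // 32b signed immediate, per the note above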
/**
* Object to see if an instruction is a JALR.
*/
object DebugIsJALR
{
def apply(inst: UInt): Bool = {
// TODO Chisel not sure why this won't compile
// val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)),
// Array(
// JALR -> Bool(true)))
inst(6,0) === "b1100111".U
}
}
/**
* Object to take an instruction and output its branch or jal target. Only used
 * for a debug assert (nowhere else would we jump straight from instruction
* bits to a target).
*/
object DebugGetBJImm
{
def apply(inst: UInt): UInt = {
// TODO Chisel not sure why this won't compile
//val csignals =
//rocket.DecodeLogic(inst,
// List(Bool(false), Bool(false)),
// Array(
// BEQ -> List(Bool(true ), Bool(false)),
// BNE -> List(Bool(true ), Bool(false)),
// BGE -> List(Bool(true ), Bool(false)),
// BGEU -> List(Bool(true ), Bool(false)),
// BLT -> List(Bool(true ), Bool(false)),
// BLTU -> List(Bool(true ), Bool(false))
// ))
//val is_br :: nothing :: Nil = csignals
val is_br = (inst(6,0) === "b1100011".U)
val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W))
val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W))
Mux(is_br, br_targ, jal_targ)
}
}
/**
* Object to return the lowest bit position after the head.
*/
object AgePriorityEncoder
{
def apply(in: Seq[Bool], head: UInt): UInt = {
val n = in.size
val width = log2Ceil(in.size)
val n_padded = 1 << width
val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in
val idx = PriorityEncoder(temp_vec)
idx(width-1, 0) //discard msb
}
}
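// Illustrative worked example: with four requesters, head = 2.U, and
// in = Seq(true.B, false.B, false.B, true.B), entries at or after the head are
// favored, so index 3 wins even though index 0 also requests.
//   val winner = AgePriorityEncoder(in, head)   // yields 3.U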
/**
* Object to determine whether queue
* index i0 is older than index i1.
*/
object IsOlder
{
def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head))
}
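// Illustrative worked example: with a circular buffer whose head is at 5,
// index 6 (allocated just after the head) is older than index 1 (which has
// wrapped around), so IsOlder(6.U, 1.U, 5.U) evaluates to true.B.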
object IsYoungerMask
{
def apply(i: UInt, head: UInt, n: Integer): UInt = {
val hi_mask = ~MaskLower(UIntToOH(i)(n-1,0))
val lo_mask = ~MaskUpper(UIntToOH(head)(n-1,0))
Mux(i < head, hi_mask & lo_mask, hi_mask | lo_mask)(n-1,0)
}
}
/**
* Set all bits at or below the highest order '1'.
*/
object MaskLower
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => in >> i.U).reduce(_|_)
}
}
/**
* Set all bits at or above the lowest order '1'.
*/
object MaskUpper
{
def apply(in: UInt) = {
val n = in.getWidth
(0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_)
}
}
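// Illustrative worked example for the two mask helpers, on an 8-bit input
// with only bit 3 set:
//   MaskLower("b0000_1000".U(8.W))   // = b0000_1111 (bit 3 and everything below)
//   MaskUpper("b0000_1000".U(8.W))   // = b1111_1000 (bit 3 and everything above)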
/**
* Transpose a matrix of Chisel Vecs.
*/
object Transpose
{
def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = {
val n = in(0).size
VecInit((0 until n).map(i => VecInit(in.map(row => row(i)))))
}
}
/**
* N-wide one-hot priority encoder.
*/
object SelectFirstN
{
def apply(in: UInt, n: Int) = {
val sels = Wire(Vec(n, UInt(in.getWidth.W)))
var mask = in
for (i <- 0 until n) {
sels(i) := PriorityEncoderOH(mask)
mask = mask & ~sels(i)
}
sels
}
}
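// Illustrative usage sketch: pick the two lowest set bits of an 8-bit request
// vector, returned as one one-hot selection per output.
//   val sels = SelectFirstN("b0010_1100".U(8.W), 2)
//   // sels(0) = b0000_0100 (lowest set bit), sels(1) = b0000_1000 (next lowest)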
/**
* Connect the first k of n valid input interfaces to k output interfaces.
*/
class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module
{
require(n >= k)
val io = IO(new Bundle {
val in = Vec(n, Flipped(DecoupledIO(gen)))
val out = Vec(k, DecoupledIO(gen))
})
if (n == k) {
io.out <> io.in
} else {
val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c))
val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col =>
(col zip io.in.map(_.valid)) map {case (c,v) => c && v})
val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_))
val out_valids = sels map (col => col.reduce(_||_))
val out_data = sels map (s => Mux1H(s, io.in.map(_.bits)))
in_readys zip io.in foreach {case (r,i) => i.ready := r}
out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d}
}
}
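// Illustrative usage sketch (producers/consumers are hypothetical Vecs of
// DecoupledIO(UInt(8.W)) of sizes 4 and 2): funnel the first two valid
// producers into the two consumer ports.
//   val compactor = Module(new Compactor(n = 4, k = 2, gen = UInt(8.W)))
//   compactor.io.in <> producers
//   consumers       <> compactor.io.out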
/**
* Create a queue that can be killed with a branch kill signal.
* Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq).
*/
class BranchKillableQueue[T <: boom.v4.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v4.common.MicroOp => Bool = u => true.B, fastDeq: Boolean = false)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val enq = Flipped(Decoupled(gen))
val deq = Decoupled(gen)
val brupdate = Input(new BrUpdateInfo())
val flush = Input(Bool())
val empty = Output(Bool())
val count = Output(UInt(log2Ceil(entries).W))
})
if (fastDeq && entries > 1) {
// Pipeline dequeue selection so the mux gets an entire cycle
val main = Module(new BranchKillableQueue(gen, entries-1, flush_fn, false))
val out_reg = Reg(gen)
val out_valid = RegInit(false.B)
val out_uop = Reg(new MicroOp)
main.io.enq <> io.enq
main.io.brupdate := io.brupdate
main.io.flush := io.flush
io.empty := main.io.empty && !out_valid
io.count := main.io.count + out_valid
io.deq.valid := out_valid
io.deq.bits := out_reg
io.deq.bits.uop := out_uop
out_uop := UpdateBrMask(io.brupdate, out_uop)
out_valid := out_valid && !IsKilledByBranch(io.brupdate, false.B, out_uop) && !(io.flush && flush_fn(out_uop))
main.io.deq.ready := false.B
when (io.deq.fire || !out_valid) {
out_valid := main.io.deq.valid && !IsKilledByBranch(io.brupdate, false.B, main.io.deq.bits.uop) && !(io.flush && flush_fn(main.io.deq.bits.uop))
out_reg := main.io.deq.bits
out_uop := UpdateBrMask(io.brupdate, main.io.deq.bits.uop)
main.io.deq.ready := true.B
}
} else {
val ram = Mem(entries, gen)
val valids = RegInit(VecInit(Seq.fill(entries) {false.B}))
val uops = Reg(Vec(entries, new MicroOp))
val enq_ptr = Counter(entries)
val deq_ptr = Counter(entries)
val maybe_full = RegInit(false.B)
val ptr_match = enq_ptr.value === deq_ptr.value
io.empty := ptr_match && !maybe_full
val full = ptr_match && maybe_full
val do_enq = WireInit(io.enq.fire && !IsKilledByBranch(io.brupdate, false.B, io.enq.bits.uop) && !(io.flush && flush_fn(io.enq.bits.uop)))
val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty)
for (i <- 0 until entries) {
val mask = uops(i).br_mask
val uop = uops(i)
valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, false.B, mask) && !(io.flush && flush_fn(uop))
when (valids(i)) {
uops(i).br_mask := GetNewBrMask(io.brupdate, mask)
}
}
when (do_enq) {
ram(enq_ptr.value) := io.enq.bits
valids(enq_ptr.value) := true.B
uops(enq_ptr.value) := io.enq.bits.uop
uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop)
enq_ptr.inc()
}
when (do_deq) {
valids(deq_ptr.value) := false.B
deq_ptr.inc()
}
when (do_enq =/= do_deq) {
maybe_full := do_enq
}
io.enq.ready := !full
val out = Wire(gen)
out := ram(deq_ptr.value)
out.uop := uops(deq_ptr.value)
io.deq.valid := !io.empty && valids(deq_ptr.value)
io.deq.bits := out
val ptr_diff = enq_ptr.value - deq_ptr.value
if (isPow2(entries)) {
io.count := Cat(maybe_full && ptr_match, ptr_diff)
}
else {
io.count := Mux(ptr_match,
Mux(maybe_full,
entries.asUInt, 0.U),
Mux(deq_ptr.value > enq_ptr.value,
entries.asUInt + ptr_diff, ptr_diff))
}
}
}
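// Illustrative usage sketch (assumes an implicit Parameters in scope and a
// hypothetical Bundle `SomeBoomBundle` mixing in HasBoomUOP): an 8-entry queue
// whose entries are dropped on branch mispredicts and on pipeline flushes.
//   val q = Module(new BranchKillableQueue(new SomeBoomBundle, entries = 8))
//   q.io.enq      <> producer
//   q.io.brupdate := io.brupdate
//   q.io.flush    := io.flush
//   consumer      <> q.io.deq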
// ------------------------------------------
// Printf helper functions
// ------------------------------------------
object BoolToChar
{
/**
* Take in a Chisel Bool and convert it into a Str
* based on the Chars given
*
* @param c_bool Chisel Bool
* @param trueChar Scala Char if bool is true
* @param falseChar Scala Char if bool is false
* @return UInt ASCII Char for "trueChar" or "falseChar"
*/
def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = {
Mux(c_bool, Str(trueChar), Str(falseChar))
}
}
object CfiTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param cfi_type specific cfi type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(cfi_type: UInt) = {
val strings = Seq("----", "BR ", "JAL ", "JALR")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(cfi_type)
}
}
object BpdTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param bpd_type specific bpd type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(bpd_type: UInt) = {
val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(bpd_type)
}
}
object RobTypeToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param rob_type specific rob type
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(rob_type: UInt) = {
val strings = Seq("RST", "NML", "RBK", " WT")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(rob_type)
}
}
object XRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param xreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(xreg: UInt) = {
val strings = Seq(" x0", " ra", " sp", " gp",
" tp", " t0", " t1", " t2",
" s0", " s1", " a0", " a1",
" a2", " a3", " a4", " a5",
" a6", " a7", " s2", " s3",
" s4", " s5", " s6", " s7",
" s8", " s9", "s10", "s11",
" t3", " t4", " t5", " t6")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(xreg)
}
}
object FPRegToChars
{
/**
* Get a Vec of Strs that can be used for printing
*
* @param fpreg specific register number
* @return Vec of Strs (must be indexed to get specific char)
*/
def apply(fpreg: UInt) = {
val strings = Seq(" ft0", " ft1", " ft2", " ft3",
" ft4", " ft5", " ft6", " ft7",
" fs0", " fs1", " fa0", " fa1",
" fa2", " fa3", " fa4", " fa5",
" fa6", " fa7", " fs2", " fs3",
" fs4", " fs5", " fs6", " fs7",
" fs8", " fs9", "fs10", "fs11",
" ft8", " ft9", "ft10", "ft11")
val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) })
multiVec(fpreg)
}
}
object BoomCoreStringPrefix
{
/**
* Add prefix to BOOM strings (currently only adds the hartId)
*
* @param strs list of strings
* @return String combining the list with the prefix per line
*/
def apply(strs: String*)(implicit p: Parameters) = {
val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
strs.map(str => prefix + str + "\n").mkString("")
}
}
class BranchKillablePipeline[T <: boom.v4.common.HasBoomUOP](gen: T, stages: Int)
(implicit p: org.chipsalliance.cde.config.Parameters)
extends boom.v4.common.BoomModule()(p)
with boom.v4.common.HasBoomCoreParameters
{
val io = IO(new Bundle {
val req = Input(Valid(gen))
val flush = Input(Bool())
val brupdate = Input(new BrUpdateInfo)
val resp = Output(Vec(stages, Valid(gen)))
})
require(stages > 0)
val uops = Reg(Vec(stages, Valid(gen)))
uops(0).valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.flush, io.req.bits)
uops(0).bits := UpdateBrMask(io.brupdate, io.req.bits)
for (i <- 1 until stages) {
uops(i).valid := uops(i-1).valid && !IsKilledByBranch(io.brupdate, io.flush, uops(i-1).bits)
uops(i).bits := UpdateBrMask(io.brupdate, uops(i-1).bits)
}
for (i <- 0 until stages) { when (reset.asBool) { uops(i).valid := false.B } }
io.resp := uops
}
File issue-slot.scala:
//******************************************************************************
// Copyright (c) 2015 - 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// RISCV Processor Issue Slot Logic
//--------------------------------------------------------------------------
//------------------------------------------------------------------------------
//
// Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot.
// TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores.
// TODO Disable ldspec for FP queue.
package boom.v4.exu
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v4.common._
import boom.v4.util._
class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle
{
val valid = Output(Bool())
  val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitly?
val request = Output(Bool())
val grant = Input(Bool())
val iss_uop = Output(new MicroOp())
val in_uop = Input(Valid(new MicroOp())) // if valid, this WILL overwrite an entry!
val out_uop = Output(new MicroOp())
val brupdate = Input(new BrUpdateInfo())
val kill = Input(Bool()) // pipeline flush
val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant)
val squash_grant = Input(Bool())
val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new Wakeup)))
val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W)))
val child_rebusys = Input(UInt(aluWidth.W))
}
class IssueSlot(val numWakeupPorts: Int, val isMem: Boolean, val isFp: Boolean)(implicit p: Parameters)
extends BoomModule
{
val io = IO(new IssueSlotIO(numWakeupPorts))
val slot_valid = RegInit(false.B)
val slot_uop = Reg(new MicroOp())
val next_valid = WireInit(slot_valid)
val next_uop = WireInit(UpdateBrMask(io.brupdate, slot_uop))
val killed = IsKilledByBranch(io.brupdate, io.kill, slot_uop)
io.valid := slot_valid
io.out_uop := next_uop
io.will_be_valid := next_valid && !killed
when (io.kill) {
slot_valid := false.B
} .elsewhen (io.in_uop.valid) {
slot_valid := true.B
} .elsewhen (io.clear) {
slot_valid := false.B
} .otherwise {
slot_valid := next_valid && !killed
}
when (io.in_uop.valid) {
slot_uop := io.in_uop.bits
assert (!slot_valid || io.clear || io.kill)
} .otherwise {
slot_uop := next_uop
}
// Wakeups
next_uop.iw_p1_bypass_hint := false.B
next_uop.iw_p2_bypass_hint := false.B
next_uop.iw_p3_bypass_hint := false.B
next_uop.iw_p1_speculative_child := 0.U
next_uop.iw_p2_speculative_child := 0.U
val rebusied_prs1 = WireInit(false.B)
val rebusied_prs2 = WireInit(false.B)
val rebusied = rebusied_prs1 || rebusied_prs2
val prs1_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs1 }
val prs2_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs2 }
val prs3_matches = io.wakeup_ports.map { w => w.bits.uop.pdst === slot_uop.prs3 }
val prs1_wakeups = (io.wakeup_ports zip prs1_matches).map { case (w,m) => w.valid && m }
val prs2_wakeups = (io.wakeup_ports zip prs2_matches).map { case (w,m) => w.valid && m }
val prs3_wakeups = (io.wakeup_ports zip prs3_matches).map { case (w,m) => w.valid && m }
val prs1_rebusys = (io.wakeup_ports zip prs1_matches).map { case (w,m) => w.bits.rebusy && m }
val prs2_rebusys = (io.wakeup_ports zip prs2_matches).map { case (w,m) => w.bits.rebusy && m }
val bypassables = io.wakeup_ports.map { w => w.bits.bypassable }
val speculative_masks = io.wakeup_ports.map { w => w.bits.speculative_mask }
when (prs1_wakeups.reduce(_||_)) {
next_uop.prs1_busy := false.B
next_uop.iw_p1_speculative_child := Mux1H(prs1_wakeups, speculative_masks)
next_uop.iw_p1_bypass_hint := Mux1H(prs1_wakeups, bypassables)
}
when ((prs1_rebusys.reduce(_||_) || ((io.child_rebusys & slot_uop.iw_p1_speculative_child) =/= 0.U)) &&
slot_uop.lrs1_rtype === RT_FIX) {
next_uop.prs1_busy := true.B
rebusied_prs1 := true.B
}
when (prs2_wakeups.reduce(_||_)) {
next_uop.prs2_busy := false.B
next_uop.iw_p2_speculative_child := Mux1H(prs2_wakeups, speculative_masks)
next_uop.iw_p2_bypass_hint := Mux1H(prs2_wakeups, bypassables)
}
when ((prs2_rebusys.reduce(_||_) || ((io.child_rebusys & slot_uop.iw_p2_speculative_child) =/= 0.U)) &&
slot_uop.lrs2_rtype === RT_FIX) {
next_uop.prs2_busy := true.B
rebusied_prs2 := true.B
}
when (prs3_wakeups.reduce(_||_)) {
next_uop.prs3_busy := false.B
next_uop.iw_p3_bypass_hint := Mux1H(prs3_wakeups, bypassables)
}
when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === slot_uop.ppred) {
next_uop.ppred_busy := false.B
}
val iss_ready = !slot_uop.prs1_busy && !slot_uop.prs2_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && !(slot_uop.prs3_busy && isFp.B)
val agen_ready = (slot_uop.fu_code(FC_AGEN) && !slot_uop.prs1_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && isMem.B)
val dgen_ready = (slot_uop.fu_code(FC_DGEN) && !slot_uop.prs2_busy && !(slot_uop.ppred_busy && enableSFBOpt.B) && isMem.B)
io.request := slot_valid && !slot_uop.iw_issued && (
iss_ready || agen_ready || dgen_ready
)
io.iss_uop := slot_uop
// Update state for current micro-op based on grant
next_uop.iw_issued := false.B
next_uop.iw_issued_partial_agen := false.B
next_uop.iw_issued_partial_dgen := false.B
when (io.grant && !io.squash_grant) {
next_uop.iw_issued := true.B
}
if (isMem) {
when (slot_uop.fu_code(FC_AGEN) && slot_uop.fu_code(FC_DGEN)) {
when (agen_ready) {
// Issue the AGEN, next slot entry is a DGEN
when (io.grant && !io.squash_grant) {
next_uop.iw_issued_partial_agen := true.B
}
io.iss_uop.fu_code(FC_AGEN) := true.B
io.iss_uop.fu_code(FC_DGEN) := false.B
} .otherwise {
// Issue the DGEN, next slot entry is the AGEN
when (io.grant && !io.squash_grant) {
next_uop.iw_issued_partial_dgen := true.B
}
io.iss_uop.fu_code(FC_AGEN) := false.B
io.iss_uop.fu_code(FC_DGEN) := true.B
io.iss_uop.imm_sel := IS_N
io.iss_uop.prs1 := slot_uop.prs2
io.iss_uop.lrs1_rtype := slot_uop.lrs2_rtype
io.iss_uop.iw_p1_bypass_hint := slot_uop.iw_p2_bypass_hint
}
} .elsewhen (slot_uop.fu_code(FC_DGEN)) {
io.iss_uop.imm_sel := IS_N
io.iss_uop.prs1 := slot_uop.prs2
io.iss_uop.lrs1_rtype := slot_uop.lrs2_rtype
io.iss_uop.iw_p1_bypass_hint := slot_uop.iw_p2_bypass_hint
}
io.iss_uop.lrs2_rtype := RT_X
io.iss_uop.prs2 := io.iss_uop.prs1 // helps with DCE
}
when (slot_valid && slot_uop.iw_issued) {
next_valid := rebusied
if (isMem) {
when (slot_uop.iw_issued_partial_agen) {
next_valid := true.B
when (!rebusied_prs1) {
next_uop.fu_code(FC_AGEN) := false.B
next_uop.fu_code(FC_DGEN) := true.B
}
} .elsewhen (slot_uop.iw_issued_partial_dgen) {
next_valid := true.B
when (!rebusied_prs2) {
next_uop.fu_code(FC_AGEN) := true.B
next_uop.fu_code(FC_DGEN) := false.B
}
}
}
}
}
| module IssueSlot_86( // @[issue-slot.scala:49:7]
input clock, // @[issue-slot.scala:49:7]
input reset, // @[issue-slot.scala:49:7]
output io_valid, // @[issue-slot.scala:52:14]
output io_will_be_valid, // @[issue-slot.scala:52:14]
output io_request, // @[issue-slot.scala:52:14]
input io_grant, // @[issue-slot.scala:52:14]
output [31:0] io_iss_uop_inst, // @[issue-slot.scala:52:14]
output [31:0] io_iss_uop_debug_inst, // @[issue-slot.scala:52:14]
output io_iss_uop_is_rvc, // @[issue-slot.scala:52:14]
output [39:0] io_iss_uop_debug_pc, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_0, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_1, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_2, // @[issue-slot.scala:52:14]
output io_iss_uop_iq_type_3, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_0, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_1, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_2, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_3, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_4, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_5, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_6, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_7, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_8, // @[issue-slot.scala:52:14]
output io_iss_uop_fu_code_9, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_issued, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
output io_iss_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_dis_col_sel, // @[issue-slot.scala:52:14]
output [15:0] io_iss_uop_br_mask, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_br_tag, // @[issue-slot.scala:52:14]
output [3:0] io_iss_uop_br_type, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sfb, // @[issue-slot.scala:52:14]
output io_iss_uop_is_fence, // @[issue-slot.scala:52:14]
output io_iss_uop_is_fencei, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sfence, // @[issue-slot.scala:52:14]
output io_iss_uop_is_amo, // @[issue-slot.scala:52:14]
output io_iss_uop_is_eret, // @[issue-slot.scala:52:14]
output io_iss_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
output io_iss_uop_is_rocc, // @[issue-slot.scala:52:14]
output io_iss_uop_is_mov, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_ftq_idx, // @[issue-slot.scala:52:14]
output io_iss_uop_edge_inst, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_pc_lob, // @[issue-slot.scala:52:14]
output io_iss_uop_taken, // @[issue-slot.scala:52:14]
output io_iss_uop_imm_rename, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_imm_sel, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_pimm, // @[issue-slot.scala:52:14]
output [19:0] io_iss_uop_imm_packed, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_op1_sel, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_op2_sel, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_rob_idx, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_ldq_idx, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_stq_idx, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_rxq_idx, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_pdst, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs1, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs2, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_prs3, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_ppred, // @[issue-slot.scala:52:14]
output io_iss_uop_prs1_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_prs2_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_prs3_busy, // @[issue-slot.scala:52:14]
output io_iss_uop_ppred_busy, // @[issue-slot.scala:52:14]
output [6:0] io_iss_uop_stale_pdst, // @[issue-slot.scala:52:14]
output io_iss_uop_exception, // @[issue-slot.scala:52:14]
output [63:0] io_iss_uop_exc_cause, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_mem_cmd, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_mem_size, // @[issue-slot.scala:52:14]
output io_iss_uop_mem_signed, // @[issue-slot.scala:52:14]
output io_iss_uop_uses_ldq, // @[issue-slot.scala:52:14]
output io_iss_uop_uses_stq, // @[issue-slot.scala:52:14]
output io_iss_uop_is_unique, // @[issue-slot.scala:52:14]
output io_iss_uop_flush_on_commit, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_csr_cmd, // @[issue-slot.scala:52:14]
output io_iss_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_ldst, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs1, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs2, // @[issue-slot.scala:52:14]
output [5:0] io_iss_uop_lrs3, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_dst_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
output io_iss_uop_frs3_en, // @[issue-slot.scala:52:14]
output io_iss_uop_fcn_dw, // @[issue-slot.scala:52:14]
output [4:0] io_iss_uop_fcn_op, // @[issue-slot.scala:52:14]
output io_iss_uop_fp_val, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_fp_rm, // @[issue-slot.scala:52:14]
output [1:0] io_iss_uop_fp_typ, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
output io_iss_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
output io_iss_uop_bp_debug_if, // @[issue-slot.scala:52:14]
output io_iss_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_debug_fsrc, // @[issue-slot.scala:52:14]
output [2:0] io_iss_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_in_uop_valid, // @[issue-slot.scala:52:14]
input [31:0] io_in_uop_bits_inst, // @[issue-slot.scala:52:14]
input [31:0] io_in_uop_bits_debug_inst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_in_uop_bits_debug_pc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_0, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iq_type_3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_0, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_4, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_5, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_6, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_7, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_8, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fu_code_9, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_issued, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_in_uop_bits_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_in_uop_bits_br_type, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sfb, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_fence, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_fencei, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sfence, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_amo, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_eret, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_rocc, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_ftq_idx, // @[issue-slot.scala:52:14]
input io_in_uop_bits_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_pc_lob, // @[issue-slot.scala:52:14]
input io_in_uop_bits_taken, // @[issue-slot.scala:52:14]
input io_in_uop_bits_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_in_uop_bits_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_op2_sel, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_ppred, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs1_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs2_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_prs3_busy, // @[issue-slot.scala:52:14]
input io_in_uop_bits_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_in_uop_bits_stale_pdst, // @[issue-slot.scala:52:14]
input io_in_uop_bits_exception, // @[issue-slot.scala:52:14]
input [63:0] io_in_uop_bits_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_mem_size, // @[issue-slot.scala:52:14]
input io_in_uop_bits_mem_signed, // @[issue-slot.scala:52:14]
input io_in_uop_bits_uses_ldq, // @[issue-slot.scala:52:14]
input io_in_uop_bits_uses_stq, // @[issue-slot.scala:52:14]
input io_in_uop_bits_is_unique, // @[issue-slot.scala:52:14]
input io_in_uop_bits_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_csr_cmd, // @[issue-slot.scala:52:14]
input io_in_uop_bits_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_in_uop_bits_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_in_uop_bits_frs3_en, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_in_uop_bits_fcn_op, // @[issue-slot.scala:52:14]
input io_in_uop_bits_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_in_uop_bits_fp_typ, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_bp_debug_if, // @[issue-slot.scala:52:14]
input io_in_uop_bits_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_in_uop_bits_debug_tsrc, // @[issue-slot.scala:52:14]
output [31:0] io_out_uop_inst, // @[issue-slot.scala:52:14]
output [31:0] io_out_uop_debug_inst, // @[issue-slot.scala:52:14]
output io_out_uop_is_rvc, // @[issue-slot.scala:52:14]
output [39:0] io_out_uop_debug_pc, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_0, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_1, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_2, // @[issue-slot.scala:52:14]
output io_out_uop_iq_type_3, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_0, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_1, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_2, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_3, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_4, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_5, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_6, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_7, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_8, // @[issue-slot.scala:52:14]
output io_out_uop_fu_code_9, // @[issue-slot.scala:52:14]
output io_out_uop_iw_issued, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
output io_out_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_dis_col_sel, // @[issue-slot.scala:52:14]
output [15:0] io_out_uop_br_mask, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_br_tag, // @[issue-slot.scala:52:14]
output [3:0] io_out_uop_br_type, // @[issue-slot.scala:52:14]
output io_out_uop_is_sfb, // @[issue-slot.scala:52:14]
output io_out_uop_is_fence, // @[issue-slot.scala:52:14]
output io_out_uop_is_fencei, // @[issue-slot.scala:52:14]
output io_out_uop_is_sfence, // @[issue-slot.scala:52:14]
output io_out_uop_is_amo, // @[issue-slot.scala:52:14]
output io_out_uop_is_eret, // @[issue-slot.scala:52:14]
output io_out_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
output io_out_uop_is_rocc, // @[issue-slot.scala:52:14]
output io_out_uop_is_mov, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_ftq_idx, // @[issue-slot.scala:52:14]
output io_out_uop_edge_inst, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_pc_lob, // @[issue-slot.scala:52:14]
output io_out_uop_taken, // @[issue-slot.scala:52:14]
output io_out_uop_imm_rename, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_imm_sel, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_pimm, // @[issue-slot.scala:52:14]
output [19:0] io_out_uop_imm_packed, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_op1_sel, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_op2_sel, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
output io_out_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_rob_idx, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_ldq_idx, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_stq_idx, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_rxq_idx, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_pdst, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs1, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs2, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_prs3, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_ppred, // @[issue-slot.scala:52:14]
output io_out_uop_prs1_busy, // @[issue-slot.scala:52:14]
output io_out_uop_prs2_busy, // @[issue-slot.scala:52:14]
output io_out_uop_prs3_busy, // @[issue-slot.scala:52:14]
output io_out_uop_ppred_busy, // @[issue-slot.scala:52:14]
output [6:0] io_out_uop_stale_pdst, // @[issue-slot.scala:52:14]
output io_out_uop_exception, // @[issue-slot.scala:52:14]
output [63:0] io_out_uop_exc_cause, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_mem_cmd, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_mem_size, // @[issue-slot.scala:52:14]
output io_out_uop_mem_signed, // @[issue-slot.scala:52:14]
output io_out_uop_uses_ldq, // @[issue-slot.scala:52:14]
output io_out_uop_uses_stq, // @[issue-slot.scala:52:14]
output io_out_uop_is_unique, // @[issue-slot.scala:52:14]
output io_out_uop_flush_on_commit, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_csr_cmd, // @[issue-slot.scala:52:14]
output io_out_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_ldst, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs1, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs2, // @[issue-slot.scala:52:14]
output [5:0] io_out_uop_lrs3, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_dst_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
output io_out_uop_frs3_en, // @[issue-slot.scala:52:14]
output io_out_uop_fcn_dw, // @[issue-slot.scala:52:14]
output [4:0] io_out_uop_fcn_op, // @[issue-slot.scala:52:14]
output io_out_uop_fp_val, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_fp_rm, // @[issue-slot.scala:52:14]
output [1:0] io_out_uop_fp_typ, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
output io_out_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
output io_out_uop_bp_debug_if, // @[issue-slot.scala:52:14]
output io_out_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_debug_fsrc, // @[issue-slot.scala:52:14]
output [2:0] io_out_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input [15:0] io_brupdate_b1_resolve_mask, // @[issue-slot.scala:52:14]
input [15:0] io_brupdate_b1_mispredict_mask, // @[issue-slot.scala:52:14]
input [31:0] io_brupdate_b2_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_brupdate_b2_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_brupdate_b2_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_brupdate_b2_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_brupdate_b2_uop_br_type, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_fence, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_amo, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_eret, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_taken, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_brupdate_b2_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_ppred, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_brupdate_b2_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_brupdate_b2_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_mem_size, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_is_unique, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_brupdate_b2_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_brupdate_b2_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_brupdate_b2_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_brupdate_b2_mispredict, // @[issue-slot.scala:52:14]
input io_brupdate_b2_taken, // @[issue-slot.scala:52:14]
input [2:0] io_brupdate_b2_cfi_type, // @[issue-slot.scala:52:14]
input [1:0] io_brupdate_b2_pc_sel, // @[issue-slot.scala:52:14]
input [39:0] io_brupdate_b2_jalr_target, // @[issue-slot.scala:52:14]
input [20:0] io_brupdate_b2_target_offset, // @[issue-slot.scala:52:14]
input io_kill, // @[issue-slot.scala:52:14]
input io_clear, // @[issue-slot.scala:52:14]
input io_squash_grant, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_0_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_0_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_0_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_0_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_0_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_0_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_0_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_0_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_0_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_0_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_0_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_bypassable, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_0_bits_speculative_mask, // @[issue-slot.scala:52:14]
input io_wakeup_ports_0_bits_rebusy, // @[issue-slot.scala:52:14]
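  // Wakeup port 1: full micro-op payload; unlike port 0 it carries no bypassable/speculative_mask/rebusy sideband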
input io_wakeup_ports_1_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_1_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_1_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_1_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued_partial_agen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_1_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_1_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_1_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_1_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_1_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_1_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_1_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_1_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_1_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_1_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
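  // Wakeup port 2: micro-op payload without the iw_issued_partial_* flags present on ports 0 and 1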
input io_wakeup_ports_2_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_2_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_2_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_2_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_2_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_2_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_2_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_2_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_2_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_2_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_2_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_2_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_2_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_2_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_2_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
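  // Wakeup port 3: same field set as wakeup port 2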
input io_wakeup_ports_3_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_3_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_3_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_3_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_3_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_3_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_3_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_3_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_3_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_3_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_3_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_3_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_3_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_3_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_3_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
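  // Wakeup port 4: same field set as wakeup port 2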
input io_wakeup_ports_4_valid, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_4_bits_uop_inst, // @[issue-slot.scala:52:14]
input [31:0] io_wakeup_ports_4_bits_uop_debug_inst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_rvc, // @[issue-slot.scala:52:14]
input [39:0] io_wakeup_ports_4_bits_uop_debug_pc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iq_type_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iq_type_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iq_type_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iq_type_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_0, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_4, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_5, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_6, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_7, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_8, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fu_code_9, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iw_issued, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_iw_p1_speculative_child, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_iw_p2_speculative_child, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iw_p1_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iw_p2_bypass_hint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_iw_p3_bypass_hint, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_dis_col_sel, // @[issue-slot.scala:52:14]
input [15:0] io_wakeup_ports_4_bits_uop_br_mask, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_4_bits_uop_br_tag, // @[issue-slot.scala:52:14]
input [3:0] io_wakeup_ports_4_bits_uop_br_type, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_sfb, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_fence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_fencei, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_sfence, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_amo, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_eret, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_sys_pc2epc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_rocc, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_mov, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_ftq_idx, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_edge_inst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_pc_lob, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_taken, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_imm_rename, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_imm_sel, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_pimm, // @[issue-slot.scala:52:14]
input [19:0] io_wakeup_ports_4_bits_uop_imm_packed, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_op1_sel, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_op2_sel, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_ldst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_wen, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_ren1, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_ren2, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_ren3, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_swap12, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_swap23, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagIn, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagOut, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_fromint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_toint, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_fastpipe, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_fma, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_div, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_sqrt, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_wflags, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_ctrl_vec, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_rob_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_ldq_idx, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_stq_idx, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_rxq_idx, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_pdst, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_prs1, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_prs2, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_prs3, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_ppred, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_prs1_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_prs2_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_prs3_busy, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_ppred_busy, // @[issue-slot.scala:52:14]
input [6:0] io_wakeup_ports_4_bits_uop_stale_pdst, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_exception, // @[issue-slot.scala:52:14]
input [63:0] io_wakeup_ports_4_bits_uop_exc_cause, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_mem_cmd, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_mem_size, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_mem_signed, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_uses_ldq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_uses_stq, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_is_unique, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_flush_on_commit, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_csr_cmd, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_ldst_is_rs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_ldst, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_lrs1, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_lrs2, // @[issue-slot.scala:52:14]
input [5:0] io_wakeup_ports_4_bits_uop_lrs3, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_dst_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_lrs1_rtype, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_lrs2_rtype, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_frs3_en, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fcn_dw, // @[issue-slot.scala:52:14]
input [4:0] io_wakeup_ports_4_bits_uop_fcn_op, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_fp_val, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_fp_rm, // @[issue-slot.scala:52:14]
input [1:0] io_wakeup_ports_4_bits_uop_fp_typ, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_xcpt_pf_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_xcpt_ae_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_xcpt_ma_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_bp_debug_if, // @[issue-slot.scala:52:14]
input io_wakeup_ports_4_bits_uop_bp_xcpt_if, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_debug_fsrc, // @[issue-slot.scala:52:14]
input [2:0] io_wakeup_ports_4_bits_uop_debug_tsrc, // @[issue-slot.scala:52:14]
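  // Predicate wakeup port and child-rebusy broadcast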
input io_pred_wakeup_port_valid, // @[issue-slot.scala:52:14]
input [4:0] io_pred_wakeup_port_bits, // @[issue-slot.scala:52:14]
input [2:0] io_child_rebusys // @[issue-slot.scala:52:14]
);
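  // Pass-through aliases: each *_0 wire simply mirrors the corresponding io_in_uop_bits_* / io_grant input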
wire [15:0] next_uop_out_br_mask; // @[util.scala:104:23]
wire io_grant_0 = io_grant; // @[issue-slot.scala:49:7]
wire io_in_uop_valid_0 = io_in_uop_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_in_uop_bits_inst_0 = io_in_uop_bits_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_in_uop_bits_debug_inst_0 = io_in_uop_bits_debug_inst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_rvc_0 = io_in_uop_bits_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_in_uop_bits_debug_pc_0 = io_in_uop_bits_debug_pc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_0_0 = io_in_uop_bits_iq_type_0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_1_0 = io_in_uop_bits_iq_type_1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_2_0 = io_in_uop_bits_iq_type_2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iq_type_3_0 = io_in_uop_bits_iq_type_3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_0_0 = io_in_uop_bits_fu_code_0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_1_0 = io_in_uop_bits_fu_code_1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_2_0 = io_in_uop_bits_fu_code_2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_3_0 = io_in_uop_bits_fu_code_3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_4_0 = io_in_uop_bits_fu_code_4; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_5_0 = io_in_uop_bits_fu_code_5; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_6_0 = io_in_uop_bits_fu_code_6; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_7_0 = io_in_uop_bits_fu_code_7; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_8_0 = io_in_uop_bits_fu_code_8; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fu_code_9_0 = io_in_uop_bits_fu_code_9; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_0 = io_in_uop_bits_iw_issued; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_iw_p1_speculative_child_0 = io_in_uop_bits_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_iw_p2_speculative_child_0 = io_in_uop_bits_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p1_bypass_hint_0 = io_in_uop_bits_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p2_bypass_hint_0 = io_in_uop_bits_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_p3_bypass_hint_0 = io_in_uop_bits_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_dis_col_sel_0 = io_in_uop_bits_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_in_uop_bits_br_mask_0 = io_in_uop_bits_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_br_tag_0 = io_in_uop_bits_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_in_uop_bits_br_type_0 = io_in_uop_bits_br_type; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sfb_0 = io_in_uop_bits_is_sfb; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_fence_0 = io_in_uop_bits_is_fence; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_fencei_0 = io_in_uop_bits_is_fencei; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sfence_0 = io_in_uop_bits_is_sfence; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_amo_0 = io_in_uop_bits_is_amo; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_eret_0 = io_in_uop_bits_is_eret; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_sys_pc2epc_0 = io_in_uop_bits_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_rocc_0 = io_in_uop_bits_is_rocc; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_mov_0 = io_in_uop_bits_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_ftq_idx_0 = io_in_uop_bits_ftq_idx; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_edge_inst_0 = io_in_uop_bits_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_pc_lob_0 = io_in_uop_bits_pc_lob; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_taken_0 = io_in_uop_bits_taken; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_imm_rename_0 = io_in_uop_bits_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_imm_sel_0 = io_in_uop_bits_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_pimm_0 = io_in_uop_bits_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_in_uop_bits_imm_packed_0 = io_in_uop_bits_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_op1_sel_0 = io_in_uop_bits_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_op2_sel_0 = io_in_uop_bits_op2_sel; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ldst_0 = io_in_uop_bits_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_wen_0 = io_in_uop_bits_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren1_0 = io_in_uop_bits_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren2_0 = io_in_uop_bits_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_ren3_0 = io_in_uop_bits_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_swap12_0 = io_in_uop_bits_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_swap23_0 = io_in_uop_bits_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_ctrl_typeTagIn_0 = io_in_uop_bits_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_ctrl_typeTagOut_0 = io_in_uop_bits_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fromint_0 = io_in_uop_bits_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_toint_0 = io_in_uop_bits_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fastpipe_0 = io_in_uop_bits_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_fma_0 = io_in_uop_bits_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_div_0 = io_in_uop_bits_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_sqrt_0 = io_in_uop_bits_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_wflags_0 = io_in_uop_bits_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_ctrl_vec_0 = io_in_uop_bits_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_rob_idx_0 = io_in_uop_bits_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_ldq_idx_0 = io_in_uop_bits_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_stq_idx_0 = io_in_uop_bits_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_rxq_idx_0 = io_in_uop_bits_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_pdst_0 = io_in_uop_bits_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs1_0 = io_in_uop_bits_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs2_0 = io_in_uop_bits_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_prs3_0 = io_in_uop_bits_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_ppred_0 = io_in_uop_bits_ppred; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs1_busy_0 = io_in_uop_bits_prs1_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs2_busy_0 = io_in_uop_bits_prs2_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_prs3_busy_0 = io_in_uop_bits_prs3_busy; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_ppred_busy_0 = io_in_uop_bits_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_in_uop_bits_stale_pdst_0 = io_in_uop_bits_stale_pdst; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_exception_0 = io_in_uop_bits_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_in_uop_bits_exc_cause_0 = io_in_uop_bits_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_mem_cmd_0 = io_in_uop_bits_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_mem_size_0 = io_in_uop_bits_mem_size; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_mem_signed_0 = io_in_uop_bits_mem_signed; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_uses_ldq_0 = io_in_uop_bits_uses_ldq; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_uses_stq_0 = io_in_uop_bits_uses_stq; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_is_unique_0 = io_in_uop_bits_is_unique; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_flush_on_commit_0 = io_in_uop_bits_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_csr_cmd_0 = io_in_uop_bits_csr_cmd; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_ldst_is_rs1_0 = io_in_uop_bits_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_ldst_0 = io_in_uop_bits_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs1_0 = io_in_uop_bits_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs2_0 = io_in_uop_bits_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_in_uop_bits_lrs3_0 = io_in_uop_bits_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_dst_rtype_0 = io_in_uop_bits_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_lrs1_rtype_0 = io_in_uop_bits_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_lrs2_rtype_0 = io_in_uop_bits_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_frs3_en_0 = io_in_uop_bits_frs3_en; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fcn_dw_0 = io_in_uop_bits_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_in_uop_bits_fcn_op_0 = io_in_uop_bits_fcn_op; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_fp_val_0 = io_in_uop_bits_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_fp_rm_0 = io_in_uop_bits_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_in_uop_bits_fp_typ_0 = io_in_uop_bits_fp_typ; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_pf_if_0 = io_in_uop_bits_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_ae_if_0 = io_in_uop_bits_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_xcpt_ma_if_0 = io_in_uop_bits_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_bp_debug_if_0 = io_in_uop_bits_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_bp_xcpt_if_0 = io_in_uop_bits_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_debug_fsrc_0 = io_in_uop_bits_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_in_uop_bits_debug_tsrc_0 = io_in_uop_bits_debug_tsrc; // @[issue-slot.scala:49:7]
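  // Pass-through aliases of the branch-update (io_brupdate_*) bundle inputs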
wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[issue-slot.scala:49:7]
wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[issue-slot.scala:49:7]
wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_0_0 = io_brupdate_b2_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_1_0 = io_brupdate_b2_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_2_0 = io_brupdate_b2_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iq_type_3_0 = io_brupdate_b2_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_0_0 = io_brupdate_b2_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_1_0 = io_brupdate_b2_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_2_0 = io_brupdate_b2_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_3_0 = io_brupdate_b2_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_4_0 = io_brupdate_b2_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_5_0 = io_brupdate_b2_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_6_0 = io_brupdate_b2_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_7_0 = io_brupdate_b2_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_8_0 = io_brupdate_b2_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fu_code_9_0 = io_brupdate_b2_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_0 = io_brupdate_b2_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_partial_agen_0 = io_brupdate_b2_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_issued_partial_dgen_0 = io_brupdate_b2_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_iw_p1_speculative_child_0 = io_brupdate_b2_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_iw_p2_speculative_child_0 = io_brupdate_b2_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p1_bypass_hint_0 = io_brupdate_b2_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p2_bypass_hint_0 = io_brupdate_b2_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_iw_p3_bypass_hint_0 = io_brupdate_b2_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_dis_col_sel_0 = io_brupdate_b2_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_brupdate_b2_uop_br_type_0 = io_brupdate_b2_uop_br_type; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sfence_0 = io_brupdate_b2_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_eret_0 = io_brupdate_b2_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_rocc_0 = io_brupdate_b2_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_mov_0 = io_brupdate_b2_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_imm_rename_0 = io_brupdate_b2_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_imm_sel_0 = io_brupdate_b2_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_pimm_0 = io_brupdate_b2_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_op1_sel_0 = io_brupdate_b2_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_op2_sel_0 = io_brupdate_b2_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ldst_0 = io_brupdate_b2_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_wen_0 = io_brupdate_b2_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren1_0 = io_brupdate_b2_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren2_0 = io_brupdate_b2_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_ren3_0 = io_brupdate_b2_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_swap12_0 = io_brupdate_b2_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_swap23_0 = io_brupdate_b2_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagIn_0 = io_brupdate_b2_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_ctrl_typeTagOut_0 = io_brupdate_b2_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fromint_0 = io_brupdate_b2_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_toint_0 = io_brupdate_b2_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fastpipe_0 = io_brupdate_b2_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_fma_0 = io_brupdate_b2_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_div_0 = io_brupdate_b2_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_sqrt_0 = io_brupdate_b2_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_wflags_0 = io_brupdate_b2_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_ctrl_vec_0 = io_brupdate_b2_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_csr_cmd_0 = io_brupdate_b2_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fcn_dw_0 = io_brupdate_b2_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_brupdate_b2_uop_fcn_op_0 = io_brupdate_b2_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_fp_rm_0 = io_brupdate_b2_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_uop_fp_typ_0 = io_brupdate_b2_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[issue-slot.scala:49:7]
wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[issue-slot.scala:49:7]
wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[issue-slot.scala:49:7]
wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[issue-slot.scala:49:7]
wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[issue-slot.scala:49:7]
wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[issue-slot.scala:49:7]
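  // Slot control inputs (kill, clear, squash_grant) aliased to internal _0 wires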
wire io_kill_0 = io_kill; // @[issue-slot.scala:49:7]
wire io_clear_0 = io_clear; // @[issue-slot.scala:49:7]
wire io_squash_grant_0 = io_squash_grant; // @[issue-slot.scala:49:7]
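  // Wakeup port 0 inputs aliased to internal _0 wires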
wire io_wakeup_ports_0_valid_0 = io_wakeup_ports_0_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_0_bits_uop_inst_0 = io_wakeup_ports_0_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_0_bits_uop_debug_inst_0 = io_wakeup_ports_0_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_rvc_0 = io_wakeup_ports_0_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_0_bits_uop_debug_pc_0 = io_wakeup_ports_0_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_0_0 = io_wakeup_ports_0_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_1_0 = io_wakeup_ports_0_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_2_0 = io_wakeup_ports_0_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iq_type_3_0 = io_wakeup_ports_0_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_0_0 = io_wakeup_ports_0_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_1_0 = io_wakeup_ports_0_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_2_0 = io_wakeup_ports_0_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_3_0 = io_wakeup_ports_0_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_4_0 = io_wakeup_ports_0_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_5_0 = io_wakeup_ports_0_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_6_0 = io_wakeup_ports_0_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_7_0 = io_wakeup_ports_0_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_8_0 = io_wakeup_ports_0_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fu_code_9_0 = io_wakeup_ports_0_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_0 = io_wakeup_ports_0_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_partial_agen_0 = io_wakeup_ports_0_bits_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen_0 = io_wakeup_ports_0_bits_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_0_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_0_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_0_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_dis_col_sel_0 = io_wakeup_ports_0_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_0_bits_uop_br_mask_0 = io_wakeup_ports_0_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_br_tag_0 = io_wakeup_ports_0_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_0_bits_uop_br_type_0 = io_wakeup_ports_0_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sfb_0 = io_wakeup_ports_0_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_fence_0 = io_wakeup_ports_0_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_fencei_0 = io_wakeup_ports_0_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sfence_0 = io_wakeup_ports_0_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_amo_0 = io_wakeup_ports_0_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_eret_0 = io_wakeup_ports_0_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_0_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_rocc_0 = io_wakeup_ports_0_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_mov_0 = io_wakeup_ports_0_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_ftq_idx_0 = io_wakeup_ports_0_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_edge_inst_0 = io_wakeup_ports_0_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_pc_lob_0 = io_wakeup_ports_0_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_taken_0 = io_wakeup_ports_0_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_imm_rename_0 = io_wakeup_ports_0_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_imm_sel_0 = io_wakeup_ports_0_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_pimm_0 = io_wakeup_ports_0_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_0_bits_uop_imm_packed_0 = io_wakeup_ports_0_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_op1_sel_0 = io_wakeup_ports_0_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_op2_sel_0 = io_wakeup_ports_0_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_0_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_rob_idx_0 = io_wakeup_ports_0_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_ldq_idx_0 = io_wakeup_ports_0_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_stq_idx_0 = io_wakeup_ports_0_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_rxq_idx_0 = io_wakeup_ports_0_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_pdst_0 = io_wakeup_ports_0_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs1_0 = io_wakeup_ports_0_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs2_0 = io_wakeup_ports_0_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_prs3_0 = io_wakeup_ports_0_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_ppred_0 = io_wakeup_ports_0_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs1_busy_0 = io_wakeup_ports_0_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs2_busy_0 = io_wakeup_ports_0_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_prs3_busy_0 = io_wakeup_ports_0_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_ppred_busy_0 = io_wakeup_ports_0_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_0_bits_uop_stale_pdst_0 = io_wakeup_ports_0_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_exception_0 = io_wakeup_ports_0_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_0_bits_uop_exc_cause_0 = io_wakeup_ports_0_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_mem_cmd_0 = io_wakeup_ports_0_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_mem_size_0 = io_wakeup_ports_0_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_mem_signed_0 = io_wakeup_ports_0_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_uses_ldq_0 = io_wakeup_ports_0_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_uses_stq_0 = io_wakeup_ports_0_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_is_unique_0 = io_wakeup_ports_0_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_flush_on_commit_0 = io_wakeup_ports_0_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_csr_cmd_0 = io_wakeup_ports_0_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_0_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_ldst_0 = io_wakeup_ports_0_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs1_0 = io_wakeup_ports_0_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs2_0 = io_wakeup_ports_0_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_0_bits_uop_lrs3_0 = io_wakeup_ports_0_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_dst_rtype_0 = io_wakeup_ports_0_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_lrs1_rtype_0 = io_wakeup_ports_0_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_lrs2_rtype_0 = io_wakeup_ports_0_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_frs3_en_0 = io_wakeup_ports_0_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fcn_dw_0 = io_wakeup_ports_0_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_0_bits_uop_fcn_op_0 = io_wakeup_ports_0_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_fp_val_0 = io_wakeup_ports_0_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_fp_rm_0 = io_wakeup_ports_0_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_0_bits_uop_fp_typ_0 = io_wakeup_ports_0_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_0_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_0_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_0_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_bp_debug_if_0 = io_wakeup_ports_0_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_0_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_debug_fsrc_0 = io_wakeup_ports_0_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_uop_debug_tsrc_0 = io_wakeup_ports_0_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_bypassable_0 = io_wakeup_ports_0_bits_bypassable; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_0_bits_speculative_mask_0 = io_wakeup_ports_0_bits_speculative_mask; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_0_bits_rebusy_0 = io_wakeup_ports_0_bits_rebusy; // @[issue-slot.scala:49:7]
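  // Wakeup port 1 inputs aliased to internal _0 wires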
wire io_wakeup_ports_1_valid_0 = io_wakeup_ports_1_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_1_bits_uop_inst_0 = io_wakeup_ports_1_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_1_bits_uop_debug_inst_0 = io_wakeup_ports_1_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_rvc_0 = io_wakeup_ports_1_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_1_bits_uop_debug_pc_0 = io_wakeup_ports_1_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_0_0 = io_wakeup_ports_1_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_1_0 = io_wakeup_ports_1_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_2_0 = io_wakeup_ports_1_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iq_type_3_0 = io_wakeup_ports_1_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_0_0 = io_wakeup_ports_1_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_1_0 = io_wakeup_ports_1_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_2_0 = io_wakeup_ports_1_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_3_0 = io_wakeup_ports_1_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_4_0 = io_wakeup_ports_1_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_5_0 = io_wakeup_ports_1_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_6_0 = io_wakeup_ports_1_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_7_0 = io_wakeup_ports_1_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_8_0 = io_wakeup_ports_1_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fu_code_9_0 = io_wakeup_ports_1_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_0 = io_wakeup_ports_1_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_partial_agen_0 = io_wakeup_ports_1_bits_uop_iw_issued_partial_agen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen_0 = io_wakeup_ports_1_bits_uop_iw_issued_partial_dgen; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_1_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_1_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_1_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_dis_col_sel_0 = io_wakeup_ports_1_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_1_bits_uop_br_mask_0 = io_wakeup_ports_1_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_br_tag_0 = io_wakeup_ports_1_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_1_bits_uop_br_type_0 = io_wakeup_ports_1_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sfb_0 = io_wakeup_ports_1_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_fence_0 = io_wakeup_ports_1_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_fencei_0 = io_wakeup_ports_1_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sfence_0 = io_wakeup_ports_1_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_amo_0 = io_wakeup_ports_1_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_eret_0 = io_wakeup_ports_1_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_1_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_rocc_0 = io_wakeup_ports_1_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_mov_0 = io_wakeup_ports_1_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_ftq_idx_0 = io_wakeup_ports_1_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_edge_inst_0 = io_wakeup_ports_1_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_pc_lob_0 = io_wakeup_ports_1_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_taken_0 = io_wakeup_ports_1_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_imm_rename_0 = io_wakeup_ports_1_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_imm_sel_0 = io_wakeup_ports_1_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_pimm_0 = io_wakeup_ports_1_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_1_bits_uop_imm_packed_0 = io_wakeup_ports_1_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_op1_sel_0 = io_wakeup_ports_1_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_op2_sel_0 = io_wakeup_ports_1_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_1_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_rob_idx_0 = io_wakeup_ports_1_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_ldq_idx_0 = io_wakeup_ports_1_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_stq_idx_0 = io_wakeup_ports_1_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_rxq_idx_0 = io_wakeup_ports_1_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_pdst_0 = io_wakeup_ports_1_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs1_0 = io_wakeup_ports_1_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs2_0 = io_wakeup_ports_1_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_prs3_0 = io_wakeup_ports_1_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_ppred_0 = io_wakeup_ports_1_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs1_busy_0 = io_wakeup_ports_1_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs2_busy_0 = io_wakeup_ports_1_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_prs3_busy_0 = io_wakeup_ports_1_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_ppred_busy_0 = io_wakeup_ports_1_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_1_bits_uop_stale_pdst_0 = io_wakeup_ports_1_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_exception_0 = io_wakeup_ports_1_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_1_bits_uop_exc_cause_0 = io_wakeup_ports_1_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_mem_cmd_0 = io_wakeup_ports_1_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_mem_size_0 = io_wakeup_ports_1_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_mem_signed_0 = io_wakeup_ports_1_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_uses_ldq_0 = io_wakeup_ports_1_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_uses_stq_0 = io_wakeup_ports_1_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_is_unique_0 = io_wakeup_ports_1_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_flush_on_commit_0 = io_wakeup_ports_1_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_csr_cmd_0 = io_wakeup_ports_1_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_1_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_ldst_0 = io_wakeup_ports_1_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs1_0 = io_wakeup_ports_1_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs2_0 = io_wakeup_ports_1_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_1_bits_uop_lrs3_0 = io_wakeup_ports_1_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_dst_rtype_0 = io_wakeup_ports_1_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_lrs1_rtype_0 = io_wakeup_ports_1_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_lrs2_rtype_0 = io_wakeup_ports_1_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_frs3_en_0 = io_wakeup_ports_1_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fcn_dw_0 = io_wakeup_ports_1_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_1_bits_uop_fcn_op_0 = io_wakeup_ports_1_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_fp_val_0 = io_wakeup_ports_1_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_fp_rm_0 = io_wakeup_ports_1_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_1_bits_uop_fp_typ_0 = io_wakeup_ports_1_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_1_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_1_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_1_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_bp_debug_if_0 = io_wakeup_ports_1_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_1_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_debug_fsrc_0 = io_wakeup_ports_1_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_1_bits_uop_debug_tsrc_0 = io_wakeup_ports_1_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
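  // Wakeup port 2 inputs aliased to internal _0 wires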
wire io_wakeup_ports_2_valid_0 = io_wakeup_ports_2_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_2_bits_uop_inst_0 = io_wakeup_ports_2_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_2_bits_uop_debug_inst_0 = io_wakeup_ports_2_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_rvc_0 = io_wakeup_ports_2_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_2_bits_uop_debug_pc_0 = io_wakeup_ports_2_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_0_0 = io_wakeup_ports_2_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_1_0 = io_wakeup_ports_2_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_2_0 = io_wakeup_ports_2_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iq_type_3_0 = io_wakeup_ports_2_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_0_0 = io_wakeup_ports_2_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_1_0 = io_wakeup_ports_2_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_2_0 = io_wakeup_ports_2_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_3_0 = io_wakeup_ports_2_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_4_0 = io_wakeup_ports_2_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_5_0 = io_wakeup_ports_2_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_6_0 = io_wakeup_ports_2_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_7_0 = io_wakeup_ports_2_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_8_0 = io_wakeup_ports_2_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fu_code_9_0 = io_wakeup_ports_2_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_issued_0 = io_wakeup_ports_2_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_2_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_2_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_2_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_2_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_2_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_dis_col_sel_0 = io_wakeup_ports_2_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_2_bits_uop_br_mask_0 = io_wakeup_ports_2_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_2_bits_uop_br_tag_0 = io_wakeup_ports_2_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_2_bits_uop_br_type_0 = io_wakeup_ports_2_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_sfb_0 = io_wakeup_ports_2_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_fence_0 = io_wakeup_ports_2_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_fencei_0 = io_wakeup_ports_2_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_sfence_0 = io_wakeup_ports_2_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_amo_0 = io_wakeup_ports_2_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_eret_0 = io_wakeup_ports_2_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_2_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_rocc_0 = io_wakeup_ports_2_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_mov_0 = io_wakeup_ports_2_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_ftq_idx_0 = io_wakeup_ports_2_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_edge_inst_0 = io_wakeup_ports_2_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_pc_lob_0 = io_wakeup_ports_2_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_taken_0 = io_wakeup_ports_2_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_imm_rename_0 = io_wakeup_ports_2_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_imm_sel_0 = io_wakeup_ports_2_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_pimm_0 = io_wakeup_ports_2_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_2_bits_uop_imm_packed_0 = io_wakeup_ports_2_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_op1_sel_0 = io_wakeup_ports_2_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_op2_sel_0 = io_wakeup_ports_2_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_2_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_rob_idx_0 = io_wakeup_ports_2_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_ldq_idx_0 = io_wakeup_ports_2_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_stq_idx_0 = io_wakeup_ports_2_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_rxq_idx_0 = io_wakeup_ports_2_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_pdst_0 = io_wakeup_ports_2_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_prs1_0 = io_wakeup_ports_2_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_prs2_0 = io_wakeup_ports_2_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_prs3_0 = io_wakeup_ports_2_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_ppred_0 = io_wakeup_ports_2_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_prs1_busy_0 = io_wakeup_ports_2_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_prs2_busy_0 = io_wakeup_ports_2_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_prs3_busy_0 = io_wakeup_ports_2_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_ppred_busy_0 = io_wakeup_ports_2_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_2_bits_uop_stale_pdst_0 = io_wakeup_ports_2_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_exception_0 = io_wakeup_ports_2_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_2_bits_uop_exc_cause_0 = io_wakeup_ports_2_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_mem_cmd_0 = io_wakeup_ports_2_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_mem_size_0 = io_wakeup_ports_2_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_mem_signed_0 = io_wakeup_ports_2_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_uses_ldq_0 = io_wakeup_ports_2_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_uses_stq_0 = io_wakeup_ports_2_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_is_unique_0 = io_wakeup_ports_2_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_flush_on_commit_0 = io_wakeup_ports_2_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_csr_cmd_0 = io_wakeup_ports_2_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_2_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_ldst_0 = io_wakeup_ports_2_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_lrs1_0 = io_wakeup_ports_2_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_lrs2_0 = io_wakeup_ports_2_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_2_bits_uop_lrs3_0 = io_wakeup_ports_2_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_dst_rtype_0 = io_wakeup_ports_2_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_lrs1_rtype_0 = io_wakeup_ports_2_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_lrs2_rtype_0 = io_wakeup_ports_2_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_frs3_en_0 = io_wakeup_ports_2_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fcn_dw_0 = io_wakeup_ports_2_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_2_bits_uop_fcn_op_0 = io_wakeup_ports_2_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_fp_val_0 = io_wakeup_ports_2_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_fp_rm_0 = io_wakeup_ports_2_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_2_bits_uop_fp_typ_0 = io_wakeup_ports_2_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_2_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_2_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_2_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_bp_debug_if_0 = io_wakeup_ports_2_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_2_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_debug_fsrc_0 = io_wakeup_ports_2_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_2_bits_uop_debug_tsrc_0 = io_wakeup_ports_2_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
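  // Wakeup port 3 inputs aliased to internal _0 wires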
wire io_wakeup_ports_3_valid_0 = io_wakeup_ports_3_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_3_bits_uop_inst_0 = io_wakeup_ports_3_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_3_bits_uop_debug_inst_0 = io_wakeup_ports_3_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_rvc_0 = io_wakeup_ports_3_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_3_bits_uop_debug_pc_0 = io_wakeup_ports_3_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_0_0 = io_wakeup_ports_3_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_1_0 = io_wakeup_ports_3_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_2_0 = io_wakeup_ports_3_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iq_type_3_0 = io_wakeup_ports_3_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_0_0 = io_wakeup_ports_3_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_1_0 = io_wakeup_ports_3_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_2_0 = io_wakeup_ports_3_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_3_0 = io_wakeup_ports_3_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_4_0 = io_wakeup_ports_3_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_5_0 = io_wakeup_ports_3_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_6_0 = io_wakeup_ports_3_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_7_0 = io_wakeup_ports_3_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_8_0 = io_wakeup_ports_3_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fu_code_9_0 = io_wakeup_ports_3_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_issued_0 = io_wakeup_ports_3_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_3_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_3_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_3_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_3_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_3_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_dis_col_sel_0 = io_wakeup_ports_3_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_3_bits_uop_br_mask_0 = io_wakeup_ports_3_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_3_bits_uop_br_tag_0 = io_wakeup_ports_3_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_3_bits_uop_br_type_0 = io_wakeup_ports_3_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_sfb_0 = io_wakeup_ports_3_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_fence_0 = io_wakeup_ports_3_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_fencei_0 = io_wakeup_ports_3_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_sfence_0 = io_wakeup_ports_3_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_amo_0 = io_wakeup_ports_3_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_eret_0 = io_wakeup_ports_3_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_3_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_rocc_0 = io_wakeup_ports_3_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_mov_0 = io_wakeup_ports_3_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_ftq_idx_0 = io_wakeup_ports_3_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_edge_inst_0 = io_wakeup_ports_3_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_pc_lob_0 = io_wakeup_ports_3_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_taken_0 = io_wakeup_ports_3_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_imm_rename_0 = io_wakeup_ports_3_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_imm_sel_0 = io_wakeup_ports_3_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_pimm_0 = io_wakeup_ports_3_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_3_bits_uop_imm_packed_0 = io_wakeup_ports_3_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_op1_sel_0 = io_wakeup_ports_3_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_op2_sel_0 = io_wakeup_ports_3_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_3_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_rob_idx_0 = io_wakeup_ports_3_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_ldq_idx_0 = io_wakeup_ports_3_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_stq_idx_0 = io_wakeup_ports_3_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_rxq_idx_0 = io_wakeup_ports_3_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_pdst_0 = io_wakeup_ports_3_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_prs1_0 = io_wakeup_ports_3_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_prs2_0 = io_wakeup_ports_3_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_prs3_0 = io_wakeup_ports_3_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_ppred_0 = io_wakeup_ports_3_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_prs1_busy_0 = io_wakeup_ports_3_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_prs2_busy_0 = io_wakeup_ports_3_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_prs3_busy_0 = io_wakeup_ports_3_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_ppred_busy_0 = io_wakeup_ports_3_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_3_bits_uop_stale_pdst_0 = io_wakeup_ports_3_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_exception_0 = io_wakeup_ports_3_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_3_bits_uop_exc_cause_0 = io_wakeup_ports_3_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_mem_cmd_0 = io_wakeup_ports_3_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_mem_size_0 = io_wakeup_ports_3_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_mem_signed_0 = io_wakeup_ports_3_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_uses_ldq_0 = io_wakeup_ports_3_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_uses_stq_0 = io_wakeup_ports_3_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_is_unique_0 = io_wakeup_ports_3_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_flush_on_commit_0 = io_wakeup_ports_3_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_csr_cmd_0 = io_wakeup_ports_3_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_3_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_ldst_0 = io_wakeup_ports_3_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_lrs1_0 = io_wakeup_ports_3_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_lrs2_0 = io_wakeup_ports_3_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_3_bits_uop_lrs3_0 = io_wakeup_ports_3_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_dst_rtype_0 = io_wakeup_ports_3_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_lrs1_rtype_0 = io_wakeup_ports_3_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_lrs2_rtype_0 = io_wakeup_ports_3_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_frs3_en_0 = io_wakeup_ports_3_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fcn_dw_0 = io_wakeup_ports_3_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_3_bits_uop_fcn_op_0 = io_wakeup_ports_3_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_fp_val_0 = io_wakeup_ports_3_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_fp_rm_0 = io_wakeup_ports_3_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_3_bits_uop_fp_typ_0 = io_wakeup_ports_3_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_3_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_3_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_3_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_bp_debug_if_0 = io_wakeup_ports_3_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_3_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_debug_fsrc_0 = io_wakeup_ports_3_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_uop_debug_tsrc_0 = io_wakeup_ports_3_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
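  // Wakeup port 4 inputs aliased to internal _0 wires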
wire io_wakeup_ports_4_valid_0 = io_wakeup_ports_4_valid; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_4_bits_uop_inst_0 = io_wakeup_ports_4_bits_uop_inst; // @[issue-slot.scala:49:7]
wire [31:0] io_wakeup_ports_4_bits_uop_debug_inst_0 = io_wakeup_ports_4_bits_uop_debug_inst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_rvc_0 = io_wakeup_ports_4_bits_uop_is_rvc; // @[issue-slot.scala:49:7]
wire [39:0] io_wakeup_ports_4_bits_uop_debug_pc_0 = io_wakeup_ports_4_bits_uop_debug_pc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iq_type_0_0 = io_wakeup_ports_4_bits_uop_iq_type_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iq_type_1_0 = io_wakeup_ports_4_bits_uop_iq_type_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iq_type_2_0 = io_wakeup_ports_4_bits_uop_iq_type_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iq_type_3_0 = io_wakeup_ports_4_bits_uop_iq_type_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_0_0 = io_wakeup_ports_4_bits_uop_fu_code_0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_1_0 = io_wakeup_ports_4_bits_uop_fu_code_1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_2_0 = io_wakeup_ports_4_bits_uop_fu_code_2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_3_0 = io_wakeup_ports_4_bits_uop_fu_code_3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_4_0 = io_wakeup_ports_4_bits_uop_fu_code_4; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_5_0 = io_wakeup_ports_4_bits_uop_fu_code_5; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_6_0 = io_wakeup_ports_4_bits_uop_fu_code_6; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_7_0 = io_wakeup_ports_4_bits_uop_fu_code_7; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_8_0 = io_wakeup_ports_4_bits_uop_fu_code_8; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fu_code_9_0 = io_wakeup_ports_4_bits_uop_fu_code_9; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_issued_0 = io_wakeup_ports_4_bits_uop_iw_issued; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_iw_p1_speculative_child_0 = io_wakeup_ports_4_bits_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_iw_p2_speculative_child_0 = io_wakeup_ports_4_bits_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_p1_bypass_hint_0 = io_wakeup_ports_4_bits_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_p2_bypass_hint_0 = io_wakeup_ports_4_bits_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_p3_bypass_hint_0 = io_wakeup_ports_4_bits_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_dis_col_sel_0 = io_wakeup_ports_4_bits_uop_dis_col_sel; // @[issue-slot.scala:49:7]
wire [15:0] io_wakeup_ports_4_bits_uop_br_mask_0 = io_wakeup_ports_4_bits_uop_br_mask; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_4_bits_uop_br_tag_0 = io_wakeup_ports_4_bits_uop_br_tag; // @[issue-slot.scala:49:7]
wire [3:0] io_wakeup_ports_4_bits_uop_br_type_0 = io_wakeup_ports_4_bits_uop_br_type; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_sfb_0 = io_wakeup_ports_4_bits_uop_is_sfb; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_fence_0 = io_wakeup_ports_4_bits_uop_is_fence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_fencei_0 = io_wakeup_ports_4_bits_uop_is_fencei; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_sfence_0 = io_wakeup_ports_4_bits_uop_is_sfence; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_amo_0 = io_wakeup_ports_4_bits_uop_is_amo; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_eret_0 = io_wakeup_ports_4_bits_uop_is_eret; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_sys_pc2epc_0 = io_wakeup_ports_4_bits_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_rocc_0 = io_wakeup_ports_4_bits_uop_is_rocc; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_mov_0 = io_wakeup_ports_4_bits_uop_is_mov; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_ftq_idx_0 = io_wakeup_ports_4_bits_uop_ftq_idx; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_edge_inst_0 = io_wakeup_ports_4_bits_uop_edge_inst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_pc_lob_0 = io_wakeup_ports_4_bits_uop_pc_lob; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_taken_0 = io_wakeup_ports_4_bits_uop_taken; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_imm_rename_0 = io_wakeup_ports_4_bits_uop_imm_rename; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_imm_sel_0 = io_wakeup_ports_4_bits_uop_imm_sel; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_pimm_0 = io_wakeup_ports_4_bits_uop_pimm; // @[issue-slot.scala:49:7]
wire [19:0] io_wakeup_ports_4_bits_uop_imm_packed_0 = io_wakeup_ports_4_bits_uop_imm_packed; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_op1_sel_0 = io_wakeup_ports_4_bits_uop_op1_sel; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_op2_sel_0 = io_wakeup_ports_4_bits_uop_op2_sel; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_ldst_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_wen_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_ren1_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_ren2_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_ren3_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_swap12_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_swap23_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagIn_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagOut_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_fromint_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_toint_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_fastpipe_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_fma_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_div_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_div; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_sqrt_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_wflags_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_ctrl_vec_0 = io_wakeup_ports_4_bits_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_rob_idx_0 = io_wakeup_ports_4_bits_uop_rob_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_ldq_idx_0 = io_wakeup_ports_4_bits_uop_ldq_idx; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_stq_idx_0 = io_wakeup_ports_4_bits_uop_stq_idx; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_rxq_idx_0 = io_wakeup_ports_4_bits_uop_rxq_idx; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_pdst_0 = io_wakeup_ports_4_bits_uop_pdst; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_prs1_0 = io_wakeup_ports_4_bits_uop_prs1; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_prs2_0 = io_wakeup_ports_4_bits_uop_prs2; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_prs3_0 = io_wakeup_ports_4_bits_uop_prs3; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_ppred_0 = io_wakeup_ports_4_bits_uop_ppred; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_prs1_busy_0 = io_wakeup_ports_4_bits_uop_prs1_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_prs2_busy_0 = io_wakeup_ports_4_bits_uop_prs2_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_prs3_busy_0 = io_wakeup_ports_4_bits_uop_prs3_busy; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_ppred_busy_0 = io_wakeup_ports_4_bits_uop_ppred_busy; // @[issue-slot.scala:49:7]
wire [6:0] io_wakeup_ports_4_bits_uop_stale_pdst_0 = io_wakeup_ports_4_bits_uop_stale_pdst; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_exception_0 = io_wakeup_ports_4_bits_uop_exception; // @[issue-slot.scala:49:7]
wire [63:0] io_wakeup_ports_4_bits_uop_exc_cause_0 = io_wakeup_ports_4_bits_uop_exc_cause; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_mem_cmd_0 = io_wakeup_ports_4_bits_uop_mem_cmd; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_mem_size_0 = io_wakeup_ports_4_bits_uop_mem_size; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_mem_signed_0 = io_wakeup_ports_4_bits_uop_mem_signed; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_uses_ldq_0 = io_wakeup_ports_4_bits_uop_uses_ldq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_uses_stq_0 = io_wakeup_ports_4_bits_uop_uses_stq; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_is_unique_0 = io_wakeup_ports_4_bits_uop_is_unique; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_flush_on_commit_0 = io_wakeup_ports_4_bits_uop_flush_on_commit; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_csr_cmd_0 = io_wakeup_ports_4_bits_uop_csr_cmd; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_ldst_is_rs1_0 = io_wakeup_ports_4_bits_uop_ldst_is_rs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_ldst_0 = io_wakeup_ports_4_bits_uop_ldst; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_lrs1_0 = io_wakeup_ports_4_bits_uop_lrs1; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_lrs2_0 = io_wakeup_ports_4_bits_uop_lrs2; // @[issue-slot.scala:49:7]
wire [5:0] io_wakeup_ports_4_bits_uop_lrs3_0 = io_wakeup_ports_4_bits_uop_lrs3; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_dst_rtype_0 = io_wakeup_ports_4_bits_uop_dst_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_lrs1_rtype_0 = io_wakeup_ports_4_bits_uop_lrs1_rtype; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_lrs2_rtype_0 = io_wakeup_ports_4_bits_uop_lrs2_rtype; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_frs3_en_0 = io_wakeup_ports_4_bits_uop_frs3_en; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fcn_dw_0 = io_wakeup_ports_4_bits_uop_fcn_dw; // @[issue-slot.scala:49:7]
wire [4:0] io_wakeup_ports_4_bits_uop_fcn_op_0 = io_wakeup_ports_4_bits_uop_fcn_op; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_fp_val_0 = io_wakeup_ports_4_bits_uop_fp_val; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_fp_rm_0 = io_wakeup_ports_4_bits_uop_fp_rm; // @[issue-slot.scala:49:7]
wire [1:0] io_wakeup_ports_4_bits_uop_fp_typ_0 = io_wakeup_ports_4_bits_uop_fp_typ; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_xcpt_pf_if_0 = io_wakeup_ports_4_bits_uop_xcpt_pf_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_xcpt_ae_if_0 = io_wakeup_ports_4_bits_uop_xcpt_ae_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_xcpt_ma_if_0 = io_wakeup_ports_4_bits_uop_xcpt_ma_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_bp_debug_if_0 = io_wakeup_ports_4_bits_uop_bp_debug_if; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_bp_xcpt_if_0 = io_wakeup_ports_4_bits_uop_bp_xcpt_if; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_debug_fsrc_0 = io_wakeup_ports_4_bits_uop_debug_fsrc; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_uop_debug_tsrc_0 = io_wakeup_ports_4_bits_uop_debug_tsrc; // @[issue-slot.scala:49:7]
wire io_pred_wakeup_port_valid_0 = io_pred_wakeup_port_valid; // @[issue-slot.scala:49:7]
wire [4:0] io_pred_wakeup_port_bits_0 = io_pred_wakeup_port_bits; // @[issue-slot.scala:49:7]
wire [2:0] io_child_rebusys_0 = io_child_rebusys; // @[issue-slot.scala:49:7]
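  // Signals tied to constant values for this slot configuration: unused partial-issue flags,
  // the fixed bypassable/rebusy/speculative-mask parameters of each wakeup port, and
  // intermediate terms that fold away to constants.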
wire io_iss_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_in_uop_bits_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_bypassable = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_1_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_2_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_rebusy = 1'h0; // @[issue-slot.scala:49:7]
wire next_uop_out_iw_issued_partial_agen = 1'h0; // @[util.scala:104:23]
wire next_uop_out_iw_issued_partial_dgen = 1'h0; // @[util.scala:104:23]
wire next_uop_iw_issued_partial_agen = 1'h0; // @[issue-slot.scala:59:28]
wire next_uop_iw_issued_partial_dgen = 1'h0; // @[issue-slot.scala:59:28]
wire prs1_rebusys_1 = 1'h0; // @[issue-slot.scala:102:91]
wire prs1_rebusys_2 = 1'h0; // @[issue-slot.scala:102:91]
wire prs1_rebusys_3 = 1'h0; // @[issue-slot.scala:102:91]
wire prs1_rebusys_4 = 1'h0; // @[issue-slot.scala:102:91]
wire prs2_rebusys_1 = 1'h0; // @[issue-slot.scala:103:91]
wire prs2_rebusys_2 = 1'h0; // @[issue-slot.scala:103:91]
wire prs2_rebusys_3 = 1'h0; // @[issue-slot.scala:103:91]
wire prs2_rebusys_4 = 1'h0; // @[issue-slot.scala:103:91]
wire _next_uop_iw_p1_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _next_uop_iw_p2_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _next_uop_iw_p3_bypass_hint_T_1 = 1'h0; // @[Mux.scala:30:73]
wire _iss_ready_T_6 = 1'h0; // @[issue-slot.scala:136:131]
wire agen_ready = 1'h0; // @[issue-slot.scala:137:114]
wire dgen_ready = 1'h0; // @[issue-slot.scala:138:114]
wire [2:0] io_wakeup_ports_1_bits_speculative_mask = 3'h0; // @[issue-slot.scala:49:7]
wire [2:0] _next_uop_iw_p1_speculative_child_T_1 = 3'h0; // @[Mux.scala:30:73]
wire [2:0] _next_uop_iw_p2_speculative_child_T_1 = 3'h0; // @[Mux.scala:30:73]
wire io_wakeup_ports_2_bits_bypassable = 1'h1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_3_bits_bypassable = 1'h1; // @[issue-slot.scala:49:7]
wire io_wakeup_ports_4_bits_bypassable = 1'h1; // @[issue-slot.scala:49:7]
wire _iss_ready_T_7 = 1'h1; // @[issue-slot.scala:136:110]
wire [2:0] io_wakeup_ports_2_bits_speculative_mask = 3'h1; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_3_bits_speculative_mask = 3'h2; // @[issue-slot.scala:49:7]
wire [2:0] io_wakeup_ports_4_bits_speculative_mask = 3'h4; // @[issue-slot.scala:49:7]
wire _io_will_be_valid_T_1; // @[issue-slot.scala:65:34]
wire _io_request_T_4; // @[issue-slot.scala:140:51]
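  // Wire declarations for the next_uop_* fields of the slot's next state (driven below).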
wire [31:0] next_uop_inst; // @[issue-slot.scala:59:28]
wire [31:0] next_uop_debug_inst; // @[issue-slot.scala:59:28]
wire next_uop_is_rvc; // @[issue-slot.scala:59:28]
wire [39:0] next_uop_debug_pc; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_0; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_1; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_2; // @[issue-slot.scala:59:28]
wire next_uop_iq_type_3; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_0; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_1; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_2; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_3; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_4; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_5; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_6; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_7; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_8; // @[issue-slot.scala:59:28]
wire next_uop_fu_code_9; // @[issue-slot.scala:59:28]
wire next_uop_iw_issued; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_iw_p1_speculative_child; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_iw_p2_speculative_child; // @[issue-slot.scala:59:28]
wire next_uop_iw_p1_bypass_hint; // @[issue-slot.scala:59:28]
wire next_uop_iw_p2_bypass_hint; // @[issue-slot.scala:59:28]
wire next_uop_iw_p3_bypass_hint; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_dis_col_sel; // @[issue-slot.scala:59:28]
wire [15:0] next_uop_br_mask; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_br_tag; // @[issue-slot.scala:59:28]
wire [3:0] next_uop_br_type; // @[issue-slot.scala:59:28]
wire next_uop_is_sfb; // @[issue-slot.scala:59:28]
wire next_uop_is_fence; // @[issue-slot.scala:59:28]
wire next_uop_is_fencei; // @[issue-slot.scala:59:28]
wire next_uop_is_sfence; // @[issue-slot.scala:59:28]
wire next_uop_is_amo; // @[issue-slot.scala:59:28]
wire next_uop_is_eret; // @[issue-slot.scala:59:28]
wire next_uop_is_sys_pc2epc; // @[issue-slot.scala:59:28]
wire next_uop_is_rocc; // @[issue-slot.scala:59:28]
wire next_uop_is_mov; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_ftq_idx; // @[issue-slot.scala:59:28]
wire next_uop_edge_inst; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_pc_lob; // @[issue-slot.scala:59:28]
wire next_uop_taken; // @[issue-slot.scala:59:28]
wire next_uop_imm_rename; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_imm_sel; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_pimm; // @[issue-slot.scala:59:28]
wire [19:0] next_uop_imm_packed; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_op1_sel; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_op2_sel; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ldst; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_wen; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren1; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren2; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_ren3; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_swap12; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_swap23; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fromint; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_toint; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_fma; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_div; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_sqrt; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_wflags; // @[issue-slot.scala:59:28]
wire next_uop_fp_ctrl_vec; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_rob_idx; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_ldq_idx; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_stq_idx; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_rxq_idx; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_pdst; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs1; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs2; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_prs3; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_ppred; // @[issue-slot.scala:59:28]
wire next_uop_prs1_busy; // @[issue-slot.scala:59:28]
wire next_uop_prs2_busy; // @[issue-slot.scala:59:28]
wire next_uop_prs3_busy; // @[issue-slot.scala:59:28]
wire next_uop_ppred_busy; // @[issue-slot.scala:59:28]
wire [6:0] next_uop_stale_pdst; // @[issue-slot.scala:59:28]
wire next_uop_exception; // @[issue-slot.scala:59:28]
wire [63:0] next_uop_exc_cause; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_mem_cmd; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_mem_size; // @[issue-slot.scala:59:28]
wire next_uop_mem_signed; // @[issue-slot.scala:59:28]
wire next_uop_uses_ldq; // @[issue-slot.scala:59:28]
wire next_uop_uses_stq; // @[issue-slot.scala:59:28]
wire next_uop_is_unique; // @[issue-slot.scala:59:28]
wire next_uop_flush_on_commit; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_csr_cmd; // @[issue-slot.scala:59:28]
wire next_uop_ldst_is_rs1; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_ldst; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs1; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs2; // @[issue-slot.scala:59:28]
wire [5:0] next_uop_lrs3; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_dst_rtype; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_lrs1_rtype; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_lrs2_rtype; // @[issue-slot.scala:59:28]
wire next_uop_frs3_en; // @[issue-slot.scala:59:28]
wire next_uop_fcn_dw; // @[issue-slot.scala:59:28]
wire [4:0] next_uop_fcn_op; // @[issue-slot.scala:59:28]
wire next_uop_fp_val; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_fp_rm; // @[issue-slot.scala:59:28]
wire [1:0] next_uop_fp_typ; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_pf_if; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_ae_if; // @[issue-slot.scala:59:28]
wire next_uop_xcpt_ma_if; // @[issue-slot.scala:59:28]
wire next_uop_bp_debug_if; // @[issue-slot.scala:59:28]
wire next_uop_bp_xcpt_if; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_debug_fsrc; // @[issue-slot.scala:59:28]
wire [2:0] next_uop_debug_tsrc; // @[issue-slot.scala:59:28]
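  // Wire declarations for the io_iss_uop_* outputs (the uop this slot presents for issue).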
wire io_iss_uop_iq_type_0_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iq_type_3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_0_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_4_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_5_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_6_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_7_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_8_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fu_code_9_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ldst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_wen_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren1_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren2_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_ren3_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_swap12_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_swap23_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_ctrl_typeTagIn_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_ctrl_typeTagOut_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fromint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_toint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fastpipe_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_fma_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_div_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_sqrt_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_wflags_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_ctrl_vec_0; // @[issue-slot.scala:49:7]
wire [31:0] io_iss_uop_inst_0; // @[issue-slot.scala:49:7]
wire [31:0] io_iss_uop_debug_inst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_rvc_0; // @[issue-slot.scala:49:7]
wire [39:0] io_iss_uop_debug_pc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_issued_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_iw_p1_speculative_child_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_iw_p2_speculative_child_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p1_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p2_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_iw_p3_bypass_hint_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_dis_col_sel_0; // @[issue-slot.scala:49:7]
wire [15:0] io_iss_uop_br_mask_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_br_tag_0; // @[issue-slot.scala:49:7]
wire [3:0] io_iss_uop_br_type_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sfb_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_fence_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_fencei_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sfence_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_amo_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_eret_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_sys_pc2epc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_rocc_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_mov_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_ftq_idx_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_edge_inst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_pc_lob_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_taken_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_imm_rename_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_imm_sel_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_pimm_0; // @[issue-slot.scala:49:7]
wire [19:0] io_iss_uop_imm_packed_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_op1_sel_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_op2_sel_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_rob_idx_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_ldq_idx_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_stq_idx_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_rxq_idx_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_pdst_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs1_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs2_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_prs3_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_ppred_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs1_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs2_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_prs3_busy_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_ppred_busy_0; // @[issue-slot.scala:49:7]
wire [6:0] io_iss_uop_stale_pdst_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_exception_0; // @[issue-slot.scala:49:7]
wire [63:0] io_iss_uop_exc_cause_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_mem_cmd_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_mem_size_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_mem_signed_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_uses_ldq_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_uses_stq_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_is_unique_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_flush_on_commit_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_csr_cmd_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_ldst_is_rs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_ldst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs2_0; // @[issue-slot.scala:49:7]
wire [5:0] io_iss_uop_lrs3_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_dst_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_lrs1_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_lrs2_rtype_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_frs3_en_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fcn_dw_0; // @[issue-slot.scala:49:7]
wire [4:0] io_iss_uop_fcn_op_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_fp_val_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_fp_rm_0; // @[issue-slot.scala:49:7]
wire [1:0] io_iss_uop_fp_typ_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_pf_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_ae_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_xcpt_ma_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_bp_debug_if_0; // @[issue-slot.scala:49:7]
wire io_iss_uop_bp_xcpt_if_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_debug_fsrc_0; // @[issue-slot.scala:49:7]
wire [2:0] io_iss_uop_debug_tsrc_0; // @[issue-slot.scala:49:7]
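  // Wire declarations for the io_out_uop_* outputs (the updated uop this slot drives outward).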
wire io_out_uop_iq_type_0_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iq_type_3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_0_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_4_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_5_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_6_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_7_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_8_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fu_code_9_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ldst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_wen_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren1_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren2_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_ren3_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_swap12_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_swap23_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_ctrl_typeTagIn_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_ctrl_typeTagOut_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fromint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_toint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fastpipe_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_fma_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_div_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_sqrt_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_wflags_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_ctrl_vec_0; // @[issue-slot.scala:49:7]
wire [31:0] io_out_uop_inst_0; // @[issue-slot.scala:49:7]
wire [31:0] io_out_uop_debug_inst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_rvc_0; // @[issue-slot.scala:49:7]
wire [39:0] io_out_uop_debug_pc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_issued_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_iw_p1_speculative_child_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_iw_p2_speculative_child_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p1_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p2_bypass_hint_0; // @[issue-slot.scala:49:7]
wire io_out_uop_iw_p3_bypass_hint_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_dis_col_sel_0; // @[issue-slot.scala:49:7]
wire [15:0] io_out_uop_br_mask_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_br_tag_0; // @[issue-slot.scala:49:7]
wire [3:0] io_out_uop_br_type_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sfb_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_fence_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_fencei_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sfence_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_amo_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_eret_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_sys_pc2epc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_rocc_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_mov_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_ftq_idx_0; // @[issue-slot.scala:49:7]
wire io_out_uop_edge_inst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_pc_lob_0; // @[issue-slot.scala:49:7]
wire io_out_uop_taken_0; // @[issue-slot.scala:49:7]
wire io_out_uop_imm_rename_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_imm_sel_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_pimm_0; // @[issue-slot.scala:49:7]
wire [19:0] io_out_uop_imm_packed_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_op1_sel_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_op2_sel_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_rob_idx_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_ldq_idx_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_stq_idx_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_rxq_idx_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_pdst_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs1_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs2_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_prs3_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_ppred_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs1_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs2_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_prs3_busy_0; // @[issue-slot.scala:49:7]
wire io_out_uop_ppred_busy_0; // @[issue-slot.scala:49:7]
wire [6:0] io_out_uop_stale_pdst_0; // @[issue-slot.scala:49:7]
wire io_out_uop_exception_0; // @[issue-slot.scala:49:7]
wire [63:0] io_out_uop_exc_cause_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_mem_cmd_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_mem_size_0; // @[issue-slot.scala:49:7]
wire io_out_uop_mem_signed_0; // @[issue-slot.scala:49:7]
wire io_out_uop_uses_ldq_0; // @[issue-slot.scala:49:7]
wire io_out_uop_uses_stq_0; // @[issue-slot.scala:49:7]
wire io_out_uop_is_unique_0; // @[issue-slot.scala:49:7]
wire io_out_uop_flush_on_commit_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_csr_cmd_0; // @[issue-slot.scala:49:7]
wire io_out_uop_ldst_is_rs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_ldst_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs1_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs2_0; // @[issue-slot.scala:49:7]
wire [5:0] io_out_uop_lrs3_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_dst_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_lrs1_rtype_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_lrs2_rtype_0; // @[issue-slot.scala:49:7]
wire io_out_uop_frs3_en_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fcn_dw_0; // @[issue-slot.scala:49:7]
wire [4:0] io_out_uop_fcn_op_0; // @[issue-slot.scala:49:7]
wire io_out_uop_fp_val_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_fp_rm_0; // @[issue-slot.scala:49:7]
wire [1:0] io_out_uop_fp_typ_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_pf_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_ae_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_xcpt_ma_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_bp_debug_if_0; // @[issue-slot.scala:49:7]
wire io_out_uop_bp_xcpt_if_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_debug_fsrc_0; // @[issue-slot.scala:49:7]
wire [2:0] io_out_uop_debug_tsrc_0; // @[issue-slot.scala:49:7]
wire io_valid_0; // @[issue-slot.scala:49:7]
wire io_will_be_valid_0; // @[issue-slot.scala:49:7]
wire io_request_0; // @[issue-slot.scala:49:7]
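  // Slot state: the valid bit and the latched micro-op registers, which drive the
  // io_iss_uop_* outputs and the next_uop_out_* default values.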
reg slot_valid; // @[issue-slot.scala:55:27]
assign io_valid_0 = slot_valid; // @[issue-slot.scala:49:7, :55:27]
reg [31:0] slot_uop_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:49:7, :56:21]
wire [31:0] next_uop_out_inst = slot_uop_inst; // @[util.scala:104:23]
reg [31:0] slot_uop_debug_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:49:7, :56:21]
wire [31:0] next_uop_out_debug_inst = slot_uop_debug_inst; // @[util.scala:104:23]
reg slot_uop_is_rvc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_rvc = slot_uop_is_rvc; // @[util.scala:104:23]
reg [39:0] slot_uop_debug_pc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:49:7, :56:21]
wire [39:0] next_uop_out_debug_pc = slot_uop_debug_pc; // @[util.scala:104:23]
reg slot_uop_iq_type_0; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_0_0 = slot_uop_iq_type_0; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_0 = slot_uop_iq_type_0; // @[util.scala:104:23]
reg slot_uop_iq_type_1; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_1_0 = slot_uop_iq_type_1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_1 = slot_uop_iq_type_1; // @[util.scala:104:23]
reg slot_uop_iq_type_2; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_2_0 = slot_uop_iq_type_2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_2 = slot_uop_iq_type_2; // @[util.scala:104:23]
reg slot_uop_iq_type_3; // @[issue-slot.scala:56:21]
assign io_iss_uop_iq_type_3_0 = slot_uop_iq_type_3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iq_type_3 = slot_uop_iq_type_3; // @[util.scala:104:23]
reg slot_uop_fu_code_0; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_0_0 = slot_uop_fu_code_0; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_0 = slot_uop_fu_code_0; // @[util.scala:104:23]
reg slot_uop_fu_code_1; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_1_0 = slot_uop_fu_code_1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_1 = slot_uop_fu_code_1; // @[util.scala:104:23]
reg slot_uop_fu_code_2; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_2_0 = slot_uop_fu_code_2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_2 = slot_uop_fu_code_2; // @[util.scala:104:23]
reg slot_uop_fu_code_3; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_3_0 = slot_uop_fu_code_3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_3 = slot_uop_fu_code_3; // @[util.scala:104:23]
reg slot_uop_fu_code_4; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_4_0 = slot_uop_fu_code_4; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_4 = slot_uop_fu_code_4; // @[util.scala:104:23]
reg slot_uop_fu_code_5; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_5_0 = slot_uop_fu_code_5; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_5 = slot_uop_fu_code_5; // @[util.scala:104:23]
reg slot_uop_fu_code_6; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_6_0 = slot_uop_fu_code_6; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_6 = slot_uop_fu_code_6; // @[util.scala:104:23]
reg slot_uop_fu_code_7; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_7_0 = slot_uop_fu_code_7; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_7 = slot_uop_fu_code_7; // @[util.scala:104:23]
reg slot_uop_fu_code_8; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_8_0 = slot_uop_fu_code_8; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_8 = slot_uop_fu_code_8; // @[util.scala:104:23]
reg slot_uop_fu_code_9; // @[issue-slot.scala:56:21]
assign io_iss_uop_fu_code_9_0 = slot_uop_fu_code_9; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fu_code_9 = slot_uop_fu_code_9; // @[util.scala:104:23]
reg slot_uop_iw_issued; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_issued_0 = slot_uop_iw_issued; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_issued = slot_uop_iw_issued; // @[util.scala:104:23]
reg [2:0] slot_uop_iw_p1_speculative_child; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p1_speculative_child_0 = slot_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_iw_p1_speculative_child = slot_uop_iw_p1_speculative_child; // @[util.scala:104:23]
reg [2:0] slot_uop_iw_p2_speculative_child; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p2_speculative_child_0 = slot_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_iw_p2_speculative_child = slot_uop_iw_p2_speculative_child; // @[util.scala:104:23]
reg slot_uop_iw_p1_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p1_bypass_hint_0 = slot_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p1_bypass_hint = slot_uop_iw_p1_bypass_hint; // @[util.scala:104:23]
reg slot_uop_iw_p2_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p2_bypass_hint_0 = slot_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p2_bypass_hint = slot_uop_iw_p2_bypass_hint; // @[util.scala:104:23]
reg slot_uop_iw_p3_bypass_hint; // @[issue-slot.scala:56:21]
assign io_iss_uop_iw_p3_bypass_hint_0 = slot_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_iw_p3_bypass_hint = slot_uop_iw_p3_bypass_hint; // @[util.scala:104:23]
reg [2:0] slot_uop_dis_col_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_dis_col_sel_0 = slot_uop_dis_col_sel; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_dis_col_sel = slot_uop_dis_col_sel; // @[util.scala:104:23]
reg [15:0] slot_uop_br_mask; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_mask_0 = slot_uop_br_mask; // @[issue-slot.scala:49:7, :56:21]
reg [3:0] slot_uop_br_tag; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_br_tag = slot_uop_br_tag; // @[util.scala:104:23]
reg [3:0] slot_uop_br_type; // @[issue-slot.scala:56:21]
assign io_iss_uop_br_type_0 = slot_uop_br_type; // @[issue-slot.scala:49:7, :56:21]
wire [3:0] next_uop_out_br_type = slot_uop_br_type; // @[util.scala:104:23]
reg slot_uop_is_sfb; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sfb = slot_uop_is_sfb; // @[util.scala:104:23]
reg slot_uop_is_fence; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_fence = slot_uop_is_fence; // @[util.scala:104:23]
reg slot_uop_is_fencei; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_fencei = slot_uop_is_fencei; // @[util.scala:104:23]
reg slot_uop_is_sfence; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sfence_0 = slot_uop_is_sfence; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sfence = slot_uop_is_sfence; // @[util.scala:104:23]
reg slot_uop_is_amo; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_amo = slot_uop_is_amo; // @[util.scala:104:23]
reg slot_uop_is_eret; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_eret_0 = slot_uop_is_eret; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_eret = slot_uop_is_eret; // @[util.scala:104:23]
reg slot_uop_is_sys_pc2epc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_sys_pc2epc = slot_uop_is_sys_pc2epc; // @[util.scala:104:23]
reg slot_uop_is_rocc; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_rocc_0 = slot_uop_is_rocc; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_rocc = slot_uop_is_rocc; // @[util.scala:104:23]
reg slot_uop_is_mov; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_mov_0 = slot_uop_is_mov; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_mov = slot_uop_is_mov; // @[util.scala:104:23]
reg [4:0] slot_uop_ftq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_ftq_idx = slot_uop_ftq_idx; // @[util.scala:104:23]
reg slot_uop_edge_inst; // @[issue-slot.scala:56:21]
assign io_iss_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_edge_inst = slot_uop_edge_inst; // @[util.scala:104:23]
reg [5:0] slot_uop_pc_lob; // @[issue-slot.scala:56:21]
assign io_iss_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_pc_lob = slot_uop_pc_lob; // @[util.scala:104:23]
reg slot_uop_taken; // @[issue-slot.scala:56:21]
assign io_iss_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_taken = slot_uop_taken; // @[util.scala:104:23]
reg slot_uop_imm_rename; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_rename_0 = slot_uop_imm_rename; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_imm_rename = slot_uop_imm_rename; // @[util.scala:104:23]
reg [2:0] slot_uop_imm_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_sel_0 = slot_uop_imm_sel; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_imm_sel = slot_uop_imm_sel; // @[util.scala:104:23]
reg [4:0] slot_uop_pimm; // @[issue-slot.scala:56:21]
assign io_iss_uop_pimm_0 = slot_uop_pimm; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_pimm = slot_uop_pimm; // @[util.scala:104:23]
reg [19:0] slot_uop_imm_packed; // @[issue-slot.scala:56:21]
assign io_iss_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:49:7, :56:21]
wire [19:0] next_uop_out_imm_packed = slot_uop_imm_packed; // @[util.scala:104:23]
reg [1:0] slot_uop_op1_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_op1_sel_0 = slot_uop_op1_sel; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_op1_sel = slot_uop_op1_sel; // @[util.scala:104:23]
reg [2:0] slot_uop_op2_sel; // @[issue-slot.scala:56:21]
assign io_iss_uop_op2_sel_0 = slot_uop_op2_sel; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_op2_sel = slot_uop_op2_sel; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ldst; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ldst_0 = slot_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ldst = slot_uop_fp_ctrl_ldst; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_wen; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_wen_0 = slot_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_wen = slot_uop_fp_ctrl_wen; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren1; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren1_0 = slot_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren1 = slot_uop_fp_ctrl_ren1; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren2; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren2_0 = slot_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren2 = slot_uop_fp_ctrl_ren2; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_ren3; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_ren3_0 = slot_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_ren3 = slot_uop_fp_ctrl_ren3; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_swap12; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_swap12_0 = slot_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_swap12 = slot_uop_fp_ctrl_swap12; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_swap23; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_swap23_0 = slot_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_swap23 = slot_uop_fp_ctrl_swap23; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_typeTagIn_0 = slot_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_ctrl_typeTagIn = slot_uop_fp_ctrl_typeTagIn; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_typeTagOut_0 = slot_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_ctrl_typeTagOut = slot_uop_fp_ctrl_typeTagOut; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fromint; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fromint_0 = slot_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fromint = slot_uop_fp_ctrl_fromint; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_toint; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_toint_0 = slot_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_toint = slot_uop_fp_ctrl_toint; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fastpipe_0 = slot_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fastpipe = slot_uop_fp_ctrl_fastpipe; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_fma; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_fma_0 = slot_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_fma = slot_uop_fp_ctrl_fma; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_div; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_div_0 = slot_uop_fp_ctrl_div; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_div = slot_uop_fp_ctrl_div; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_sqrt; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_sqrt_0 = slot_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_sqrt = slot_uop_fp_ctrl_sqrt; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_wflags; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_wflags_0 = slot_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_wflags = slot_uop_fp_ctrl_wflags; // @[util.scala:104:23]
reg slot_uop_fp_ctrl_vec; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_ctrl_vec_0 = slot_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_ctrl_vec = slot_uop_fp_ctrl_vec; // @[util.scala:104:23]
reg [6:0] slot_uop_rob_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_rob_idx = slot_uop_rob_idx; // @[util.scala:104:23]
reg [4:0] slot_uop_ldq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_ldq_idx = slot_uop_ldq_idx; // @[util.scala:104:23]
reg [4:0] slot_uop_stq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_stq_idx = slot_uop_stq_idx; // @[util.scala:104:23]
reg [1:0] slot_uop_rxq_idx; // @[issue-slot.scala:56:21]
assign io_iss_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_rxq_idx = slot_uop_rxq_idx; // @[util.scala:104:23]
reg [6:0] slot_uop_pdst; // @[issue-slot.scala:56:21]
assign io_iss_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_pdst = slot_uop_pdst; // @[util.scala:104:23]
reg [6:0] slot_uop_prs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_prs1 = slot_uop_prs1; // @[util.scala:104:23]
reg [6:0] slot_uop_prs2; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_prs2 = slot_uop_prs2; // @[util.scala:104:23]
reg [6:0] slot_uop_prs3; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_prs3 = slot_uop_prs3; // @[util.scala:104:23]
reg [4:0] slot_uop_ppred; // @[issue-slot.scala:56:21]
assign io_iss_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_ppred = slot_uop_ppred; // @[util.scala:104:23]
reg slot_uop_prs1_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs1_busy_0 = slot_uop_prs1_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs1_busy = slot_uop_prs1_busy; // @[util.scala:104:23]
reg slot_uop_prs2_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs2_busy_0 = slot_uop_prs2_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs2_busy = slot_uop_prs2_busy; // @[util.scala:104:23]
reg slot_uop_prs3_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_prs3_busy_0 = slot_uop_prs3_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_prs3_busy = slot_uop_prs3_busy; // @[util.scala:104:23]
reg slot_uop_ppred_busy; // @[issue-slot.scala:56:21]
assign io_iss_uop_ppred_busy_0 = slot_uop_ppred_busy; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_ppred_busy = slot_uop_ppred_busy; // @[util.scala:104:23]
wire _iss_ready_T_3 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :136:88]
wire _agen_ready_T_2 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :137:95]
wire _dgen_ready_T_2 = slot_uop_ppred_busy; // @[issue-slot.scala:56:21, :138:95]
reg [6:0] slot_uop_stale_pdst; // @[issue-slot.scala:56:21]
assign io_iss_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:49:7, :56:21]
wire [6:0] next_uop_out_stale_pdst = slot_uop_stale_pdst; // @[util.scala:104:23]
reg slot_uop_exception; // @[issue-slot.scala:56:21]
assign io_iss_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_exception = slot_uop_exception; // @[util.scala:104:23]
reg [63:0] slot_uop_exc_cause; // @[issue-slot.scala:56:21]
assign io_iss_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:49:7, :56:21]
wire [63:0] next_uop_out_exc_cause = slot_uop_exc_cause; // @[util.scala:104:23]
reg [4:0] slot_uop_mem_cmd; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_mem_cmd = slot_uop_mem_cmd; // @[util.scala:104:23]
reg [1:0] slot_uop_mem_size; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_mem_size = slot_uop_mem_size; // @[util.scala:104:23]
reg slot_uop_mem_signed; // @[issue-slot.scala:56:21]
assign io_iss_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_mem_signed = slot_uop_mem_signed; // @[util.scala:104:23]
reg slot_uop_uses_ldq; // @[issue-slot.scala:56:21]
assign io_iss_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_uses_ldq = slot_uop_uses_ldq; // @[util.scala:104:23]
reg slot_uop_uses_stq; // @[issue-slot.scala:56:21]
assign io_iss_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_uses_stq = slot_uop_uses_stq; // @[util.scala:104:23]
reg slot_uop_is_unique; // @[issue-slot.scala:56:21]
assign io_iss_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_is_unique = slot_uop_is_unique; // @[util.scala:104:23]
reg slot_uop_flush_on_commit; // @[issue-slot.scala:56:21]
assign io_iss_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_flush_on_commit = slot_uop_flush_on_commit; // @[util.scala:104:23]
reg [2:0] slot_uop_csr_cmd; // @[issue-slot.scala:56:21]
assign io_iss_uop_csr_cmd_0 = slot_uop_csr_cmd; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_csr_cmd = slot_uop_csr_cmd; // @[util.scala:104:23]
reg slot_uop_ldst_is_rs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_ldst_is_rs1 = slot_uop_ldst_is_rs1; // @[util.scala:104:23]
reg [5:0] slot_uop_ldst; // @[issue-slot.scala:56:21]
assign io_iss_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_ldst = slot_uop_ldst; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs1; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs1 = slot_uop_lrs1; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs2; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs2 = slot_uop_lrs2; // @[util.scala:104:23]
reg [5:0] slot_uop_lrs3; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:49:7, :56:21]
wire [5:0] next_uop_out_lrs3 = slot_uop_lrs3; // @[util.scala:104:23]
reg [1:0] slot_uop_dst_rtype; // @[issue-slot.scala:56:21]
assign io_iss_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_dst_rtype = slot_uop_dst_rtype; // @[util.scala:104:23]
reg [1:0] slot_uop_lrs1_rtype; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs1_rtype_0 = slot_uop_lrs1_rtype; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_lrs1_rtype = slot_uop_lrs1_rtype; // @[util.scala:104:23]
reg [1:0] slot_uop_lrs2_rtype; // @[issue-slot.scala:56:21]
assign io_iss_uop_lrs2_rtype_0 = slot_uop_lrs2_rtype; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_lrs2_rtype = slot_uop_lrs2_rtype; // @[util.scala:104:23]
reg slot_uop_frs3_en; // @[issue-slot.scala:56:21]
assign io_iss_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_frs3_en = slot_uop_frs3_en; // @[util.scala:104:23]
reg slot_uop_fcn_dw; // @[issue-slot.scala:56:21]
assign io_iss_uop_fcn_dw_0 = slot_uop_fcn_dw; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fcn_dw = slot_uop_fcn_dw; // @[util.scala:104:23]
reg [4:0] slot_uop_fcn_op; // @[issue-slot.scala:56:21]
assign io_iss_uop_fcn_op_0 = slot_uop_fcn_op; // @[issue-slot.scala:49:7, :56:21]
wire [4:0] next_uop_out_fcn_op = slot_uop_fcn_op; // @[util.scala:104:23]
reg slot_uop_fp_val; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_fp_val = slot_uop_fp_val; // @[util.scala:104:23]
reg [2:0] slot_uop_fp_rm; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_rm_0 = slot_uop_fp_rm; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_fp_rm = slot_uop_fp_rm; // @[util.scala:104:23]
reg [1:0] slot_uop_fp_typ; // @[issue-slot.scala:56:21]
assign io_iss_uop_fp_typ_0 = slot_uop_fp_typ; // @[issue-slot.scala:49:7, :56:21]
wire [1:0] next_uop_out_fp_typ = slot_uop_fp_typ; // @[util.scala:104:23]
reg slot_uop_xcpt_pf_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_pf_if = slot_uop_xcpt_pf_if; // @[util.scala:104:23]
reg slot_uop_xcpt_ae_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_ae_if = slot_uop_xcpt_ae_if; // @[util.scala:104:23]
reg slot_uop_xcpt_ma_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_xcpt_ma_if = slot_uop_xcpt_ma_if; // @[util.scala:104:23]
reg slot_uop_bp_debug_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_bp_debug_if = slot_uop_bp_debug_if; // @[util.scala:104:23]
reg slot_uop_bp_xcpt_if; // @[issue-slot.scala:56:21]
assign io_iss_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:49:7, :56:21]
wire next_uop_out_bp_xcpt_if = slot_uop_bp_xcpt_if; // @[util.scala:104:23]
reg [2:0] slot_uop_debug_fsrc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_debug_fsrc = slot_uop_debug_fsrc; // @[util.scala:104:23]
reg [2:0] slot_uop_debug_tsrc; // @[issue-slot.scala:56:21]
assign io_iss_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:49:7, :56:21]
wire [2:0] next_uop_out_debug_tsrc = slot_uop_debug_tsrc; // @[util.scala:104:23]
wire next_valid; // @[issue-slot.scala:58:28]
assign next_uop_inst = next_uop_out_inst; // @[util.scala:104:23]
assign next_uop_debug_inst = next_uop_out_debug_inst; // @[util.scala:104:23]
assign next_uop_is_rvc = next_uop_out_is_rvc; // @[util.scala:104:23]
assign next_uop_debug_pc = next_uop_out_debug_pc; // @[util.scala:104:23]
assign next_uop_iq_type_0 = next_uop_out_iq_type_0; // @[util.scala:104:23]
assign next_uop_iq_type_1 = next_uop_out_iq_type_1; // @[util.scala:104:23]
assign next_uop_iq_type_2 = next_uop_out_iq_type_2; // @[util.scala:104:23]
assign next_uop_iq_type_3 = next_uop_out_iq_type_3; // @[util.scala:104:23]
assign next_uop_fu_code_0 = next_uop_out_fu_code_0; // @[util.scala:104:23]
assign next_uop_fu_code_1 = next_uop_out_fu_code_1; // @[util.scala:104:23]
assign next_uop_fu_code_2 = next_uop_out_fu_code_2; // @[util.scala:104:23]
assign next_uop_fu_code_3 = next_uop_out_fu_code_3; // @[util.scala:104:23]
assign next_uop_fu_code_4 = next_uop_out_fu_code_4; // @[util.scala:104:23]
assign next_uop_fu_code_5 = next_uop_out_fu_code_5; // @[util.scala:104:23]
assign next_uop_fu_code_6 = next_uop_out_fu_code_6; // @[util.scala:104:23]
assign next_uop_fu_code_7 = next_uop_out_fu_code_7; // @[util.scala:104:23]
assign next_uop_fu_code_8 = next_uop_out_fu_code_8; // @[util.scala:104:23]
assign next_uop_fu_code_9 = next_uop_out_fu_code_9; // @[util.scala:104:23]
wire [15:0] _next_uop_out_br_mask_T_1; // @[util.scala:93:25]
assign next_uop_dis_col_sel = next_uop_out_dis_col_sel; // @[util.scala:104:23]
assign next_uop_br_mask = next_uop_out_br_mask; // @[util.scala:104:23]
assign next_uop_br_tag = next_uop_out_br_tag; // @[util.scala:104:23]
assign next_uop_br_type = next_uop_out_br_type; // @[util.scala:104:23]
assign next_uop_is_sfb = next_uop_out_is_sfb; // @[util.scala:104:23]
assign next_uop_is_fence = next_uop_out_is_fence; // @[util.scala:104:23]
assign next_uop_is_fencei = next_uop_out_is_fencei; // @[util.scala:104:23]
assign next_uop_is_sfence = next_uop_out_is_sfence; // @[util.scala:104:23]
assign next_uop_is_amo = next_uop_out_is_amo; // @[util.scala:104:23]
assign next_uop_is_eret = next_uop_out_is_eret; // @[util.scala:104:23]
assign next_uop_is_sys_pc2epc = next_uop_out_is_sys_pc2epc; // @[util.scala:104:23]
assign next_uop_is_rocc = next_uop_out_is_rocc; // @[util.scala:104:23]
assign next_uop_is_mov = next_uop_out_is_mov; // @[util.scala:104:23]
assign next_uop_ftq_idx = next_uop_out_ftq_idx; // @[util.scala:104:23]
assign next_uop_edge_inst = next_uop_out_edge_inst; // @[util.scala:104:23]
assign next_uop_pc_lob = next_uop_out_pc_lob; // @[util.scala:104:23]
assign next_uop_taken = next_uop_out_taken; // @[util.scala:104:23]
assign next_uop_imm_rename = next_uop_out_imm_rename; // @[util.scala:104:23]
assign next_uop_imm_sel = next_uop_out_imm_sel; // @[util.scala:104:23]
assign next_uop_pimm = next_uop_out_pimm; // @[util.scala:104:23]
assign next_uop_imm_packed = next_uop_out_imm_packed; // @[util.scala:104:23]
assign next_uop_op1_sel = next_uop_out_op1_sel; // @[util.scala:104:23]
assign next_uop_op2_sel = next_uop_out_op2_sel; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ldst = next_uop_out_fp_ctrl_ldst; // @[util.scala:104:23]
assign next_uop_fp_ctrl_wen = next_uop_out_fp_ctrl_wen; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren1 = next_uop_out_fp_ctrl_ren1; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren2 = next_uop_out_fp_ctrl_ren2; // @[util.scala:104:23]
assign next_uop_fp_ctrl_ren3 = next_uop_out_fp_ctrl_ren3; // @[util.scala:104:23]
assign next_uop_fp_ctrl_swap12 = next_uop_out_fp_ctrl_swap12; // @[util.scala:104:23]
assign next_uop_fp_ctrl_swap23 = next_uop_out_fp_ctrl_swap23; // @[util.scala:104:23]
assign next_uop_fp_ctrl_typeTagIn = next_uop_out_fp_ctrl_typeTagIn; // @[util.scala:104:23]
assign next_uop_fp_ctrl_typeTagOut = next_uop_out_fp_ctrl_typeTagOut; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fromint = next_uop_out_fp_ctrl_fromint; // @[util.scala:104:23]
assign next_uop_fp_ctrl_toint = next_uop_out_fp_ctrl_toint; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fastpipe = next_uop_out_fp_ctrl_fastpipe; // @[util.scala:104:23]
assign next_uop_fp_ctrl_fma = next_uop_out_fp_ctrl_fma; // @[util.scala:104:23]
assign next_uop_fp_ctrl_div = next_uop_out_fp_ctrl_div; // @[util.scala:104:23]
assign next_uop_fp_ctrl_sqrt = next_uop_out_fp_ctrl_sqrt; // @[util.scala:104:23]
assign next_uop_fp_ctrl_wflags = next_uop_out_fp_ctrl_wflags; // @[util.scala:104:23]
assign next_uop_fp_ctrl_vec = next_uop_out_fp_ctrl_vec; // @[util.scala:104:23]
assign next_uop_rob_idx = next_uop_out_rob_idx; // @[util.scala:104:23]
assign next_uop_ldq_idx = next_uop_out_ldq_idx; // @[util.scala:104:23]
assign next_uop_stq_idx = next_uop_out_stq_idx; // @[util.scala:104:23]
assign next_uop_rxq_idx = next_uop_out_rxq_idx; // @[util.scala:104:23]
assign next_uop_pdst = next_uop_out_pdst; // @[util.scala:104:23]
assign next_uop_prs1 = next_uop_out_prs1; // @[util.scala:104:23]
assign next_uop_prs2 = next_uop_out_prs2; // @[util.scala:104:23]
assign next_uop_prs3 = next_uop_out_prs3; // @[util.scala:104:23]
assign next_uop_ppred = next_uop_out_ppred; // @[util.scala:104:23]
assign next_uop_stale_pdst = next_uop_out_stale_pdst; // @[util.scala:104:23]
assign next_uop_exception = next_uop_out_exception; // @[util.scala:104:23]
assign next_uop_exc_cause = next_uop_out_exc_cause; // @[util.scala:104:23]
assign next_uop_mem_cmd = next_uop_out_mem_cmd; // @[util.scala:104:23]
assign next_uop_mem_size = next_uop_out_mem_size; // @[util.scala:104:23]
assign next_uop_mem_signed = next_uop_out_mem_signed; // @[util.scala:104:23]
assign next_uop_uses_ldq = next_uop_out_uses_ldq; // @[util.scala:104:23]
assign next_uop_uses_stq = next_uop_out_uses_stq; // @[util.scala:104:23]
assign next_uop_is_unique = next_uop_out_is_unique; // @[util.scala:104:23]
assign next_uop_flush_on_commit = next_uop_out_flush_on_commit; // @[util.scala:104:23]
assign next_uop_csr_cmd = next_uop_out_csr_cmd; // @[util.scala:104:23]
assign next_uop_ldst_is_rs1 = next_uop_out_ldst_is_rs1; // @[util.scala:104:23]
assign next_uop_ldst = next_uop_out_ldst; // @[util.scala:104:23]
assign next_uop_lrs1 = next_uop_out_lrs1; // @[util.scala:104:23]
assign next_uop_lrs2 = next_uop_out_lrs2; // @[util.scala:104:23]
assign next_uop_lrs3 = next_uop_out_lrs3; // @[util.scala:104:23]
assign next_uop_dst_rtype = next_uop_out_dst_rtype; // @[util.scala:104:23]
assign next_uop_lrs1_rtype = next_uop_out_lrs1_rtype; // @[util.scala:104:23]
assign next_uop_lrs2_rtype = next_uop_out_lrs2_rtype; // @[util.scala:104:23]
assign next_uop_frs3_en = next_uop_out_frs3_en; // @[util.scala:104:23]
assign next_uop_fcn_dw = next_uop_out_fcn_dw; // @[util.scala:104:23]
assign next_uop_fcn_op = next_uop_out_fcn_op; // @[util.scala:104:23]
assign next_uop_fp_val = next_uop_out_fp_val; // @[util.scala:104:23]
assign next_uop_fp_rm = next_uop_out_fp_rm; // @[util.scala:104:23]
assign next_uop_fp_typ = next_uop_out_fp_typ; // @[util.scala:104:23]
assign next_uop_xcpt_pf_if = next_uop_out_xcpt_pf_if; // @[util.scala:104:23]
assign next_uop_xcpt_ae_if = next_uop_out_xcpt_ae_if; // @[util.scala:104:23]
assign next_uop_xcpt_ma_if = next_uop_out_xcpt_ma_if; // @[util.scala:104:23]
assign next_uop_bp_debug_if = next_uop_out_bp_debug_if; // @[util.scala:104:23]
assign next_uop_bp_xcpt_if = next_uop_out_bp_xcpt_if; // @[util.scala:104:23]
assign next_uop_debug_fsrc = next_uop_out_debug_fsrc; // @[util.scala:104:23]
assign next_uop_debug_tsrc = next_uop_out_debug_tsrc; // @[util.scala:104:23]
wire [15:0] _next_uop_out_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:93:27]
assign _next_uop_out_br_mask_T_1 = slot_uop_br_mask & _next_uop_out_br_mask_T; // @[util.scala:93:{25,27}]
assign next_uop_out_br_mask = _next_uop_out_br_mask_T_1; // @[util.scala:93:25, :104:23]
assign io_out_uop_inst_0 = next_uop_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_inst_0 = next_uop_debug_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_rvc_0 = next_uop_is_rvc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_pc_0 = next_uop_debug_pc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_0_0 = next_uop_iq_type_0; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_1_0 = next_uop_iq_type_1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_2_0 = next_uop_iq_type_2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iq_type_3_0 = next_uop_iq_type_3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_0_0 = next_uop_fu_code_0; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_1_0 = next_uop_fu_code_1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_2_0 = next_uop_fu_code_2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_3_0 = next_uop_fu_code_3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_4_0 = next_uop_fu_code_4; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_5_0 = next_uop_fu_code_5; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_6_0 = next_uop_fu_code_6; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_7_0 = next_uop_fu_code_7; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_8_0 = next_uop_fu_code_8; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fu_code_9_0 = next_uop_fu_code_9; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_issued_0 = next_uop_iw_issued; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p1_speculative_child_0 = next_uop_iw_p1_speculative_child; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p2_speculative_child_0 = next_uop_iw_p2_speculative_child; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p1_bypass_hint_0 = next_uop_iw_p1_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p2_bypass_hint_0 = next_uop_iw_p2_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_iw_p3_bypass_hint_0 = next_uop_iw_p3_bypass_hint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_dis_col_sel_0 = next_uop_dis_col_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_mask_0 = next_uop_br_mask; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_tag_0 = next_uop_br_tag; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_br_type_0 = next_uop_br_type; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sfb_0 = next_uop_is_sfb; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_fence_0 = next_uop_is_fence; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_fencei_0 = next_uop_is_fencei; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sfence_0 = next_uop_is_sfence; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_amo_0 = next_uop_is_amo; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_eret_0 = next_uop_is_eret; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_sys_pc2epc_0 = next_uop_is_sys_pc2epc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_rocc_0 = next_uop_is_rocc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_mov_0 = next_uop_is_mov; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ftq_idx_0 = next_uop_ftq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_edge_inst_0 = next_uop_edge_inst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pc_lob_0 = next_uop_pc_lob; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_taken_0 = next_uop_taken; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_rename_0 = next_uop_imm_rename; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_sel_0 = next_uop_imm_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pimm_0 = next_uop_pimm; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_imm_packed_0 = next_uop_imm_packed; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_op1_sel_0 = next_uop_op1_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_op2_sel_0 = next_uop_op2_sel; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ldst_0 = next_uop_fp_ctrl_ldst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_wen_0 = next_uop_fp_ctrl_wen; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren1_0 = next_uop_fp_ctrl_ren1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren2_0 = next_uop_fp_ctrl_ren2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_ren3_0 = next_uop_fp_ctrl_ren3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_swap12_0 = next_uop_fp_ctrl_swap12; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_swap23_0 = next_uop_fp_ctrl_swap23; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_typeTagIn_0 = next_uop_fp_ctrl_typeTagIn; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_typeTagOut_0 = next_uop_fp_ctrl_typeTagOut; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fromint_0 = next_uop_fp_ctrl_fromint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_toint_0 = next_uop_fp_ctrl_toint; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fastpipe_0 = next_uop_fp_ctrl_fastpipe; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_fma_0 = next_uop_fp_ctrl_fma; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_div_0 = next_uop_fp_ctrl_div; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_sqrt_0 = next_uop_fp_ctrl_sqrt; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_wflags_0 = next_uop_fp_ctrl_wflags; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_ctrl_vec_0 = next_uop_fp_ctrl_vec; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_rob_idx_0 = next_uop_rob_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldq_idx_0 = next_uop_ldq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_stq_idx_0 = next_uop_stq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_rxq_idx_0 = next_uop_rxq_idx; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_pdst_0 = next_uop_pdst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs1_0 = next_uop_prs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs2_0 = next_uop_prs2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs3_0 = next_uop_prs3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ppred_0 = next_uop_ppred; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs1_busy_0 = next_uop_prs1_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs2_busy_0 = next_uop_prs2_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_prs3_busy_0 = next_uop_prs3_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ppred_busy_0 = next_uop_ppred_busy; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_stale_pdst_0 = next_uop_stale_pdst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_exception_0 = next_uop_exception; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_exc_cause_0 = next_uop_exc_cause; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_cmd_0 = next_uop_mem_cmd; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_size_0 = next_uop_mem_size; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_mem_signed_0 = next_uop_mem_signed; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_uses_ldq_0 = next_uop_uses_ldq; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_uses_stq_0 = next_uop_uses_stq; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_is_unique_0 = next_uop_is_unique; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_flush_on_commit_0 = next_uop_flush_on_commit; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_csr_cmd_0 = next_uop_csr_cmd; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldst_is_rs1_0 = next_uop_ldst_is_rs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_ldst_0 = next_uop_ldst; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs1_0 = next_uop_lrs1; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs2_0 = next_uop_lrs2; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs3_0 = next_uop_lrs3; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_dst_rtype_0 = next_uop_dst_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs1_rtype_0 = next_uop_lrs1_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_lrs2_rtype_0 = next_uop_lrs2_rtype; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_frs3_en_0 = next_uop_frs3_en; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fcn_dw_0 = next_uop_fcn_dw; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fcn_op_0 = next_uop_fcn_op; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_val_0 = next_uop_fp_val; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_rm_0 = next_uop_fp_rm; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_fp_typ_0 = next_uop_fp_typ; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_pf_if_0 = next_uop_xcpt_pf_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_ae_if_0 = next_uop_xcpt_ae_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_xcpt_ma_if_0 = next_uop_xcpt_ma_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_bp_debug_if_0 = next_uop_bp_debug_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_bp_xcpt_if_0 = next_uop_bp_xcpt_if; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_fsrc_0 = next_uop_debug_fsrc; // @[issue-slot.scala:49:7, :59:28]
assign io_out_uop_debug_tsrc_0 = next_uop_debug_tsrc; // @[issue-slot.scala:49:7, :59:28]
wire [15:0] _killed_T = io_brupdate_b1_mispredict_mask_0 & slot_uop_br_mask; // @[util.scala:126:51]
wire _killed_T_1 = |_killed_T; // @[util.scala:126:{51,59}]
wire killed = _killed_T_1 | io_kill_0; // @[util.scala:61:61, :126:59]
wire _io_will_be_valid_T = ~killed; // @[util.scala:61:61]
assign _io_will_be_valid_T_1 = next_valid & _io_will_be_valid_T; // @[issue-slot.scala:58:28, :65:{34,37}]
assign io_will_be_valid_0 = _io_will_be_valid_T_1; // @[issue-slot.scala:49:7, :65:34]
wire _slot_valid_T = ~killed; // @[util.scala:61:61]
wire _slot_valid_T_1 = next_valid & _slot_valid_T; // @[issue-slot.scala:58:28, :74:{30,33}] |
Generate the Verilog code corresponding to the following Chisel files.
File MulRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (ported from Verilog to
Chisel by Andrew Waterman).
Copyright 2019, 2020 The Regents of the University of California. All rights
reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
import consts._
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulFullRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth*2 - 1))
})
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
val notSigNaN_invalidExc = (io.a.isInf && io.b.isZero) || (io.a.isZero && io.b.isInf)
val notNaN_isInfOut = io.a.isInf || io.b.isInf
val notNaN_isZeroOut = io.a.isZero || io.b.isZero
val notNaN_signOut = io.a.sign ^ io.b.sign
val common_sExpOut = io.a.sExp + io.b.sExp - (1<<expWidth).S
val common_sigOut = (io.a.sig * io.b.sig)(sigWidth*2 - 1, 0)
/*------------------------------------------------------------------------
*------------------------------------------------------------------------*/
io.invalidExc := isSigNaNRawFloat(io.a) || isSigNaNRawFloat(io.b) || notSigNaN_invalidExc
io.rawOut.isInf := notNaN_isInfOut
io.rawOut.isZero := notNaN_isZeroOut
io.rawOut.sExp := common_sExpOut
io.rawOut.isNaN := io.a.isNaN || io.b.isNaN
io.rawOut.sign := notNaN_signOut
io.rawOut.sig := common_sigOut
}
class MulRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(new RawFloat(expWidth, sigWidth))
val b = Input(new RawFloat(expWidth, sigWidth))
val invalidExc = Output(Bool())
val rawOut = Output(new RawFloat(expWidth, sigWidth + 2))
})
val mulFullRaw = Module(new MulFullRawFN(expWidth, sigWidth))
mulFullRaw.io.a := io.a
mulFullRaw.io.b := io.b
io.invalidExc := mulFullRaw.io.invalidExc
io.rawOut := mulFullRaw.io.rawOut
io.rawOut.sig := {
val sig = mulFullRaw.io.rawOut.sig
Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR)
}
}
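// Width note on the narrowing above, taking expWidth = 8 and sigWidth = 24 as an example:
// MulFullRawFN's significand product is 2*sigWidth = 48 bits wide, and MulRawFN keeps the
// top sigWidth + 2 = 26 bits while ORing the low sigWidth - 2 = 22 bits into a single
// sticky LSB, yielding the 27-bit rawOut.sig that the rounding stage consumes.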
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
class MulRecFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule
{
val io = IO(new Bundle {
val a = Input(UInt((expWidth + sigWidth + 1).W))
val b = Input(UInt((expWidth + sigWidth + 1).W))
val roundingMode = Input(UInt(3.W))
val detectTininess = Input(Bool())
val out = Output(UInt((expWidth + sigWidth + 1).W))
val exceptionFlags = Output(UInt(5.W))
})
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val mulRawFN = Module(new MulRawFN(expWidth, sigWidth))
mulRawFN.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a)
mulRawFN.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b)
//------------------------------------------------------------------------
//------------------------------------------------------------------------
val roundRawFNToRecFN =
Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0))
roundRawFNToRecFN.io.invalidExc := mulRawFN.io.invalidExc
roundRawFNToRecFN.io.infiniteExc := false.B
roundRawFNToRecFN.io.in := mulRawFN.io.rawOut
roundRawFNToRecFN.io.roundingMode := io.roundingMode
roundRawFNToRecFN.io.detectTininess := io.detectTininess
io.out := roundRawFNToRecFN.io.out
io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags
}
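// A minimal usage sketch, assuming the recFNFromFN/fNFromRecFN converters and the
// rounding-mode constants provided elsewhere in this hardfloat package; widths below are
// for IEEE float32 (expWidth = 8, sigWidth = 24).
class MulRecFNExample extends Module
{
    val io = IO(new Bundle {
        val a = Input(UInt(32.W))     // IEEE-754 single-precision operands
        val b = Input(UInt(32.W))
        val out = Output(UInt(32.W))  // IEEE-754 single-precision product
    })

    val mul = Module(new MulRecFN(8, 24))
    mul.io.a := recFNFromFN(8, 24, io.a)      // convert to the 33-bit recoded format
    mul.io.b := recFNFromFN(8, 24, io.b)
    mul.io.roundingMode := round_near_even    // from consts._
    mul.io.detectTininess := true.B           // assume tininess is detected after rounding
    io.out := fNFromRecFN(8, 24, mul.io.out)  // back to the IEEE encoding
}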
File rawFloatFromRecFN.scala:
/*============================================================================
This Chisel source file is part of a pre-release version of the HardFloat IEEE
Floating-Point Arithmetic Package, by John R. Hauser (with some contributions
from Yunsup Lee and Andrew Waterman, mainly concerning testing).
Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the
University of California. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions, and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions, and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the University nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=============================================================================*/
package hardfloat
import chisel3._
import chisel3.util._
/*----------------------------------------------------------------------------
| In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be
| set.
*----------------------------------------------------------------------------*/
object rawFloatFromRecFN
{
def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat =
{
val exp = in(expWidth + sigWidth - 1, sigWidth - 1)
val isZero = exp(expWidth, expWidth - 2) === 0.U
val isSpecial = exp(expWidth, expWidth - 1) === 3.U
val out = Wire(new RawFloat(expWidth, sigWidth))
out.isNaN := isSpecial && exp(expWidth - 2)
out.isInf := isSpecial && ! exp(expWidth - 2)
out.isZero := isZero
out.sign := in(expWidth + sigWidth)
out.sExp := exp.zext
out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0)
out
}
}
| module MulRecFN_42( // @[MulRecFN.scala:100:7]
input [32:0] io_a, // @[MulRecFN.scala:102:16]
input [32:0] io_b, // @[MulRecFN.scala:102:16]
output [32:0] io_out // @[MulRecFN.scala:102:16]
);
wire _mulRawFN_io_invalidExc; // @[MulRecFN.scala:113:26]
wire _mulRawFN_io_rawOut_isNaN; // @[MulRecFN.scala:113:26]
wire _mulRawFN_io_rawOut_isInf; // @[MulRecFN.scala:113:26]
wire _mulRawFN_io_rawOut_isZero; // @[MulRecFN.scala:113:26]
wire _mulRawFN_io_rawOut_sign; // @[MulRecFN.scala:113:26]
wire [9:0] _mulRawFN_io_rawOut_sExp; // @[MulRecFN.scala:113:26]
wire [26:0] _mulRawFN_io_rawOut_sig; // @[MulRecFN.scala:113:26]
wire [32:0] io_a_0 = io_a; // @[MulRecFN.scala:100:7]
wire [32:0] io_b_0 = io_b; // @[MulRecFN.scala:100:7]
wire io_detectTininess = 1'h1; // @[MulRecFN.scala:100:7, :102:16, :121:15]
wire [2:0] io_roundingMode = 3'h0; // @[MulRecFN.scala:100:7, :102:16, :121:15]
wire [32:0] io_out_0; // @[MulRecFN.scala:100:7]
wire [4:0] io_exceptionFlags; // @[MulRecFN.scala:100:7]
wire [8:0] mulRawFN_io_a_exp = io_a_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _mulRawFN_io_a_isZero_T = mulRawFN_io_a_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire mulRawFN_io_a_isZero = _mulRawFN_io_a_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire mulRawFN_io_a_out_isZero = mulRawFN_io_a_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _mulRawFN_io_a_isSpecial_T = mulRawFN_io_a_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire mulRawFN_io_a_isSpecial = &_mulRawFN_io_a_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _mulRawFN_io_a_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _mulRawFN_io_a_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _mulRawFN_io_a_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _mulRawFN_io_a_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _mulRawFN_io_a_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire mulRawFN_io_a_out_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire mulRawFN_io_a_out_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire mulRawFN_io_a_out_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] mulRawFN_io_a_out_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] mulRawFN_io_a_out_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _mulRawFN_io_a_out_isNaN_T = mulRawFN_io_a_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _mulRawFN_io_a_out_isInf_T = mulRawFN_io_a_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _mulRawFN_io_a_out_isNaN_T_1 = mulRawFN_io_a_isSpecial & _mulRawFN_io_a_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign mulRawFN_io_a_out_isNaN = _mulRawFN_io_a_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _mulRawFN_io_a_out_isInf_T_1 = ~_mulRawFN_io_a_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _mulRawFN_io_a_out_isInf_T_2 = mulRawFN_io_a_isSpecial & _mulRawFN_io_a_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign mulRawFN_io_a_out_isInf = _mulRawFN_io_a_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _mulRawFN_io_a_out_sign_T = io_a_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign mulRawFN_io_a_out_sign = _mulRawFN_io_a_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _mulRawFN_io_a_out_sExp_T = {1'h0, mulRawFN_io_a_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign mulRawFN_io_a_out_sExp = _mulRawFN_io_a_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _mulRawFN_io_a_out_sig_T = ~mulRawFN_io_a_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _mulRawFN_io_a_out_sig_T_1 = {1'h0, _mulRawFN_io_a_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _mulRawFN_io_a_out_sig_T_2 = io_a_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _mulRawFN_io_a_out_sig_T_3 = {_mulRawFN_io_a_out_sig_T_1, _mulRawFN_io_a_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign mulRawFN_io_a_out_sig = _mulRawFN_io_a_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
wire [8:0] mulRawFN_io_b_exp = io_b_0[31:23]; // @[rawFloatFromRecFN.scala:51:21]
wire [2:0] _mulRawFN_io_b_isZero_T = mulRawFN_io_b_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28]
wire mulRawFN_io_b_isZero = _mulRawFN_io_b_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}]
wire mulRawFN_io_b_out_isZero = mulRawFN_io_b_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23]
wire [1:0] _mulRawFN_io_b_isSpecial_T = mulRawFN_io_b_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28]
wire mulRawFN_io_b_isSpecial = &_mulRawFN_io_b_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}]
wire _mulRawFN_io_b_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33]
wire _mulRawFN_io_b_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33]
wire _mulRawFN_io_b_out_sign_T; // @[rawFloatFromRecFN.scala:59:25]
wire [9:0] _mulRawFN_io_b_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27]
wire [24:0] _mulRawFN_io_b_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44]
wire mulRawFN_io_b_out_isNaN; // @[rawFloatFromRecFN.scala:55:23]
wire mulRawFN_io_b_out_isInf; // @[rawFloatFromRecFN.scala:55:23]
wire mulRawFN_io_b_out_sign; // @[rawFloatFromRecFN.scala:55:23]
wire [9:0] mulRawFN_io_b_out_sExp; // @[rawFloatFromRecFN.scala:55:23]
wire [24:0] mulRawFN_io_b_out_sig; // @[rawFloatFromRecFN.scala:55:23]
wire _mulRawFN_io_b_out_isNaN_T = mulRawFN_io_b_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41]
wire _mulRawFN_io_b_out_isInf_T = mulRawFN_io_b_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41]
assign _mulRawFN_io_b_out_isNaN_T_1 = mulRawFN_io_b_isSpecial & _mulRawFN_io_b_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}]
assign mulRawFN_io_b_out_isNaN = _mulRawFN_io_b_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33]
wire _mulRawFN_io_b_out_isInf_T_1 = ~_mulRawFN_io_b_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}]
assign _mulRawFN_io_b_out_isInf_T_2 = mulRawFN_io_b_isSpecial & _mulRawFN_io_b_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}]
assign mulRawFN_io_b_out_isInf = _mulRawFN_io_b_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33]
assign _mulRawFN_io_b_out_sign_T = io_b_0[32]; // @[rawFloatFromRecFN.scala:59:25]
assign mulRawFN_io_b_out_sign = _mulRawFN_io_b_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25]
assign _mulRawFN_io_b_out_sExp_T = {1'h0, mulRawFN_io_b_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27]
assign mulRawFN_io_b_out_sExp = _mulRawFN_io_b_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27]
wire _mulRawFN_io_b_out_sig_T = ~mulRawFN_io_b_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35]
wire [1:0] _mulRawFN_io_b_out_sig_T_1 = {1'h0, _mulRawFN_io_b_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}]
wire [22:0] _mulRawFN_io_b_out_sig_T_2 = io_b_0[22:0]; // @[rawFloatFromRecFN.scala:61:49]
assign _mulRawFN_io_b_out_sig_T_3 = {_mulRawFN_io_b_out_sig_T_1, _mulRawFN_io_b_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}]
assign mulRawFN_io_b_out_sig = _mulRawFN_io_b_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44]
MulRawFN_42 mulRawFN ( // @[MulRecFN.scala:113:26]
.io_a_isNaN (mulRawFN_io_a_out_isNaN), // @[rawFloatFromRecFN.scala:55:23]
.io_a_isInf (mulRawFN_io_a_out_isInf), // @[rawFloatFromRecFN.scala:55:23]
.io_a_isZero (mulRawFN_io_a_out_isZero), // @[rawFloatFromRecFN.scala:55:23]
.io_a_sign (mulRawFN_io_a_out_sign), // @[rawFloatFromRecFN.scala:55:23]
.io_a_sExp (mulRawFN_io_a_out_sExp), // @[rawFloatFromRecFN.scala:55:23]
.io_a_sig (mulRawFN_io_a_out_sig), // @[rawFloatFromRecFN.scala:55:23]
.io_b_isNaN (mulRawFN_io_b_out_isNaN), // @[rawFloatFromRecFN.scala:55:23]
.io_b_isInf (mulRawFN_io_b_out_isInf), // @[rawFloatFromRecFN.scala:55:23]
.io_b_isZero (mulRawFN_io_b_out_isZero), // @[rawFloatFromRecFN.scala:55:23]
.io_b_sign (mulRawFN_io_b_out_sign), // @[rawFloatFromRecFN.scala:55:23]
.io_b_sExp (mulRawFN_io_b_out_sExp), // @[rawFloatFromRecFN.scala:55:23]
.io_b_sig (mulRawFN_io_b_out_sig), // @[rawFloatFromRecFN.scala:55:23]
.io_invalidExc (_mulRawFN_io_invalidExc),
.io_rawOut_isNaN (_mulRawFN_io_rawOut_isNaN),
.io_rawOut_isInf (_mulRawFN_io_rawOut_isInf),
.io_rawOut_isZero (_mulRawFN_io_rawOut_isZero),
.io_rawOut_sign (_mulRawFN_io_rawOut_sign),
.io_rawOut_sExp (_mulRawFN_io_rawOut_sExp),
.io_rawOut_sig (_mulRawFN_io_rawOut_sig)
); // @[MulRecFN.scala:113:26]
RoundRawFNToRecFN_e8_s24_116 roundRawFNToRecFN ( // @[MulRecFN.scala:121:15]
.io_invalidExc (_mulRawFN_io_invalidExc), // @[MulRecFN.scala:113:26]
.io_in_isNaN (_mulRawFN_io_rawOut_isNaN), // @[MulRecFN.scala:113:26]
.io_in_isInf (_mulRawFN_io_rawOut_isInf), // @[MulRecFN.scala:113:26]
.io_in_isZero (_mulRawFN_io_rawOut_isZero), // @[MulRecFN.scala:113:26]
.io_in_sign (_mulRawFN_io_rawOut_sign), // @[MulRecFN.scala:113:26]
.io_in_sExp (_mulRawFN_io_rawOut_sExp), // @[MulRecFN.scala:113:26]
.io_in_sig (_mulRawFN_io_rawOut_sig), // @[MulRecFN.scala:113:26]
.io_out (io_out_0),
.io_exceptionFlags (io_exceptionFlags)
); // @[MulRecFN.scala:121:15]
assign io_out = io_out_0; // @[MulRecFN.scala:100:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
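// A minimal usage sketch of DecoupledHelper above: fire() is the AND of every condition,
// and fire(x) is the AND of everything except x, which keeps x's own handshake output
// from depending on x itself.
class DecoupledHelperExample extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
    val credit = Input(Bool())
  })
  val helper = DecoupledHelper(io.in.valid, io.out.ready, io.credit)
  io.in.ready := helper.fire(io.in.valid)   // all conditions except in.valid itself
  io.out.valid := helper.fire(io.out.ready) // all conditions except out.ready itself
  io.out.bits := io.in.bits
}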
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
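// A minimal usage sketch of MuxTLookup above: selecting a (value, flag) pair by key, with
// a default when no key matches.
class MuxTLookupExample extends Module {
  val io = IO(new Bundle {
    val sel = Input(UInt(2.W))
    val bytes = Output(UInt(4.W))
    val known = Output(Bool())
  })
  val (bytes, known) = MuxTLookup(io.sel, (0.U(4.W), false.B), Seq(
    1.U -> (4.U(4.W), true.B),
    2.U -> (8.U(4.W), true.B)))
  io.bytes := bytes
  io.known := known
}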
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
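// A minimal usage sketch of Str above: packing ASCII text, or a decimal rendering of a
// UInt, into a byte-per-character UInt (e.g. Str("ok") is the 16-bit value 0x6f6b).
class StrExample extends Module {
  val io = IO(new Bundle {
    val count = Input(UInt(16.W))
    val tag = Output(UInt(16.W))
    val digits = Output(UInt(40.W)) // five space-padded decimal digits for a 16-bit value
  })
  io.tag := Str("ok")
  io.digits := Str(io.count)
}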
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
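// A minimal usage sketch of Majority above: for three inputs it reduces to the familiar
// two-of-three voter (a && b) || (a && c) || (b && c).
class MajorityExample extends Module {
  val io = IO(new Bundle {
    val votes = Input(UInt(3.W))
    val agree = Output(Bool())
  })
  io.agree := Majority(io.votes)
}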
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
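// A minimal usage sketch of PopCountAtLeast above: checking whether at least two bits of a
// grant vector are set, without materializing a full population count.
class PopCountAtLeastExample extends Module {
  val io = IO(new Bundle {
    val grants = Input(UInt(8.W))
    val multiGrant = Output(Bool())
  })
  io.multiGrant := PopCountAtLeast(io.grants, 2)
}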
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
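// A minimal usage sketch of MaskGen above: producing a per-byte-lane mask for a 4-byte
// beat from an address and the log2 of the access size.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr = Input(UInt(12.W))
    val lgSize = Input(UInt(2.W))
    val mask = Output(UInt(4.W)) // one bit per byte lane of the beat
  })
  io.mask := MaskGen(io.addr, io.lgSize, 4)
}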
File Nodes.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._
import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}
case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))
object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)
def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)
def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)
override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
monitor.io.in := bundle
}
override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}
trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]
case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
implicit valName: ValName)
extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode
object TLNameNode {
def apply(name: ValName) = TLIdentityNode()(name)
def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLIdentityNode = apply(Some(name))
}
case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()
object TLTempNode {
def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}
case class TLNexusNode(
clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
implicit valName: ValName)
extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode
abstract class TLCustomNode(implicit valName: ValName)
extends CustomNode(TLImp) with TLFormatNode
// Asynchronous crossings
trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]
object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)
override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLAsyncAdapterNode(
clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode
case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode
object TLAsyncNameNode {
def apply(name: ValName) = TLAsyncIdentityNode()(name)
def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}
case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLAsyncImp)(
dFn = { p => TLAsyncClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain
case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
extends MixedAdapterNode(TLAsyncImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]
// Rationally related crossings
trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]
object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)
override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLRationalAdapterNode(
clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode
case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode
object TLRationalNameNode {
def apply(name: ValName) = TLRationalIdentityNode()(name)
def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}
case class TLRationalSourceNode()(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLRationalImp)(
dFn = { p => TLRationalClientPortParameters(p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain
case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
extends MixedAdapterNode(TLRationalImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]
// Credited version of TileLink channels
trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]
object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)
override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}
case class TLCreditedAdapterNode(
clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
implicit valName: ValName)
extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode
case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode
object TLCreditedNameNode {
def apply(name: ValName) = TLCreditedIdentityNode()(name)
def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}
case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) },
uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain
case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName)
extends MixedAdapterNode(TLCreditedImp, TLImp)(
dFn = { p => p.base.v1copy(minLatency = 1) },
uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut]
File WidthWidget.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.AddressSet
import freechips.rocketchip.util.{Repeater, UIntToOH1}
// innerBeatBytes => the new client-facing bus width
class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule
{
private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes
val node = new TLAdapterNode(
clientFn = { case c => c },
managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){
override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired)
}
override lazy val desiredName = s"TLWidthWidget$innerBeatBytes"
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = outBytes / inBytes
val keepBits = log2Ceil(outBytes)
val dropBits = log2Ceil(inBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR }
val corrupt_reg = RegInit(false.B)
val corrupt_in = edgeIn.corrupt(in.bits)
val corrupt_out = corrupt_in || corrupt_reg
when (in.fire) {
count := count + 1.U
corrupt_reg := corrupt_out
when (last) {
count := 0.U
corrupt_reg := false.B
}
}
def helper(idata: UInt): UInt = {
// rdata is X until the first time a multi-beat write occurs.
// Prevent the X from leaking outside by jamming the mux control until
// the first time rdata is written (and hence no longer X).
val rdata_written_once = RegInit(false.B)
val masked_enable = enable.map(_ || !rdata_written_once)
val odata = Seq.fill(ratio) { WireInit(idata) }
val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata)))
val pdata = rdata :+ idata
val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) }
when (in.fire && !last) {
rdata_written_once := true.B
(rdata zip mdata) foreach { case (r, m) => r := m }
}
Cat(mdata.reverse)
}
in.ready := out.ready || !last
out.valid := in.valid && last
out.bits := in.bits
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits)))
edgeOut.corrupt(out.bits) := corrupt_out
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W))
case (o: TLBundleC, i: TLBundleC) => ()
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossible bundle combination in WidthWidget")
}
}
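    // split carves each wide inner beat into `ratio` narrow outer beats. For A/B/C the
    // slice is chosen from the request address; for D it uses the address bits recorded
    // per source (see sourceMap below). It returns !last so the caller can hold the wide
    // beat in a Repeater until every slice has been emitted.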
def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
val inBytes = edgeIn.manager.beatBytes
val outBytes = edgeOut.manager.beatBytes
val ratio = inBytes / outBytes
val keepBits = log2Ceil(inBytes)
val dropBits = log2Ceil(outBytes)
val countBits = log2Ceil(ratio)
val size = edgeIn.size(in.bits)
val hasData = edgeIn.hasData(in.bits)
val limit = UIntToOH1(size, keepBits) >> dropBits
val count = RegInit(0.U(countBits.W))
val first = count === 0.U
val last = count === limit || !hasData
when (out.fire) {
count := count + 1.U
when (last) { count := 0.U }
}
// For sub-beat transfer, extract which part matters
val sel = in.bits match {
case a: TLBundleA => a.address(keepBits-1, dropBits)
case b: TLBundleB => b.address(keepBits-1, dropBits)
case c: TLBundleC => c.address(keepBits-1, dropBits)
case d: TLBundleD => {
val sel = sourceMap(d.source)
val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer
hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway
}
}
val index = sel | count
def helper(idata: UInt, width: Int): UInt = {
val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) }
mux(index)
}
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
// Don't put down hardware if we never carry data
edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8))
(out.bits, in.bits) match {
case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1)
case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1)
case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok
case (o: TLBundleD, i: TLBundleD) => ()
case _ => require(false, "Impossbile bundle combination in WidthWidget")
}
// Repeat the input if we're not last
!last
}
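    // splice picks the per-channel strategy: equal beat widths pass the handshake straight
    // through, a wider inner side is split (with a Repeater replaying the saved wide beat
    // while `repeat` stays high), and a narrower inner side is merged by accumulating
    // beats until a full outer beat is ready.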
def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = {
if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) {
// nothing to do; pass it through
out.bits := in.bits
out.valid := in.valid
in.ready := out.ready
} else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) {
// split input to output
val repeat = Wire(Bool())
val repeated = Repeater(in, repeat)
val cated = Wire(chiselTypeOf(repeated))
cated <> repeated
edgeIn.data(cated.bits) := Cat(
edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8),
edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0))
repeat := split(edgeIn, cated, edgeOut, out, sourceMap)
} else {
// merge input to output
merge(edgeIn, in, edgeOut, out)
}
}
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
// If the master is narrower than the slave, the D channel must be narrowed.
// This is tricky, because the D channel has no address data.
// Thus, you don't know which part of a sub-beat transfer to extract.
// To fix this, we record the relevant address bits for all sources.
// The assumption is that this sort of situation happens only where
// you connect a narrow master to the system bus, so there are few sources.
def sourceMap(source_bits: UInt) = {
val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits
require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes)
val keepBits = log2Ceil(edgeOut.manager.beatBytes)
val dropBits = log2Ceil(edgeIn.manager.beatBytes)
val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W)))
val a_sel = in.a.bits.address(keepBits-1, dropBits)
when (in.a.fire) {
if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning
sources(0) := a_sel
} else {
sources(in.a.bits.source) := a_sel
}
}
// depopulate unused source registers:
edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U }
val bypass = in.a.valid && in.a.bits.source === source
if (edgeIn.manager.minLatency > 0) sources(source)
else Mux(bypass, a_sel, sources(source))
}
splice(edgeIn, in.a, edgeOut, out.a, sourceMap)
splice(edgeOut, out.d, edgeIn, in.d, sourceMap)
if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) {
splice(edgeOut, out.b, edgeIn, in.b, sourceMap)
splice(edgeIn, in.c, edgeOut, out.c, sourceMap)
out.e.valid := in.e.valid
out.e.bits := in.e.bits
in.e.ready := out.e.ready
} else {
in.b.valid := false.B
in.c.ready := true.B
in.e.ready := true.B
out.b.ready := true.B
out.c.valid := false.B
out.e.valid := false.B
}
}
}
}
object TLWidthWidget
{
def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode =
{
val widget = LazyModule(new TLWidthWidget(innerBeatBytes))
widget.node
}
def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes)
}
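// Illustrative usage sketch (not from the original source; node names are hypothetical).
// TLWidthWidget(n) presents an n-byte-wide interface to whatever connects on its client
// side, while the manager side keeps its own width; splice then merges or splits beats
// per edge based on the two beatBytes values.
//
//   wideManager.node := TLWidthWidget(4) := narrowClient.node
//
// The synthesizable unit test below chains two widgets back-to-back to exercise both the
// narrowing and widening directions.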
// Synthesizable unit tests
import freechips.rocketchip.unittest._
class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule {
val fuzz = LazyModule(new TLFuzzer(txns))
val model = LazyModule(new TLRAMModel("WidthWidget"))
val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))
(ram.node
:= TLDelayer(0.1)
:= TLFragmenter(4, 256)
:= TLWidthWidget(second)
:= TLWidthWidget(first)
:= TLDelayer(0.1)
:= model.node
:= fuzz.node)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) with UnitTestModule {
io.finished := fuzz.module.io.finished
}
}
class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) {
val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module)
dut.io.start := DontCare
io.finished := dut.io.finished
}
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], returning its [[AutoBundle]] and any unconnected [[Dangle]]s from this module
    * and its submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
      // Get the empty [[Dangle]]s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
      // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
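// A minimal sketch (an assumption, not taken from this source) of how LazyRawModuleImp is
// commonly specialized: the subclass drives childClock/childReset from explicit IO so that
// lazily instantiated children elaborate in the intended clock domain.
//
//   class MyDomainImp(wrapper: LazyModule) extends LazyRawModuleImp(wrapper) {
//     override def provideImplicitClockToLazyChildren = true
//     val io_clock = IO(Input(Clock()))
//     val io_reset = IO(Input(Reset()))
//     childClock := io_clock
//     childReset := io_reset
//   }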
File Repeater.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{Decoupled, DecoupledIO}
// A Repeater passes its input to its output, unless repeat is asserted.
// When repeat is asserted, the Repeater copies the input and repeats it next cycle.
class Repeater[T <: Data](gen: T) extends Module
{
override def desiredName = s"Repeater_${gen.typeName}"
val io = IO( new Bundle {
val repeat = Input(Bool())
val full = Output(Bool())
val enq = Flipped(Decoupled(gen.cloneType))
val deq = Decoupled(gen.cloneType)
} )
val full = RegInit(false.B)
val saved = Reg(gen.cloneType)
// When !full, a repeater is pass-through
io.deq.valid := io.enq.valid || full
io.enq.ready := io.deq.ready && !full
io.deq.bits := Mux(full, saved, io.enq.bits)
io.full := full
when (io.enq.fire && io.repeat) { full := true.B; saved := io.enq.bits }
when (io.deq.fire && !io.repeat) { full := false.B }
}
object Repeater
{
def apply[T <: Data](enq: DecoupledIO[T], repeat: Bool): DecoupledIO[T] = {
val repeater = Module(new Repeater(chiselTypeOf(enq.bits)))
repeater.io.repeat := repeat
repeater.io.enq <> enq
repeater.io.deq
}
}
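// Usage sketch (illustrative; `wideIn` and `needMoreBeats` are hypothetical names). This
// mirrors how TLWidthWidget.splice drives the Repeater: while `repeat` is held high the
// saved beat keeps being presented, so a downstream splitter can emit several narrow
// beats from one wide input beat.
//
//   val repeat   = Wire(Bool())
//   val repeated = Repeater(wideIn, repeat) // passes through, or replays the saved beat
//   repeat := needMoreBeats                 // e.g. the !last signal from the splitter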
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
* [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] ,
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
  * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package; all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
  *   Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
  *   Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
  *   the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
  *   Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
  *   specified for a sink according to the protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
  *   It should extend [[chisel3.Data]], which represents the real hardware.
* @tparam DO
  *   Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
  *   describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
  *   Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
  *   Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
  *   specified for a source according to the protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
  *   interface. It should extend [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
  *   - line `─`: the source is processed by a function and the result is passed on to others
  *   - Arrow `→`: the target of the arrow is generated by the source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
* A `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
* A n-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
    /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)`
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
  /** Ensure that the same node is not visited twice while resolving `:*=` and related operators. */
private var starCycleGuard = false
  /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
    * connections which need to be resolved in order to determine how many actual edges they correspond to. We also
    * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
    * edge parameters and later build up the correct bundle connections.
*
* [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
* operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
* (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*=
* bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N`
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
      // resolveStar relies on the node subclass to implement the resolution algorithm.
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
  /** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
    * Each element is: `j` Port index of this binding in the Node's [[iPortMapping]] on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  /** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
      // query the port index range of this binding on the other side of the node.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
  // Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
    * If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue,
// In the future, we should add an option to decide whether allowing unconnected in the LazyModule
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
    * Accessors to the result of negotiation to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
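  // Note: in `x := y` terms, this node (x) records the inward binding with the requested
  // kind, while the far node (y) records the mirrored outward binding with BIND_STAR and
  // BIND_QUERY swapped, so a star on one side is seen as a query by the other during
  // cardinality resolution.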
/* Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module TLWidthWidget4( // @[WidthWidget.scala:27:9]
input clock, // @[WidthWidget.scala:27:9]
input reset, // @[WidthWidget.scala:27:9]
output auto_anon_in_a_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_anon_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
input [28:0] auto_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
input [31:0] auto_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_in_d_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [1:0] auto_anon_in_d_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_anon_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_bits_sink, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_bits_denied, // @[LazyModuleImp.scala:107:25]
output [31:0] auto_anon_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_a_ready, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_valid, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
output [2:0] auto_anon_out_a_bits_param, // @[LazyModuleImp.scala:107:25]
output [3:0] auto_anon_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
output [5:0] auto_anon_out_a_bits_source, // @[LazyModuleImp.scala:107:25]
output [28:0] auto_anon_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
output [7:0] auto_anon_out_a_bits_mask, // @[LazyModuleImp.scala:107:25]
output [63:0] auto_anon_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
output auto_anon_out_d_ready, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_valid, // @[LazyModuleImp.scala:107:25]
input [2:0] auto_anon_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
input [1:0] auto_anon_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
input [3:0] auto_anon_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
input [5:0] auto_anon_out_d_bits_source, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
input [63:0] auto_anon_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
input auto_anon_out_d_bits_corrupt // @[LazyModuleImp.scala:107:25]
);
wire [63:0] _repeated_repeater_io_deq_bits_data; // @[Repeater.scala:36:26]
wire auto_anon_in_a_valid_0 = auto_anon_in_a_valid; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_in_a_bits_opcode_0 = auto_anon_in_a_bits_opcode; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_in_a_bits_param_0 = auto_anon_in_a_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] auto_anon_in_a_bits_size_0 = auto_anon_in_a_bits_size; // @[WidthWidget.scala:27:9]
wire [5:0] auto_anon_in_a_bits_source_0 = auto_anon_in_a_bits_source; // @[WidthWidget.scala:27:9]
wire [28:0] auto_anon_in_a_bits_address_0 = auto_anon_in_a_bits_address; // @[WidthWidget.scala:27:9]
wire [3:0] auto_anon_in_a_bits_mask_0 = auto_anon_in_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [31:0] auto_anon_in_a_bits_data_0 = auto_anon_in_a_bits_data; // @[WidthWidget.scala:27:9]
wire auto_anon_in_a_bits_corrupt_0 = auto_anon_in_a_bits_corrupt; // @[WidthWidget.scala:27:9]
wire auto_anon_in_d_ready_0 = auto_anon_in_d_ready; // @[WidthWidget.scala:27:9]
wire auto_anon_out_a_ready_0 = auto_anon_out_a_ready; // @[WidthWidget.scala:27:9]
wire auto_anon_out_d_valid_0 = auto_anon_out_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_out_d_bits_opcode_0 = auto_anon_out_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] auto_anon_out_d_bits_param_0 = auto_anon_out_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] auto_anon_out_d_bits_size_0 = auto_anon_out_d_bits_size; // @[WidthWidget.scala:27:9]
wire [5:0] auto_anon_out_d_bits_source_0 = auto_anon_out_d_bits_source; // @[WidthWidget.scala:27:9]
wire auto_anon_out_d_bits_sink_0 = auto_anon_out_d_bits_sink; // @[WidthWidget.scala:27:9]
wire auto_anon_out_d_bits_denied_0 = auto_anon_out_d_bits_denied; // @[WidthWidget.scala:27:9]
wire [63:0] auto_anon_out_d_bits_data_0 = auto_anon_out_d_bits_data; // @[WidthWidget.scala:27:9]
wire auto_anon_out_d_bits_corrupt_0 = auto_anon_out_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire [7:0] _anonOut_a_bits_mask_T_5 = 8'hFF; // @[WidthWidget.scala:85:119]
wire anonIn_a_ready; // @[MixedNode.scala:551:17]
wire anonIn_a_valid = auto_anon_in_a_valid_0; // @[WidthWidget.scala:27:9]
wire [2:0] anonIn_a_bits_opcode = auto_anon_in_a_bits_opcode_0; // @[WidthWidget.scala:27:9]
wire [2:0] anonIn_a_bits_param = auto_anon_in_a_bits_param_0; // @[WidthWidget.scala:27:9]
wire [3:0] anonIn_a_bits_size = auto_anon_in_a_bits_size_0; // @[WidthWidget.scala:27:9]
wire [5:0] anonIn_a_bits_source = auto_anon_in_a_bits_source_0; // @[WidthWidget.scala:27:9]
wire [28:0] anonIn_a_bits_address = auto_anon_in_a_bits_address_0; // @[WidthWidget.scala:27:9]
wire [3:0] anonIn_a_bits_mask = auto_anon_in_a_bits_mask_0; // @[WidthWidget.scala:27:9]
wire [31:0] anonIn_a_bits_data = auto_anon_in_a_bits_data_0; // @[WidthWidget.scala:27:9]
wire anonIn_a_bits_corrupt = auto_anon_in_a_bits_corrupt_0; // @[WidthWidget.scala:27:9]
wire anonIn_d_ready = auto_anon_in_d_ready_0; // @[WidthWidget.scala:27:9]
wire anonIn_d_valid; // @[MixedNode.scala:551:17]
wire [2:0] anonIn_d_bits_opcode; // @[MixedNode.scala:551:17]
wire [1:0] anonIn_d_bits_param; // @[MixedNode.scala:551:17]
wire [3:0] anonIn_d_bits_size; // @[MixedNode.scala:551:17]
wire [5:0] anonIn_d_bits_source; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_sink; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_denied; // @[MixedNode.scala:551:17]
wire [31:0] anonIn_d_bits_data; // @[MixedNode.scala:551:17]
wire anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17]
wire anonOut_a_ready = auto_anon_out_a_ready_0; // @[WidthWidget.scala:27:9]
wire anonOut_a_valid; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_opcode; // @[MixedNode.scala:542:17]
wire [2:0] anonOut_a_bits_param; // @[MixedNode.scala:542:17]
wire [3:0] anonOut_a_bits_size; // @[MixedNode.scala:542:17]
wire [5:0] anonOut_a_bits_source; // @[MixedNode.scala:542:17]
wire [28:0] anonOut_a_bits_address; // @[MixedNode.scala:542:17]
wire [7:0] anonOut_a_bits_mask; // @[MixedNode.scala:542:17]
wire [63:0] anonOut_a_bits_data; // @[MixedNode.scala:542:17]
wire anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17]
wire anonOut_d_ready; // @[MixedNode.scala:542:17]
wire anonOut_d_valid = auto_anon_out_d_valid_0; // @[WidthWidget.scala:27:9]
wire [2:0] anonOut_d_bits_opcode = auto_anon_out_d_bits_opcode_0; // @[WidthWidget.scala:27:9]
wire [1:0] anonOut_d_bits_param = auto_anon_out_d_bits_param_0; // @[WidthWidget.scala:27:9]
wire [3:0] anonOut_d_bits_size = auto_anon_out_d_bits_size_0; // @[WidthWidget.scala:27:9]
wire [5:0] anonOut_d_bits_source = auto_anon_out_d_bits_source_0; // @[WidthWidget.scala:27:9]
wire anonOut_d_bits_sink = auto_anon_out_d_bits_sink_0; // @[WidthWidget.scala:27:9]
wire anonOut_d_bits_denied = auto_anon_out_d_bits_denied_0; // @[WidthWidget.scala:27:9]
wire [63:0] anonOut_d_bits_data = auto_anon_out_d_bits_data_0; // @[WidthWidget.scala:27:9]
wire anonOut_d_bits_corrupt = auto_anon_out_d_bits_corrupt_0; // @[WidthWidget.scala:27:9]
wire auto_anon_in_a_ready_0; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_in_d_bits_opcode_0; // @[WidthWidget.scala:27:9]
wire [1:0] auto_anon_in_d_bits_param_0; // @[WidthWidget.scala:27:9]
wire [3:0] auto_anon_in_d_bits_size_0; // @[WidthWidget.scala:27:9]
wire [5:0] auto_anon_in_d_bits_source_0; // @[WidthWidget.scala:27:9]
wire auto_anon_in_d_bits_sink_0; // @[WidthWidget.scala:27:9]
wire auto_anon_in_d_bits_denied_0; // @[WidthWidget.scala:27:9]
wire [31:0] auto_anon_in_d_bits_data_0; // @[WidthWidget.scala:27:9]
wire auto_anon_in_d_bits_corrupt_0; // @[WidthWidget.scala:27:9]
wire auto_anon_in_d_valid_0; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_out_a_bits_opcode_0; // @[WidthWidget.scala:27:9]
wire [2:0] auto_anon_out_a_bits_param_0; // @[WidthWidget.scala:27:9]
wire [3:0] auto_anon_out_a_bits_size_0; // @[WidthWidget.scala:27:9]
wire [5:0] auto_anon_out_a_bits_source_0; // @[WidthWidget.scala:27:9]
wire [28:0] auto_anon_out_a_bits_address_0; // @[WidthWidget.scala:27:9]
wire [7:0] auto_anon_out_a_bits_mask_0; // @[WidthWidget.scala:27:9]
wire [63:0] auto_anon_out_a_bits_data_0; // @[WidthWidget.scala:27:9]
wire auto_anon_out_a_bits_corrupt_0; // @[WidthWidget.scala:27:9]
wire auto_anon_out_a_valid_0; // @[WidthWidget.scala:27:9]
wire auto_anon_out_d_ready_0; // @[WidthWidget.scala:27:9]
wire _anonIn_a_ready_T_1; // @[WidthWidget.scala:76:29]
assign auto_anon_in_a_ready_0 = anonIn_a_ready; // @[WidthWidget.scala:27:9]
assign anonOut_a_bits_opcode = anonIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17]
assign anonOut_a_bits_param = anonIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17]
assign anonOut_a_bits_size = anonIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17]
assign anonOut_a_bits_source = anonIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17]
assign anonOut_a_bits_address = anonIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17]
wire [3:0] anonOut_a_bits_mask_odata_0 = anonIn_a_bits_mask; // @[WidthWidget.scala:65:47]
wire [3:0] anonOut_a_bits_mask_odata_1 = anonIn_a_bits_mask; // @[WidthWidget.scala:65:47]
wire [31:0] anonOut_a_bits_data_odata_0 = anonIn_a_bits_data; // @[WidthWidget.scala:65:47]
wire [31:0] anonOut_a_bits_data_odata_1 = anonIn_a_bits_data; // @[WidthWidget.scala:65:47]
wire cated_ready = anonIn_d_ready; // @[WidthWidget.scala:161:25]
wire cated_valid; // @[WidthWidget.scala:161:25]
assign auto_anon_in_d_valid_0 = anonIn_d_valid; // @[WidthWidget.scala:27:9]
wire [2:0] cated_bits_opcode; // @[WidthWidget.scala:161:25]
assign auto_anon_in_d_bits_opcode_0 = anonIn_d_bits_opcode; // @[WidthWidget.scala:27:9]
wire [1:0] cated_bits_param; // @[WidthWidget.scala:161:25]
assign auto_anon_in_d_bits_param_0 = anonIn_d_bits_param; // @[WidthWidget.scala:27:9]
wire [3:0] cated_bits_size; // @[WidthWidget.scala:161:25]
assign auto_anon_in_d_bits_size_0 = anonIn_d_bits_size; // @[WidthWidget.scala:27:9]
wire [5:0] cated_bits_source; // @[WidthWidget.scala:161:25]
assign auto_anon_in_d_bits_source_0 = anonIn_d_bits_source; // @[WidthWidget.scala:27:9]
wire cated_bits_sink; // @[WidthWidget.scala:161:25]
assign auto_anon_in_d_bits_sink_0 = anonIn_d_bits_sink; // @[WidthWidget.scala:27:9]
wire cated_bits_denied; // @[WidthWidget.scala:161:25]
assign auto_anon_in_d_bits_denied_0 = anonIn_d_bits_denied; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_data_0 = anonIn_d_bits_data; // @[WidthWidget.scala:27:9]
wire cated_bits_corrupt; // @[WidthWidget.scala:161:25]
assign auto_anon_in_d_bits_corrupt_0 = anonIn_d_bits_corrupt; // @[WidthWidget.scala:27:9]
wire _anonOut_a_valid_T; // @[WidthWidget.scala:77:29]
assign auto_anon_out_a_valid_0 = anonOut_a_valid; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_opcode_0 = anonOut_a_bits_opcode; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_param_0 = anonOut_a_bits_param; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_size_0 = anonOut_a_bits_size; // @[WidthWidget.scala:27:9]
wire [3:0] _anonOut_a_bits_mask_sizeOH_T = anonOut_a_bits_size; // @[Misc.scala:202:34]
assign auto_anon_out_a_bits_source_0 = anonOut_a_bits_source; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_address_0 = anonOut_a_bits_address; // @[WidthWidget.scala:27:9]
wire [7:0] _anonOut_a_bits_mask_T_7; // @[WidthWidget.scala:85:88]
assign auto_anon_out_a_bits_mask_0 = anonOut_a_bits_mask; // @[WidthWidget.scala:27:9]
wire [63:0] _anonOut_a_bits_data_T_3; // @[WidthWidget.scala:73:12]
assign auto_anon_out_a_bits_data_0 = anonOut_a_bits_data; // @[WidthWidget.scala:27:9]
wire corrupt_out; // @[WidthWidget.scala:47:36]
assign auto_anon_out_a_bits_corrupt_0 = anonOut_a_bits_corrupt; // @[WidthWidget.scala:27:9]
assign auto_anon_out_d_ready_0 = anonOut_d_ready; // @[WidthWidget.scala:27:9]
wire _hasData_opdata_T = anonIn_a_bits_opcode[2]; // @[Edges.scala:92:37]
wire hasData = ~_hasData_opdata_T; // @[Edges.scala:92:{28,37}]
wire [17:0] _limit_T = 18'h7 << anonIn_a_bits_size; // @[package.scala:243:71]
wire [2:0] _limit_T_1 = _limit_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _limit_T_2 = ~_limit_T_1; // @[package.scala:243:{46,76}]
wire limit = _limit_T_2[2]; // @[package.scala:243:46]
reg count; // @[WidthWidget.scala:40:27]
wire _enable_T = count; // @[WidthWidget.scala:40:27, :43:56]
wire first = ~count; // @[WidthWidget.scala:40:27, :41:26]
wire _last_T = count == limit; // @[WidthWidget.scala:38:47, :40:27, :42:26]
wire _last_T_1 = ~hasData; // @[WidthWidget.scala:42:39]
wire last = _last_T | _last_T_1; // @[WidthWidget.scala:42:{26,36,39}]
wire _enable_T_1 = _enable_T & limit; // @[WidthWidget.scala:38:47, :43:{56,63}]
wire _enable_T_2 = _enable_T_1; // @[WidthWidget.scala:43:{63,72}]
wire enable_0 = ~_enable_T_2; // @[WidthWidget.scala:43:{47,72}]
wire _enable_T_3 = ~count; // @[WidthWidget.scala:40:27, :41:26, :43:56]
wire _enable_T_4 = _enable_T_3 & limit; // @[WidthWidget.scala:38:47, :43:{56,63}]
wire _enable_T_5 = _enable_T_4; // @[WidthWidget.scala:43:{63,72}]
wire enable_1 = ~_enable_T_5; // @[WidthWidget.scala:43:{47,72}]
reg corrupt_reg; // @[WidthWidget.scala:45:32]
assign corrupt_out = anonIn_a_bits_corrupt | corrupt_reg; // @[WidthWidget.scala:45:32, :47:36]
assign anonOut_a_bits_corrupt = corrupt_out; // @[WidthWidget.scala:47:36]
wire _T = anonIn_a_ready & anonIn_a_valid; // @[Decoupled.scala:51:35]
wire _anonOut_a_bits_data_T; // @[Decoupled.scala:51:35]
assign _anonOut_a_bits_data_T = _T; // @[Decoupled.scala:51:35]
wire _anonOut_a_bits_mask_T_1; // @[Decoupled.scala:51:35]
assign _anonOut_a_bits_mask_T_1 = _T; // @[Decoupled.scala:51:35]
wire _repeat_sel_sel_T; // @[Decoupled.scala:51:35]
assign _repeat_sel_sel_T = _T; // @[Decoupled.scala:51:35]
wire [1:0] _count_T = {1'h0, count} + 2'h1; // @[WidthWidget.scala:40:27, :50:24]
wire _count_T_1 = _count_T[0]; // @[WidthWidget.scala:50:24]
wire _anonIn_a_ready_T = ~last; // @[WidthWidget.scala:42:36, :76:32]
assign _anonIn_a_ready_T_1 = anonOut_a_ready | _anonIn_a_ready_T; // @[WidthWidget.scala:76:{29,32}]
assign anonIn_a_ready = _anonIn_a_ready_T_1; // @[WidthWidget.scala:76:29]
assign _anonOut_a_valid_T = anonIn_a_valid & last; // @[WidthWidget.scala:42:36, :77:29]
assign anonOut_a_valid = _anonOut_a_valid_T; // @[WidthWidget.scala:77:29]
reg anonOut_a_bits_data_rdata_written_once; // @[WidthWidget.scala:62:41]
wire _anonOut_a_bits_data_masked_enable_T = ~anonOut_a_bits_data_rdata_written_once; // @[WidthWidget.scala:62:41, :63:45]
wire anonOut_a_bits_data_masked_enable_0 = enable_0 | _anonOut_a_bits_data_masked_enable_T; // @[WidthWidget.scala:43:47, :63:{42,45}]
wire _anonOut_a_bits_data_masked_enable_T_1 = ~anonOut_a_bits_data_rdata_written_once; // @[WidthWidget.scala:62:41, :63:45]
wire anonOut_a_bits_data_masked_enable_1 = enable_1 | _anonOut_a_bits_data_masked_enable_T_1; // @[WidthWidget.scala:43:47, :63:{42,45}]
reg [31:0] anonOut_a_bits_data_rdata_0; // @[WidthWidget.scala:66:24]
wire [31:0] anonOut_a_bits_data_mdata_0 = anonOut_a_bits_data_masked_enable_0 ? anonOut_a_bits_data_odata_0 : anonOut_a_bits_data_rdata_0; // @[WidthWidget.scala:63:42, :65:47, :66:24, :68:88]
wire [31:0] anonOut_a_bits_data_mdata_1 = anonOut_a_bits_data_masked_enable_1 ? anonOut_a_bits_data_odata_1 : anonIn_a_bits_data; // @[WidthWidget.scala:63:42, :65:47, :68:88]
wire _anonOut_a_bits_data_T_1 = ~last; // @[WidthWidget.scala:42:36, :69:26, :76:32]
wire _anonOut_a_bits_data_T_2 = _anonOut_a_bits_data_T & _anonOut_a_bits_data_T_1; // @[Decoupled.scala:51:35]
assign _anonOut_a_bits_data_T_3 = {anonOut_a_bits_data_mdata_1, anonOut_a_bits_data_mdata_0}; // @[WidthWidget.scala:68:88, :73:12]
assign anonOut_a_bits_data = _anonOut_a_bits_data_T_3; // @[WidthWidget.scala:73:12]
wire [1:0] anonOut_a_bits_mask_sizeOH_shiftAmount = _anonOut_a_bits_mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49]
wire [3:0] _anonOut_a_bits_mask_sizeOH_T_1 = 4'h1 << anonOut_a_bits_mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12]
wire [2:0] _anonOut_a_bits_mask_sizeOH_T_2 = _anonOut_a_bits_mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}]
wire [2:0] anonOut_a_bits_mask_sizeOH = {_anonOut_a_bits_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27]
wire anonOut_a_bits_mask_sub_sub_sub_0_1 = anonOut_a_bits_size > 4'h2; // @[Misc.scala:206:21]
wire anonOut_a_bits_mask_sub_sub_size = anonOut_a_bits_mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26]
wire anonOut_a_bits_mask_sub_sub_bit = anonOut_a_bits_address[2]; // @[Misc.scala:210:26]
wire anonOut_a_bits_mask_sub_sub_1_2 = anonOut_a_bits_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27]
wire anonOut_a_bits_mask_sub_sub_nbit = ~anonOut_a_bits_mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20]
wire anonOut_a_bits_mask_sub_sub_0_2 = anonOut_a_bits_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _anonOut_a_bits_mask_sub_sub_acc_T = anonOut_a_bits_mask_sub_sub_size & anonOut_a_bits_mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_sub_sub_0_1 = anonOut_a_bits_mask_sub_sub_sub_0_1 | _anonOut_a_bits_mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}]
wire _anonOut_a_bits_mask_sub_sub_acc_T_1 = anonOut_a_bits_mask_sub_sub_size & anonOut_a_bits_mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_sub_sub_1_1 = anonOut_a_bits_mask_sub_sub_sub_0_1 | _anonOut_a_bits_mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}]
wire anonOut_a_bits_mask_sub_size = anonOut_a_bits_mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26]
wire anonOut_a_bits_mask_sub_bit = anonOut_a_bits_address[1]; // @[Misc.scala:210:26]
wire anonOut_a_bits_mask_sub_nbit = ~anonOut_a_bits_mask_sub_bit; // @[Misc.scala:210:26, :211:20]
wire anonOut_a_bits_mask_sub_0_2 = anonOut_a_bits_mask_sub_sub_0_2 & anonOut_a_bits_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _anonOut_a_bits_mask_sub_acc_T = anonOut_a_bits_mask_sub_size & anonOut_a_bits_mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_sub_0_1 = anonOut_a_bits_mask_sub_sub_0_1 | _anonOut_a_bits_mask_sub_acc_T; // @[Misc.scala:215:{29,38}]
wire anonOut_a_bits_mask_sub_1_2 = anonOut_a_bits_mask_sub_sub_0_2 & anonOut_a_bits_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _anonOut_a_bits_mask_sub_acc_T_1 = anonOut_a_bits_mask_sub_size & anonOut_a_bits_mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_sub_1_1 = anonOut_a_bits_mask_sub_sub_0_1 | _anonOut_a_bits_mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}]
wire anonOut_a_bits_mask_sub_2_2 = anonOut_a_bits_mask_sub_sub_1_2 & anonOut_a_bits_mask_sub_nbit; // @[Misc.scala:211:20, :214:27]
wire _anonOut_a_bits_mask_sub_acc_T_2 = anonOut_a_bits_mask_sub_size & anonOut_a_bits_mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_sub_2_1 = anonOut_a_bits_mask_sub_sub_1_1 | _anonOut_a_bits_mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}]
wire anonOut_a_bits_mask_sub_3_2 = anonOut_a_bits_mask_sub_sub_1_2 & anonOut_a_bits_mask_sub_bit; // @[Misc.scala:210:26, :214:27]
wire _anonOut_a_bits_mask_sub_acc_T_3 = anonOut_a_bits_mask_sub_size & anonOut_a_bits_mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_sub_3_1 = anonOut_a_bits_mask_sub_sub_1_1 | _anonOut_a_bits_mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}]
wire anonOut_a_bits_mask_size = anonOut_a_bits_mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26]
wire anonOut_a_bits_mask_bit = anonOut_a_bits_address[0]; // @[Misc.scala:210:26]
wire anonOut_a_bits_mask_nbit = ~anonOut_a_bits_mask_bit; // @[Misc.scala:210:26, :211:20]
wire anonOut_a_bits_mask_eq = anonOut_a_bits_mask_sub_0_2 & anonOut_a_bits_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _anonOut_a_bits_mask_acc_T = anonOut_a_bits_mask_size & anonOut_a_bits_mask_eq; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_acc = anonOut_a_bits_mask_sub_0_1 | _anonOut_a_bits_mask_acc_T; // @[Misc.scala:215:{29,38}]
wire anonOut_a_bits_mask_eq_1 = anonOut_a_bits_mask_sub_0_2 & anonOut_a_bits_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _anonOut_a_bits_mask_acc_T_1 = anonOut_a_bits_mask_size & anonOut_a_bits_mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_acc_1 = anonOut_a_bits_mask_sub_0_1 | _anonOut_a_bits_mask_acc_T_1; // @[Misc.scala:215:{29,38}]
wire anonOut_a_bits_mask_eq_2 = anonOut_a_bits_mask_sub_1_2 & anonOut_a_bits_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _anonOut_a_bits_mask_acc_T_2 = anonOut_a_bits_mask_size & anonOut_a_bits_mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_acc_2 = anonOut_a_bits_mask_sub_1_1 | _anonOut_a_bits_mask_acc_T_2; // @[Misc.scala:215:{29,38}]
wire anonOut_a_bits_mask_eq_3 = anonOut_a_bits_mask_sub_1_2 & anonOut_a_bits_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _anonOut_a_bits_mask_acc_T_3 = anonOut_a_bits_mask_size & anonOut_a_bits_mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_acc_3 = anonOut_a_bits_mask_sub_1_1 | _anonOut_a_bits_mask_acc_T_3; // @[Misc.scala:215:{29,38}]
wire anonOut_a_bits_mask_eq_4 = anonOut_a_bits_mask_sub_2_2 & anonOut_a_bits_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _anonOut_a_bits_mask_acc_T_4 = anonOut_a_bits_mask_size & anonOut_a_bits_mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_acc_4 = anonOut_a_bits_mask_sub_2_1 | _anonOut_a_bits_mask_acc_T_4; // @[Misc.scala:215:{29,38}]
wire anonOut_a_bits_mask_eq_5 = anonOut_a_bits_mask_sub_2_2 & anonOut_a_bits_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _anonOut_a_bits_mask_acc_T_5 = anonOut_a_bits_mask_size & anonOut_a_bits_mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_acc_5 = anonOut_a_bits_mask_sub_2_1 | _anonOut_a_bits_mask_acc_T_5; // @[Misc.scala:215:{29,38}]
wire anonOut_a_bits_mask_eq_6 = anonOut_a_bits_mask_sub_3_2 & anonOut_a_bits_mask_nbit; // @[Misc.scala:211:20, :214:27]
wire _anonOut_a_bits_mask_acc_T_6 = anonOut_a_bits_mask_size & anonOut_a_bits_mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_acc_6 = anonOut_a_bits_mask_sub_3_1 | _anonOut_a_bits_mask_acc_T_6; // @[Misc.scala:215:{29,38}]
wire anonOut_a_bits_mask_eq_7 = anonOut_a_bits_mask_sub_3_2 & anonOut_a_bits_mask_bit; // @[Misc.scala:210:26, :214:27]
wire _anonOut_a_bits_mask_acc_T_7 = anonOut_a_bits_mask_size & anonOut_a_bits_mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38]
wire anonOut_a_bits_mask_acc_7 = anonOut_a_bits_mask_sub_3_1 | _anonOut_a_bits_mask_acc_T_7; // @[Misc.scala:215:{29,38}]
wire [1:0] anonOut_a_bits_mask_lo_lo = {anonOut_a_bits_mask_acc_1, anonOut_a_bits_mask_acc}; // @[Misc.scala:215:29, :222:10]
wire [1:0] anonOut_a_bits_mask_lo_hi = {anonOut_a_bits_mask_acc_3, anonOut_a_bits_mask_acc_2}; // @[Misc.scala:215:29, :222:10]
wire [3:0] anonOut_a_bits_mask_lo = {anonOut_a_bits_mask_lo_hi, anonOut_a_bits_mask_lo_lo}; // @[Misc.scala:222:10]
wire [1:0] anonOut_a_bits_mask_hi_lo = {anonOut_a_bits_mask_acc_5, anonOut_a_bits_mask_acc_4}; // @[Misc.scala:215:29, :222:10]
wire [1:0] anonOut_a_bits_mask_hi_hi = {anonOut_a_bits_mask_acc_7, anonOut_a_bits_mask_acc_6}; // @[Misc.scala:215:29, :222:10]
wire [3:0] anonOut_a_bits_mask_hi = {anonOut_a_bits_mask_hi_hi, anonOut_a_bits_mask_hi_lo}; // @[Misc.scala:222:10]
wire [7:0] _anonOut_a_bits_mask_T = {anonOut_a_bits_mask_hi, anonOut_a_bits_mask_lo}; // @[Misc.scala:222:10]
reg anonOut_a_bits_mask_rdata_written_once; // @[WidthWidget.scala:62:41]
wire _anonOut_a_bits_mask_masked_enable_T = ~anonOut_a_bits_mask_rdata_written_once; // @[WidthWidget.scala:62:41, :63:45]
wire anonOut_a_bits_mask_masked_enable_0 = enable_0 | _anonOut_a_bits_mask_masked_enable_T; // @[WidthWidget.scala:43:47, :63:{42,45}]
wire _anonOut_a_bits_mask_masked_enable_T_1 = ~anonOut_a_bits_mask_rdata_written_once; // @[WidthWidget.scala:62:41, :63:45]
wire anonOut_a_bits_mask_masked_enable_1 = enable_1 | _anonOut_a_bits_mask_masked_enable_T_1; // @[WidthWidget.scala:43:47, :63:{42,45}]
reg [3:0] anonOut_a_bits_mask_rdata_0; // @[WidthWidget.scala:66:24]
wire [3:0] anonOut_a_bits_mask_mdata_0 = anonOut_a_bits_mask_masked_enable_0 ? anonOut_a_bits_mask_odata_0 : anonOut_a_bits_mask_rdata_0; // @[WidthWidget.scala:63:42, :65:47, :66:24, :68:88]
wire [3:0] anonOut_a_bits_mask_mdata_1 = anonOut_a_bits_mask_masked_enable_1 ? anonOut_a_bits_mask_odata_1 : anonIn_a_bits_mask; // @[WidthWidget.scala:63:42, :65:47, :68:88]
wire _anonOut_a_bits_mask_T_2 = ~last; // @[WidthWidget.scala:42:36, :69:26, :76:32]
wire _anonOut_a_bits_mask_T_3 = _anonOut_a_bits_mask_T_1 & _anonOut_a_bits_mask_T_2; // @[Decoupled.scala:51:35]
wire [7:0] _anonOut_a_bits_mask_T_4 = {anonOut_a_bits_mask_mdata_1, anonOut_a_bits_mask_mdata_0}; // @[WidthWidget.scala:68:88, :73:12]
wire [7:0] _anonOut_a_bits_mask_T_6 = hasData ? _anonOut_a_bits_mask_T_4 : 8'hFF; // @[WidthWidget.scala:73:12, :85:93]
assign _anonOut_a_bits_mask_T_7 = _anonOut_a_bits_mask_T & _anonOut_a_bits_mask_T_6; // @[Misc.scala:222:10]
assign anonOut_a_bits_mask = _anonOut_a_bits_mask_T_7; // @[WidthWidget.scala:85:88]
wire _repeat_T_1; // @[WidthWidget.scala:148:7]
wire repeat_0; // @[WidthWidget.scala:159:26]
assign anonIn_d_valid = cated_valid; // @[WidthWidget.scala:161:25]
assign anonIn_d_bits_opcode = cated_bits_opcode; // @[WidthWidget.scala:161:25]
assign anonIn_d_bits_param = cated_bits_param; // @[WidthWidget.scala:161:25]
assign anonIn_d_bits_size = cated_bits_size; // @[WidthWidget.scala:161:25]
assign anonIn_d_bits_source = cated_bits_source; // @[WidthWidget.scala:161:25]
assign anonIn_d_bits_sink = cated_bits_sink; // @[WidthWidget.scala:161:25]
assign anonIn_d_bits_denied = cated_bits_denied; // @[WidthWidget.scala:161:25]
wire [63:0] _cated_bits_data_T_2; // @[WidthWidget.scala:163:39]
assign anonIn_d_bits_corrupt = cated_bits_corrupt; // @[WidthWidget.scala:161:25]
wire [63:0] cated_bits_data; // @[WidthWidget.scala:161:25]
wire [31:0] _cated_bits_data_T = _repeated_repeater_io_deq_bits_data[63:32]; // @[Repeater.scala:36:26]
wire [31:0] _cated_bits_data_T_1 = anonOut_d_bits_data[31:0]; // @[WidthWidget.scala:165:31]
assign _cated_bits_data_T_2 = {_cated_bits_data_T, _cated_bits_data_T_1}; // @[WidthWidget.scala:163:39, :164:37, :165:31]
assign cated_bits_data = _cated_bits_data_T_2; // @[WidthWidget.scala:161:25, :163:39]
wire repeat_hasData = cated_bits_opcode[0]; // @[WidthWidget.scala:161:25]
wire [17:0] _repeat_limit_T = 18'h7 << cated_bits_size; // @[package.scala:243:71]
wire [2:0] _repeat_limit_T_1 = _repeat_limit_T[2:0]; // @[package.scala:243:{71,76}]
wire [2:0] _repeat_limit_T_2 = ~_repeat_limit_T_1; // @[package.scala:243:{46,76}]
wire repeat_limit = _repeat_limit_T_2[2]; // @[package.scala:243:46]
reg repeat_count; // @[WidthWidget.scala:105:26]
wire repeat_first = ~repeat_count; // @[WidthWidget.scala:105:26, :106:25]
wire _repeat_last_T = repeat_count == repeat_limit; // @[WidthWidget.scala:103:47, :105:26, :107:25]
wire _repeat_last_T_1 = ~repeat_hasData; // @[WidthWidget.scala:107:38]
wire repeat_last = _repeat_last_T | _repeat_last_T_1; // @[WidthWidget.scala:107:{25,35,38}]
wire _repeat_T = anonIn_d_ready & anonIn_d_valid; // @[Decoupled.scala:51:35]
wire [1:0] _repeat_count_T = {1'h0, repeat_count} + 2'h1; // @[WidthWidget.scala:105:26, :110:24]
wire _repeat_count_T_1 = _repeat_count_T[0]; // @[WidthWidget.scala:110:24]
reg repeat_sel_sel_sources_0; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_1; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_2; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_3; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_4; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_5; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_6; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_7; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_8; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_9; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_10; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_11; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_12; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_13; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_14; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_15; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_16; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_32; // @[WidthWidget.scala:187:27]
reg repeat_sel_sel_sources_33; // @[WidthWidget.scala:187:27]
wire repeat_sel_sel_a_sel = anonIn_a_bits_address[2]; // @[WidthWidget.scala:188:38]
wire _repeat_sel_sel_bypass_T = anonIn_a_bits_source == cated_bits_source; // @[WidthWidget.scala:161:25, :200:53]
wire repeat_sel_sel_bypass = anonIn_a_valid & _repeat_sel_sel_bypass_T; // @[WidthWidget.scala:200:{33,53}]
reg repeat_sel_hold_r; // @[WidthWidget.scala:121:47]
wire [63:0] _GEN = {{repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_0}, {repeat_sel_sel_sources_33}, {repeat_sel_sel_sources_32}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {1'h0}, {repeat_sel_sel_sources_16}, {repeat_sel_sel_sources_15}, {repeat_sel_sel_sources_14}, {repeat_sel_sel_sources_13}, {repeat_sel_sel_sources_12}, {repeat_sel_sel_sources_11}, {repeat_sel_sel_sources_10}, {repeat_sel_sel_sources_9}, {repeat_sel_sel_sources_8}, {repeat_sel_sel_sources_7}, {repeat_sel_sel_sources_6}, {repeat_sel_sel_sources_5}, {repeat_sel_sel_sources_4}, {repeat_sel_sel_sources_3}, {repeat_sel_sel_sources_2}, {repeat_sel_sel_sources_1}, {repeat_sel_sel_sources_0}}; // @[WidthWidget.scala:121:47, :187:27]
wire repeat_sel_hold = repeat_first ? _GEN[cated_bits_source] : repeat_sel_hold_r; // @[WidthWidget.scala:106:25, :121:{25,47}, :161:25]
wire _repeat_sel_T = ~repeat_limit; // @[WidthWidget.scala:103:47, :122:18]
wire repeat_sel = repeat_sel_hold & _repeat_sel_T; // @[WidthWidget.scala:121:25, :122:{16,18}]
wire repeat_index = repeat_sel | repeat_count; // @[WidthWidget.scala:105:26, :122:16, :126:24]
wire [31:0] _repeat_anonIn_d_bits_data_mux_T = cated_bits_data[31:0]; // @[WidthWidget.scala:128:55, :161:25]
wire [31:0] repeat_anonIn_d_bits_data_mux_0 = _repeat_anonIn_d_bits_data_mux_T; // @[WidthWidget.scala:128:{43,55}]
wire [31:0] _repeat_anonIn_d_bits_data_mux_T_1 = cated_bits_data[63:32]; // @[WidthWidget.scala:128:55, :161:25]
wire [31:0] repeat_anonIn_d_bits_data_mux_1 = _repeat_anonIn_d_bits_data_mux_T_1; // @[WidthWidget.scala:128:{43,55}]
assign anonIn_d_bits_data = repeat_index ? repeat_anonIn_d_bits_data_mux_1 : repeat_anonIn_d_bits_data_mux_0; // @[WidthWidget.scala:126:24, :128:43, :137:30]
assign _repeat_T_1 = ~repeat_last; // @[WidthWidget.scala:107:35, :148:7]
assign repeat_0 = _repeat_T_1; // @[WidthWidget.scala:148:7, :159:26]
always @(posedge clock) begin // @[WidthWidget.scala:27:9]
if (reset) begin // @[WidthWidget.scala:27:9]
count <= 1'h0; // @[WidthWidget.scala:40:27]
corrupt_reg <= 1'h0; // @[WidthWidget.scala:45:32]
anonOut_a_bits_data_rdata_written_once <= 1'h0; // @[WidthWidget.scala:62:41]
anonOut_a_bits_mask_rdata_written_once <= 1'h0; // @[WidthWidget.scala:62:41]
repeat_count <= 1'h0; // @[WidthWidget.scala:105:26]
end
else begin // @[WidthWidget.scala:27:9]
if (_T) begin // @[Decoupled.scala:51:35]
count <= ~last & _count_T_1; // @[WidthWidget.scala:40:27, :42:36, :50:{15,24}, :52:21, :53:17]
corrupt_reg <= ~last & corrupt_out; // @[WidthWidget.scala:42:36, :45:32, :47:36, :50:15, :51:21, :52:21, :53:17, :54:23]
end
anonOut_a_bits_data_rdata_written_once <= _anonOut_a_bits_data_T_2 | anonOut_a_bits_data_rdata_written_once; // @[WidthWidget.scala:62:41, :69:{23,33}, :70:30]
anonOut_a_bits_mask_rdata_written_once <= _anonOut_a_bits_mask_T_3 | anonOut_a_bits_mask_rdata_written_once; // @[WidthWidget.scala:62:41, :69:{23,33}, :70:30]
if (_repeat_T) // @[Decoupled.scala:51:35]
repeat_count <= ~repeat_last & _repeat_count_T_1; // @[WidthWidget.scala:105:26, :107:35, :110:{15,24}, :111:{21,29}]
end
if (_anonOut_a_bits_data_T_2) // @[WidthWidget.scala:69:23]
anonOut_a_bits_data_rdata_0 <= anonOut_a_bits_data_mdata_0; // @[WidthWidget.scala:66:24, :68:88]
if (_anonOut_a_bits_mask_T_3) // @[WidthWidget.scala:69:23]
anonOut_a_bits_mask_rdata_0 <= anonOut_a_bits_mask_mdata_0; // @[WidthWidget.scala:66:24, :68:88]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'h0) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_0 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'h1) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_1 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'h2) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_2 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'h3) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_3 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'h4) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_4 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'h5) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_5 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'h6) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_6 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'h7) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_7 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'h8) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_8 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'h9) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_9 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'hA) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_10 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'hB) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_11 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'hC) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_12 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'hD) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_13 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'hE) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_14 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'hF) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_15 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'h10) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_16 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'h20) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_32 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (_repeat_sel_sel_T & anonIn_a_bits_source == 6'h21) // @[Decoupled.scala:51:35]
repeat_sel_sel_sources_33 <= repeat_sel_sel_a_sel; // @[WidthWidget.scala:187:27, :188:38]
if (repeat_first) // @[WidthWidget.scala:106:25]
repeat_sel_hold_r <= _GEN[cated_bits_source]; // @[WidthWidget.scala:121:47, :161:25]
  end // always @(posedge)
TLMonitor_2 monitor ( // @[Nodes.scala:27:25]
.clock (clock),
.reset (reset),
.io_in_a_ready (anonIn_a_ready), // @[MixedNode.scala:551:17]
.io_in_a_valid (anonIn_a_valid), // @[MixedNode.scala:551:17]
.io_in_a_bits_opcode (anonIn_a_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_a_bits_param (anonIn_a_bits_param), // @[MixedNode.scala:551:17]
.io_in_a_bits_size (anonIn_a_bits_size), // @[MixedNode.scala:551:17]
.io_in_a_bits_source (anonIn_a_bits_source), // @[MixedNode.scala:551:17]
.io_in_a_bits_address (anonIn_a_bits_address), // @[MixedNode.scala:551:17]
.io_in_a_bits_mask (anonIn_a_bits_mask), // @[MixedNode.scala:551:17]
.io_in_a_bits_data (anonIn_a_bits_data), // @[MixedNode.scala:551:17]
.io_in_a_bits_corrupt (anonIn_a_bits_corrupt), // @[MixedNode.scala:551:17]
.io_in_d_ready (anonIn_d_ready), // @[MixedNode.scala:551:17]
.io_in_d_valid (anonIn_d_valid), // @[MixedNode.scala:551:17]
.io_in_d_bits_opcode (anonIn_d_bits_opcode), // @[MixedNode.scala:551:17]
.io_in_d_bits_param (anonIn_d_bits_param), // @[MixedNode.scala:551:17]
.io_in_d_bits_size (anonIn_d_bits_size), // @[MixedNode.scala:551:17]
.io_in_d_bits_source (anonIn_d_bits_source), // @[MixedNode.scala:551:17]
.io_in_d_bits_sink (anonIn_d_bits_sink), // @[MixedNode.scala:551:17]
.io_in_d_bits_denied (anonIn_d_bits_denied), // @[MixedNode.scala:551:17]
.io_in_d_bits_data (anonIn_d_bits_data), // @[MixedNode.scala:551:17]
.io_in_d_bits_corrupt (anonIn_d_bits_corrupt) // @[MixedNode.scala:551:17]
); // @[Nodes.scala:27:25]
Repeater_TLBundleD_a29d64s6k1z4u repeated_repeater ( // @[Repeater.scala:36:26]
.clock (clock),
.reset (reset),
.io_repeat (repeat_0), // @[WidthWidget.scala:159:26]
.io_enq_ready (anonOut_d_ready),
.io_enq_valid (anonOut_d_valid), // @[MixedNode.scala:542:17]
.io_enq_bits_opcode (anonOut_d_bits_opcode), // @[MixedNode.scala:542:17]
.io_enq_bits_param (anonOut_d_bits_param), // @[MixedNode.scala:542:17]
.io_enq_bits_size (anonOut_d_bits_size), // @[MixedNode.scala:542:17]
.io_enq_bits_source (anonOut_d_bits_source), // @[MixedNode.scala:542:17]
.io_enq_bits_sink (anonOut_d_bits_sink), // @[MixedNode.scala:542:17]
.io_enq_bits_denied (anonOut_d_bits_denied), // @[MixedNode.scala:542:17]
.io_enq_bits_data (anonOut_d_bits_data), // @[MixedNode.scala:542:17]
.io_enq_bits_corrupt (anonOut_d_bits_corrupt), // @[MixedNode.scala:542:17]
.io_deq_ready (cated_ready), // @[WidthWidget.scala:161:25]
.io_deq_valid (cated_valid),
.io_deq_bits_opcode (cated_bits_opcode),
.io_deq_bits_param (cated_bits_param),
.io_deq_bits_size (cated_bits_size),
.io_deq_bits_source (cated_bits_source),
.io_deq_bits_sink (cated_bits_sink),
.io_deq_bits_denied (cated_bits_denied),
.io_deq_bits_data (_repeated_repeater_io_deq_bits_data),
.io_deq_bits_corrupt (cated_bits_corrupt)
); // @[Repeater.scala:36:26]
assign auto_anon_in_a_ready = auto_anon_in_a_ready_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_valid = auto_anon_in_d_valid_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_opcode = auto_anon_in_d_bits_opcode_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_param = auto_anon_in_d_bits_param_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_size = auto_anon_in_d_bits_size_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_source = auto_anon_in_d_bits_source_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_sink = auto_anon_in_d_bits_sink_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_denied = auto_anon_in_d_bits_denied_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_data = auto_anon_in_d_bits_data_0; // @[WidthWidget.scala:27:9]
assign auto_anon_in_d_bits_corrupt = auto_anon_in_d_bits_corrupt_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_valid = auto_anon_out_a_valid_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_opcode = auto_anon_out_a_bits_opcode_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_param = auto_anon_out_a_bits_param_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_size = auto_anon_out_a_bits_size_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_source = auto_anon_out_a_bits_source_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_address = auto_anon_out_a_bits_address_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_mask = auto_anon_out_a_bits_mask_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_data = auto_anon_out_a_bits_data_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_a_bits_corrupt = auto_anon_out_a_bits_corrupt_0; // @[WidthWidget.scala:27:9]
assign auto_anon_out_d_ready = auto_anon_out_d_ready_0; // @[WidthWidget.scala:27:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftRegisterPriorityQueue.scala:
package compressacc
import chisel3._
import chisel3.util._
// TODO : support enq & deq at the same cycle
class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle {
val output_prev = KeyValue(keyWidth, value)
val output_nxt = KeyValue(keyWidth, value)
val input_prev = Flipped(KeyValue(keyWidth, value))
val input_nxt = Flipped(KeyValue(keyWidth, value))
val cmd = Flipped(Valid(UInt(1.W)))
val insert_here = Input(Bool())
val cur_input_keyval = Flipped(KeyValue(keyWidth, value))
val cur_output_keyval = KeyValue(keyWidth, value)
}
class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module {
val io = IO(new PriorityQueueStageIO(keyWidth, value))
dontTouch(io)
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val MAX_VALUE = (1 << keyWidth) - 1
val key_reg = RegInit(MAX_VALUE.U(keyWidth.W))
val value_reg = Reg(value)
io.output_prev.key := key_reg
io.output_prev.value := value_reg
io.output_nxt.key := key_reg
io.output_nxt.value := value_reg
io.cur_output_keyval.key := key_reg
io.cur_output_keyval.value := value_reg
when (io.cmd.valid) {
switch (io.cmd.bits) {
is (CMD_DEQ) {
key_reg := io.input_nxt.key
value_reg := io.input_nxt.value
}
is (CMD_ENQ) {
when (io.insert_here) {
key_reg := io.cur_input_keyval.key
value_reg := io.cur_input_keyval.value
} .elsewhen (key_reg >= io.cur_input_keyval.key) {
key_reg := io.input_prev.key
value_reg := io.input_prev.value
} .otherwise {
// do nothing
}
}
}
}
}
object PriorityQueueStage {
def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v)
}
// TODO
// - This design is not scalable as the enqueued keyval is broadcast to all the stages
// - Add pipeline registers later
class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle {
val cnt_bits = log2Ceil(queSize+1)
val counter = Output(UInt(cnt_bits.W))
val enq = Flipped(Decoupled(KeyValue(keyWidth, value)))
val deq = Decoupled(KeyValue(keyWidth, value))
}
class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module {
val keyWidthInternal = keyWidth + 1
val CMD_DEQ = 0.U
val CMD_ENQ = 1.U
val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value))
dontTouch(io)
val MAX_VALUE = ((1 << keyWidthInternal) - 1).U
val cnt_bits = log2Ceil(queSize+1)
  // do not consider cases where we are inserting more entries than the queSize
val counter = RegInit(0.U(cnt_bits.W))
io.counter := counter
val full = (counter === queSize.U)
val empty = (counter === 0.U)
io.deq.valid := !empty
io.enq.ready := !full
when (io.enq.fire) {
counter := counter + 1.U
}
when (io.deq.fire) {
counter := counter - 1.U
}
val cmd_valid = io.enq.valid || io.deq.ready
val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ)
assert(!(io.enq.valid && io.deq.ready))
val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value)))
for (i <- 0 until (queSize - 1)) {
stages(i+1).io.input_prev <> stages(i).io.output_nxt
stages(i).io.input_nxt <> stages(i+1).io.output_prev
}
stages(queSize-1).io.input_nxt.key := MAX_VALUE
// stages(queSize-1).io.input_nxt.value :=
stages(queSize-1).io.input_nxt.value.symbol := 0.U
// stages(queSize-1).io.input_nxt.value.child(0) := 0.U
// stages(queSize-1).io.input_nxt.value.child(1) := 0.U
stages(0).io.input_prev.key := io.enq.bits.key
stages(0).io.input_prev.value <> io.enq.bits.value
for (i <- 0 until queSize) {
stages(i).io.cmd.valid := cmd_valid
stages(i).io.cmd.bits := cmd
stages(i).io.cur_input_keyval <> io.enq.bits
}
val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B)))
for (i <- 0 until queSize) {
is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key)
}
val is_large_or_equal_cat = Wire(UInt(queSize.W))
is_large_or_equal_cat := Cat(is_large_or_equal.reverse)
val insert_here_idx = PriorityEncoder(is_large_or_equal_cat)
for (i <- 0 until queSize) {
when (i.U === insert_here_idx) {
stages(i).io.insert_here := true.B
} .otherwise {
stages(i).io.insert_here := false.B
}
}
io.deq.bits <> stages(0).io.output_prev
}
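// A minimal usage sketch, not part of the original file: it assumes ValueInfo is a concrete
// Bundle with a UInt `symbol` field (as the `value.symbol` assignment above and the generated
// module below suggest) and that KeyValue(width, value) builds the key/value bundle used in the
// queue IO. Because of the assert above, enq.valid and deq.ready must not be high on the same cycle.
class PriorityQueueHarness extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(KeyValue(31, new ValueInfo))) // keyWidthInternal = 30 + 1
    val out = Decoupled(KeyValue(31, new ValueInfo))
  })
  val q = Module(new PriorityQueue(queSize = 4, keyWidth = 30, value = new ValueInfo))
  q.io.enq <> io.in   // push a key/value pair when the queue is not full
  io.out <> q.io.deq  // pop the current minimum key when the queue is not empty
}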
| module PriorityQueueStage_37( // @[ShiftRegisterPriorityQueue.scala:21:7]
input clock, // @[ShiftRegisterPriorityQueue.scala:21:7]
input reset, // @[ShiftRegisterPriorityQueue.scala:21:7]
output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14]
input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14]
output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14]
);
wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24]
assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22]
assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
wire _T_2 = key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30]
always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24]
else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
end
if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
else // @[ShiftRegisterPriorityQueue.scala:21:7]
value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
end
  end // always @(posedge)
assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
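// Usage sketch, not part of the original file: synchronize a 1-bit signal through three named,
// reset-to-false stages. The ShiftRegInitExample module and its io names are illustrative assumptions.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val in = Input(Bool())
    val out = Output(Bool())
  })
  // Elaborates three RegNext stages with suggested names sync_0, sync_1, sync_2, each reset to false.B.
  io.out := ShiftRegInit(io.in, n = 3, init = false.B, name = Some("sync"))
}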
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
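// Usage sketch, not part of the original file: retime a 4-bit signal through two asynchronously
// reset stages. The AsyncResetShiftRegExample module and its io names are illustrative assumptions.
class AsyncResetShiftRegExample extends Module {
  val io = IO(new Bundle {
    val in = Input(UInt(4.W))
    val out = Output(UInt(4.W))
  })
  // Elaborates an AsyncResetShiftReg_w4_d2_i0 wrapper built from two AsyncResetRegVec stages.
  io.out := AsyncResetShiftReg(io.in, depth = 2, init = 0, name = Some("pipe"))
}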
File Crossing.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.interrupts
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg}
@deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2")
class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val intnode = IntAdapterNode()
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
(intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) =>
out := SynchronizerShiftReg(in, sync)
}
}
}
object IntSyncCrossingSource
{
def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) =
{
val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered))
intsource.node
}
}
class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSourceNode(alreadyRegistered)
lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl)
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := AsyncResetReg(Cat(in.reverse)).asBools
}
}
class ImplRegistered extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0)
override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out.sync := in
}
}
}
object IntSyncCrossingSink
{
@deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2")
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(sync)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := SynchronizerShiftReg(in.sync, sync)
}
}
}
object IntSyncAsyncCrossingSink
{
def apply(sync: Int = 3)(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync))
intsink.node
}
}
class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(0)
lazy val module = new Impl
class Impl extends LazyRawModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := in.sync
}
}
}
object IntSyncSyncCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncSyncCrossingSink())
intsink.node
}
}
class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule
{
val node = IntSyncSinkNode(1)
lazy val module = new Impl
class Impl extends LazyModuleImp(this) {
def outSize = node.out.headOption.map(_._1.size).getOrElse(0)
override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}"
(node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) =>
out := RegNext(in.sync)
}
}
}
object IntSyncRationalCrossingSink
{
def apply()(implicit p: Parameters) =
{
val intsink = LazyModule(new IntSyncRationalCrossingSink())
intsink.node
}
}
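// Composition sketch, not part of the original file: inside some LazyModule, a source in the
// producing clock domain is typically paired with a multi-stage asynchronous sink in the
// consuming domain. The producerNode/consumerNode names below are assumptions.
//
//   val intSource = LazyModule(new IntSyncCrossingSource(alreadyRegistered = false))
//   val intSink   = LazyModule(new IntSyncAsyncCrossingSink(sync = 3))
//   intSource.node := producerNode   // register the interrupt in the source domain
//   intSink.node := intSource.node   // cross the clock-domain boundary
//   consumerNode := intSink.node     // synchronized interrupt in the sink domain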
File LazyModuleImp.scala:
package org.chipsalliance.diplomacy.lazymodule
import chisel3.{withClockAndReset, Module, RawModule, Reset, _}
import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo}
import firrtl.passes.InlineAnnotation
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.nodes.Dangle
import scala.collection.immutable.SortedMap
/** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]].
*
* This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy.
*/
sealed trait LazyModuleImpLike extends RawModule {
/** [[LazyModule]] that contains this instance. */
val wrapper: LazyModule
/** IOs that will be automatically "punched" for this instance. */
val auto: AutoBundle
/** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */
protected[diplomacy] val dangles: Seq[Dangle]
// [[wrapper.module]] had better not be accessed while LazyModules are still being built!
require(
LazyModule.scope.isEmpty,
s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}"
)
/** Set module name. Defaults to the containing LazyModule's desiredName. */
override def desiredName: String = wrapper.desiredName
suggestName(wrapper.suggestedName)
/** [[Parameters]] for chisel [[Module]]s. */
implicit val p: Parameters = wrapper.p
  /** Instantiate this [[LazyModule]], return [[AutoBundle]] and any unconnected [[Dangle]]s from this module and
* submodules.
*/
protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = {
// 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]],
// 2. return [[Dangle]]s from each module.
val childDangles = wrapper.children.reverse.flatMap { c =>
implicit val sourceInfo: SourceInfo = c.info
c.cloneProto.map { cp =>
// If the child is a clone, then recursively set cloneProto of its children as well
def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = {
require(bases.size == clones.size)
(bases.zip(clones)).map { case (l, r) =>
require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}")
l.cloneProto = Some(r)
assignCloneProtos(l.children, r.children)
}
}
assignCloneProtos(c.children, cp.children)
// Clone the child module as a record, and get its [[AutoBundle]]
val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName)
val clonedAuto = clone("auto").asInstanceOf[AutoBundle]
// Get the empty [[Dangle]]'s of the cloned child
val rawDangles = c.cloneDangles()
require(rawDangles.size == clonedAuto.elements.size)
// Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s
val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) }
dangles
}.getOrElse {
// For non-clones, instantiate the child module
val mod = try {
Module(c.module)
} catch {
case e: ChiselException => {
println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}")
throw e
}
}
mod.dangles
}
}
// Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]].
// This will result in a sequence of [[Dangle]] from these [[BaseNode]]s.
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate())
// Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]]
val allDangles = nodeDangles ++ childDangles
// Group [[allDangles]] by their [[source]].
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*)
// For each [[source]] set of [[Dangle]]s of size 2, ensure that these
// can be connected as a source-sink pair (have opposite flipped value).
// Make the connection and mark them as [[done]].
val done = Set() ++ pairing.values.filter(_.size == 2).map {
case Seq(a, b) =>
require(a.flipped != b.flipped)
// @todo <> in chisel3 makes directionless connection.
if (a.flipped) {
a.data <> b.data
} else {
b.data <> a.data
}
a.source
case _ => None
}
// Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module.
val forward = allDangles.filter(d => !done(d.source))
// Generate [[AutoBundle]] IO from [[forward]].
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*))
// Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]]
val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) =>
if (d.flipped) {
d.data <> io
} else {
io <> d.data
}
d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name)
}
// Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]].
wrapper.inModuleBody.reverse.foreach {
_()
}
if (wrapper.shouldBeInlined) {
chisel3.experimental.annotate(new ChiselAnnotation {
def toFirrtl = InlineAnnotation(toNamed)
})
}
// Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]].
(auto, dangles)
}
}
/** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike {
/** Instantiate hardware of this `Module`. */
val (auto, dangles) = instantiate()
}
/** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]].
*
* @param wrapper
* the [[LazyModule]] from which the `.module` call is being made.
*/
class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike {
// These wires are the default clock+reset for all LazyModule children.
// It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the
// [[LazyRawModuleImp]] children.
// Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly.
/** drive clock explicitly. */
val childClock: Clock = Wire(Clock())
/** drive reset explicitly. */
val childReset: Reset = Wire(Reset())
// the default is that these are disabled
childClock := false.B.asClock
childReset := chisel3.DontCare
def provideImplicitClockToLazyChildren: Boolean = false
val (auto, dangles) =
if (provideImplicitClockToLazyChildren) {
withClockAndReset(childClock, childReset) { instantiate() }
} else {
instantiate()
}
}
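// Minimal sketch of the pattern these classes support; the ExampleLazyModule name and body are
// illustrative assumptions, not part of this file. Hardware goes inside a LazyModuleImp so that
// diplomatic negotiation has finished before instantiate() builds the AutoBundle and Dangles.
class ExampleLazyModule(implicit p: Parameters) extends LazyModule {
  lazy val module = new Impl
  class Impl extends LazyModuleImp(this) {
    // node.in / node.out of any nodes declared on the wrapper are safe to use here;
    // any Dangles left unpaired by instantiate() surface on this module's `auto` IO.
  }
}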
File MixedNode.scala:
package org.chipsalliance.diplomacy.nodes
import chisel3.{Data, DontCare, Wire}
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.{Field, Parameters}
import org.chipsalliance.diplomacy.ValName
import org.chipsalliance.diplomacy.sourceLine
/** One side metadata of a [[Dangle]].
*
* Describes one side of an edge going into or out of a [[BaseNode]].
*
* @param serial
* the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to.
* @param index
* the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to.
*/
case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] {
import scala.math.Ordered.orderingToOrdered
def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that))
}
/** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]]
* connects.
*
 * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]], and
* [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]].
*
* @param source
* the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within
* that [[BaseNode]].
* @param sink
* sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that
* [[BaseNode]].
* @param flipped
* flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to
* `danglesIn`.
* @param dataOpt
* actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module
*/
case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) {
def data = dataOpt.get
}
/** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often
* derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually
* implement the protocol.
*/
case class Edges[EI, EO](in: Seq[EI], out: Seq[EO])
/** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */
case object MonitorsEnabled extends Field[Boolean](true)
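// Sketch of how this field is typically overridden, not part of this file (it would also need
// org.chipsalliance.cde.config.Config in scope):
//   class WithNoMonitors extends Config((site, here, up) => {
//     case MonitorsEnabled => false
//   })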
/** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented.
*
* For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but
* [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink
 * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the
* [[LazyModule]].
*/
case object RenderFlipped extends Field[Boolean](false)
/** The sealed node class in the package, all nodes are derived from it.
*
* @param inner
* Sink interface implementation.
* @param outer
* Source interface implementation.
* @param valName
* val name of this node.
* @tparam DI
 * Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters
* describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected
* [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port
* parameters.
* @tparam UI
 * Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing
 * the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself.
* @tparam EI
 * Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers
* specified for a sink according to protocol.
* @tparam BI
* Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface.
 * It should extend from [[chisel3.Data]], which represents the real hardware.
* @tparam DO
 * Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters
 * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself.
* @tparam UO
 * Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing
* the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]].
* Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters.
* @tparam EO
 * Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers
* specified for a source according to protocol.
* @tparam BO
* Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source
 * interface. It should extend from [[chisel3.Data]], which represents the real hardware.
*
* @note
* Call Graph of [[MixedNode]]
 * - line `─`: source is processed by a function and the result is passed to others
* - Arrow `→`: target of arrow is generated by source
*
* {{{
* (from the other node)
* ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐
* ↓ │
* (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │
* [[InwardNode.accPI]] │ │ │
* │ │ (based on protocol) │
* │ │ [[MixedNode.inner.edgeI]] │
* │ │ ↓ │
* ↓ │ │ │
* (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │
* [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │
* │ │ ↑ │ │ │
* │ │ │ [[OutwardNode.doParams]] │ │
* │ │ │ (from the other node) │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ └────────┬──────────────┤ │
* │ │ │ │ │ │
* │ │ │ │ (based on protocol) │
* │ │ │ │ [[MixedNode.inner.edgeI]] │
* │ │ │ │ │ │
* │ │ (from the other node) │ ↓ │
* │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │
* │ ↑ ↑ │ │ ↓ │
* │ │ │ │ │ [[MixedNode.in]] │
* │ │ │ │ ↓ ↑ │
* │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │
* ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │
* │ │ │ [[MixedNode.bundleOut]]─┐ │ │
* │ │ │ ↑ ↓ │ │
* │ │ │ │ [[MixedNode.out]] │ │
* │ ↓ ↓ │ ↑ │ │
* │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │
* │ │ (from the other node) ↑ │ │
* │ │ │ │ │ │
* │ │ │ [[MixedNode.outer.edgeO]] │ │
* │ │ │ (based on protocol) │ │
* │ │ │ │ │ │
* │ │ │ ┌────────────────────────────────────────┤ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* │ │ │ │ │ │ │
* (immobilize after elaboration)│ ↓ │ │ │ │
* [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │
* ↑ (inward port from [[OutwardNode]]) │ │ │ │
* │ ┌─────────────────────────────────────────┤ │ │ │
* │ │ │ │ │ │
* │ │ │ │ │ │
* [[OutwardNode.accPO]] │ ↓ │ │ │
* (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │
* │ ↑ │ │
* │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │
* └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘
* }}}
*/
abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data](
val inner: InwardNodeImp[DI, UI, EI, BI],
val outer: OutwardNodeImp[DO, UO, EO, BO]
)(
implicit valName: ValName)
extends BaseNode
with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO]
with InwardNode[DI, UI, BI]
with OutwardNode[DO, UO, BO] {
// Generate a [[NodeHandle]] with inward and outward node are both this node.
val inward = this
val outward = this
/** Debug info of nodes binding. */
def bindingInfo: String = s"""$iBindingInfo
|$oBindingInfo
|""".stripMargin
/** Debug info of ports connecting. */
def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}]
|${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}]
|""".stripMargin
/** Debug info of parameters propagations. */
def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}]
|${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}]
|${diParams.size} downstream inward parameters: [${diParams.mkString(",")}]
|${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}]
|""".stripMargin
/** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and
* [[MixedNode.iPortMapping]].
*
* Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward
* stars and outward stars.
*
* This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type
* of node.
*
* @param iKnown
* Number of known-size ([[BIND_ONCE]]) input bindings.
* @param oKnown
* Number of known-size ([[BIND_ONCE]]) output bindings.
* @param iStar
* Number of unknown size ([[BIND_STAR]]) input bindings.
* @param oStar
* Number of unknown size ([[BIND_STAR]]) output bindings.
* @return
* A Tuple of the resolved number of input and output connections.
*/
protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int)
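// Hedged sketch (not from the original source): an adapter-style node, which pairs one input edge with one
// output edge, might resolve the stars roughly like this:
//   protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) = {
//     require(iStar + oStar <= 1, "cannot star both sides of an adapter")
//     if (oStar > 0) (0, iKnown - oKnown)       // outputs grow to match the known inputs
//     else if (iStar > 0) (oKnown - iKnown, 0)  // inputs grow to match the known outputs
//     else { require(iKnown == oKnown); (0, 0) }
//   }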
/** Function to generate downward-flowing outward params from the downward-flowing input params and the current output
* ports.
*
* @param n
* The size of the output sequence to generate.
* @param p
* Sequence of downward-flowing input parameters of this node.
* @return
An `n`-sized sequence of downward-flowing output edge parameters.
*/
protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO]
/** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]].
*
* @param n
* Size of the output sequence.
* @param p
* Upward-flowing output edge parameters.
* @return
An `n`-sized sequence of upward-flowing input edge parameters.
*/
protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI]
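// Hedged sketch: an adapter-style node typically maps parameters element-wise through node-specific transfer
// functions (called dFn/uFn here purely for illustration):
//   protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] = p.map(dFn)
//   protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] = p.map(uFn)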
/** @return
* The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with
* [[BIND_STAR]].
*/
protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR)
/** @return
* The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of
* output bindings bound with [[BIND_STAR]].
*/
protected[diplomacy] lazy val sourceCard: Int =
iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR)
/** @return list of nodes involved in flex bindings with this node. */
protected[diplomacy] lazy val flexes: Seq[BaseNode] =
oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2)
/** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin
* greedily taking up the remaining connections.
*
* @return
* A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return
* value is not relevant.
*/
protected[diplomacy] lazy val flexOffset: Int = {
/** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex
* operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a
* connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of
* each node in the current set and decide whether they should be added to the set or not.
*
* @return
* the mapping of [[BaseNode]] indexed by their serial numbers.
*/
def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = {
if (visited.contains(v.serial) || !v.flexibleArityDirection) {
visited
} else {
v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum))
}
}
/** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node.
*
* @example
* {{{
* a :*=* b :*=* c
* d :*=* b
* e :*=* f
* }}}
*
* `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`.
*/
val flexSet = DFS(this, Map()).values
/** The total number of :*= operators where we're on the left. */
val allSink = flexSet.map(_.sinkCard).sum
/** The total number of :=* operators used when we're on the right. */
val allSource = flexSet.map(_.sourceCard).sum
require(
allSink == 0 || allSource == 0,
s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction."
)
allSink - allSource
}
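// Hedged illustration of the example above: if the only non-flex star binding touching {a, b, c, d} were a
// single `a :*= x` (x being some unrelated node), then allSink = 1 and allSource = 0, so flexOffset = 1 and
// cardinality for the whole flex-connected set is inferred in the sink direction.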
/** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */
protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = {
if (flexibleArityDirection) flexOffset
else if (n.flexibleArityDirection) n.flexOffset
else 0
}
/** For a node which is connected between two nodes, select the one that will influence the direction of the flex
* resolution.
*/
protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = {
val dir = edgeArityDirection(n)
if (dir < 0) l
else if (dir > 0) r
else 1
}
/** Ensure that the same node is not visited twice in resolving `:*=`, etc. operators. */
private var starCycleGuard = false
/** Resolve all the star operators into concrete indices. As connections are being made, some may be "star"
 * connections which need to be resolved in order to determine how many actual edges they correspond to. We also
 * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct
 * edge parameters and later build up correct bundle connections.
 *
 * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding
 * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort
 * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo
 * :*=* N`. [[iStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= bar`.
*/
protected[diplomacy] lazy val (
oPortMapping: Seq[(Int, Int)],
iPortMapping: Seq[(Int, Int)],
oStar: Int,
iStar: Int
) = {
try {
if (starCycleGuard) throw StarCycleException()
starCycleGuard = true
// For a given node N...
// Number of foo :=* N
// + Number of bar :=* foo :*=* N
val oStars = oBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0)
}
// Number of N :*= foo
// + Number of N :*=* foo :*= bar
val iStars = iBindings.count { case (_, n, b, _, _) =>
b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0)
}
// 1 for foo := N
// + bar.iStar for bar :*= foo :*=* N
// + foo.iStar for foo :*= N
// + 0 for foo :=* N
val oKnown = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, 0, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => 0
}
}.sum
// 1 for N := foo
// + bar.oStar for N :*=* foo :=* bar
// + foo.oStar for N :=* foo
// + 0 for N :*= foo
val iKnown = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, 0)
case BIND_QUERY => n.oStar
case BIND_STAR => 0
}
}.sum
// Star resolution is delegated to the node subclass, which implements the algorithm via [[resolveStar]].
val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars)
// Cumulative list of resolved outward binding range starting points
val oSum = oBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar)
case BIND_QUERY => n.iStar
case BIND_STAR => oStar
}
}.scanLeft(0)(_ + _)
// Cumulative list of resolved inward binding range starting points
val iSum = iBindings.map { case (_, n, b, _, _) =>
b match {
case BIND_ONCE => 1
case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar)
case BIND_QUERY => n.oStar
case BIND_STAR => iStar
}
}.scanLeft(0)(_ + _)
// Create ranges for each binding based on the running sums and return
// those along with resolved values for the star operations.
(oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar)
} catch {
case c: StarCycleException => throw c.copy(loop = context +: c.loop)
}
}
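// Worked example (hedged): if oBindings are [BIND_ONCE, BIND_STAR, BIND_ONCE] and the star resolves to
// oStar = 2, the per-binding edge counts are [1, 2, 1], oSum = [0, 1, 3, 4], and therefore
// oPortMapping = [(0, 1), (1, 3), (3, 4)].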
/** Sequence of outward ports.
*
* This should be called after all star bindings are resolved.
*
* Each element is: `j` Port index of this binding in the [[iPortMapping]] of the node on the other side of the binding.
* `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this
* connection was made in the source code.
*/
protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] =
oBindings.flatMap { case (i, n, _, p, s) =>
// for each binding operator in this node, look at what it connects to
val (start, end) = n.iPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
/** Sequence of inward ports.
*
* This should be called after all star bindings are resolved.
*
* `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of
* outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection
* was made in the source code.
*/
protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] =
iBindings.flatMap { case (i, n, _, p, s) =>
// query this binding's port index range on the other side of the connection.
val (start, end) = n.oPortMapping(i)
(start until end).map { j => (j, n, p, s) }
}
// Ephemeral nodes (which have non-None iForward/oForward) have in_degree = out_degree
// Thus, there must exist an Eulerian path and the below algorithms terminate
@scala.annotation.tailrec
private def oTrace(
tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)
): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.iForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => oTrace((j, m, p, s))
}
}
@scala.annotation.tailrec
private def iTrace(
tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)
): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match {
case (i, n, p, s) => n.oForward(i) match {
case None => (i, n, p, s)
case Some((j, m)) => iTrace((j, m, p, s))
}
}
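// e.g. (hedged) if an entry of oDirectPorts points at an EphemeralNode n with n.iForward(i) == Some((j, m)),
// oTrace rewrites that entry to (j, m, p, s), repeating until a node with no forwarding is reached.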
/** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - Numeric index of this binding in the [[InwardNode]] on the other end.
* - [[InwardNode]] on the other end of this binding.
* - A view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace)
/** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved.
*
* Each Port is a tuple of:
* - numeric index of this binding in [[OutwardNode]] on the other end.
* - [[OutwardNode]] on the other end of this binding.
* - a view of [[Parameters]] where the binding occurred.
* - [[SourceInfo]] for source-level error reporting.
*/
lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace)
private var oParamsCycleGuard = false
protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) }
protected[diplomacy] lazy val doParams: Seq[DO] = {
try {
if (oParamsCycleGuard) throw DownwardCycleException()
oParamsCycleGuard = true
val o = mapParamsD(oPorts.size, diParams)
require(
o.size == oPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of outward ports should equal the number of produced outward parameters.
|$context
|$connectedPortsInfo
|Downstreamed inward parameters: [${diParams.mkString(",")}]
|Produced outward parameters: [${o.mkString(",")}]
|""".stripMargin
)
o.map(outer.mixO(_, this))
} catch {
case c: DownwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
private var iParamsCycleGuard = false
protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) }
protected[diplomacy] lazy val uiParams: Seq[UI] = {
try {
if (iParamsCycleGuard) throw UpwardCycleException()
iParamsCycleGuard = true
val i = mapParamsU(iPorts.size, uoParams)
require(
i.size == iPorts.size,
s"""Diplomacy has detected a problem with your graph:
|At the following node, the number of inward ports should equal the number of produced inward parameters.
|$context
|$connectedPortsInfo
|Upstreamed outward parameters: [${uoParams.mkString(",")}]
|Produced inward parameters: [${i.mkString(",")}]
|""".stripMargin
)
i.map(inner.mixI(_, this))
} catch {
case c: UpwardCycleException => throw c.copy(loop = context +: c.loop)
}
}
/** Outward edge parameters. */
protected[diplomacy] lazy val edgesOut: Seq[EO] =
(oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) }
/** Inward edge parameters. */
protected[diplomacy] lazy val edgesIn: Seq[EI] =
(iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) }
/** A tuple of the input edge parameters and output edge parameters for the edges bound to this node.
*
* If you need access to the edges of a foreign Node, use this method (in/out create bundles).
*/
lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut)
/** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */
protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e =>
val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out")
// TODO: unconnected forwarded diplomatic signals are currently DontCare'd for compatibility reasons.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
/** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */
protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e =>
val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In")
// TODO: unconnected forwarded diplomatic signals are currently DontCare'd for compatibility reasons.
// In the future, we should add an option to decide whether to allow unconnected signals in the LazyModule.
x := DontCare
x
}
private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(serial, i),
sink = HalfEdge(n.serial, j),
flipped = false,
name = wirePrefix + "out",
dataOpt = None
)
}
private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) =>
Dangle(
source = HalfEdge(n.serial, j),
sink = HalfEdge(serial, i),
flipped = true,
name = wirePrefix + "in",
dataOpt = None
)
}
/** Create the [[Dangle]]s which describe the connections from this node's outputs to other nodes' inputs. */
protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleOut(i)))
}
/** Create the [[Dangle]]s which describe the connections into this node's inputs from other nodes' outputs. */
protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) =>
d.copy(dataOpt = Some(bundleIn(i)))
}
private[diplomacy] var instantiated = false
/** Gather Bundle and edge parameters of outward ports.
*
* Accessors to the results of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def out: Seq[(BO, EO)] = {
require(
instantiated,
s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleOut.zip(edgesOut)
}
/** Gather Bundle and edge parameters of inward ports.
*
* Accessors to the results of negotiation, to be used within [[LazyModuleImp]] code. Should only be used within
* [[LazyModuleImp]] code or after its instantiation has completed.
*/
def in: Seq[(BI, EI)] = {
require(
instantiated,
s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun"
)
bundleIn.zip(edgesIn)
}
/** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires,
* instantiate monitors on all input ports if appropriate, and return all the dangles of this node.
*/
protected[diplomacy] def instantiate(): Seq[Dangle] = {
instantiated = true
if (!circuitIdentity) {
(iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) }
}
danglesOut ++ danglesIn
}
protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn
/** Connects the outward part of a node with the inward part of this node. */
protected[diplomacy] def bind(
h: OutwardNode[DI, UI, BI],
binding: NodeBinding
)(
implicit p: Parameters,
sourceInfo: SourceInfo
): Unit = {
val x = this // x := y
val y = h
sourceLine(sourceInfo, " at ", "")
val i = x.iPushed
val o = y.oPushed
y.oPush(
i,
x,
binding match {
case BIND_ONCE => BIND_ONCE
case BIND_FLEX => BIND_FLEX
case BIND_STAR => BIND_QUERY
case BIND_QUERY => BIND_STAR
}
)
x.iPush(o, y, binding)
}
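// For example (hedged): `x := y` records BIND_ONCE on both sides, whereas `x :*= y` records BIND_STAR in
// x.iBindings and the flipped BIND_QUERY in y.oBindings, so y can later be queried for the resolved count.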
/** Metadata for printing the node graph. */
def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) =>
val re = inner.render(e)
(n, re.copy(flipped = re.flipped != p(RenderFlipped)))
}
/** Metadata for printing the node graph */
def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) }
}
| module IntSyncAsyncCrossingSink_n1x2( // @[Crossing.scala:74:9]
input clock, // @[Crossing.scala:74:9]
input reset, // @[Crossing.scala:74:9]
input auto_in_sync_0, // @[LazyModuleImp.scala:107:25]
input auto_in_sync_1, // @[LazyModuleImp.scala:107:25]
output auto_out_0, // @[LazyModuleImp.scala:107:25]
output auto_out_1 // @[LazyModuleImp.scala:107:25]
);
wire [1:0] _chain_io_q; // @[ShiftReg.scala:45:23]
wire auto_in_sync_0_0 = auto_in_sync_0; // @[Crossing.scala:74:9]
wire auto_in_sync_1_0 = auto_in_sync_1; // @[Crossing.scala:74:9]
wire nodeIn_sync_0 = auto_in_sync_0_0; // @[Crossing.scala:74:9]
wire nodeIn_sync_1 = auto_in_sync_1_0; // @[Crossing.scala:74:9]
wire nodeOut_0; // @[MixedNode.scala:542:17]
wire nodeOut_1; // @[MixedNode.scala:542:17]
wire auto_out_0_0; // @[Crossing.scala:74:9]
wire auto_out_1_0; // @[Crossing.scala:74:9]
assign auto_out_0_0 = nodeOut_0; // @[Crossing.scala:74:9]
assign auto_out_1_0 = nodeOut_1; // @[Crossing.scala:74:9]
wire [1:0] _chain_io_d_T = {nodeIn_sync_1, nodeIn_sync_0}; // @[ShiftReg.scala:47:22]
assign nodeOut_0 = _chain_io_q[0]; // @[ShiftReg.scala:45:23, :48:24]
assign nodeOut_1 = _chain_io_q[1]; // @[ShiftReg.scala:45:23, :48:24]
SynchronizerShiftReg_w2_d3 chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (reset),
.io_d (_chain_io_d_T), // @[ShiftReg.scala:47:22]
.io_q (_chain_io_q)
); // @[ShiftReg.scala:45:23]
assign auto_out_0 = auto_out_0_0; // @[Crossing.scala:74:9]
assign auto_out_1 = auto_out_1_0; // @[Crossing.scala:74:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File SourceB.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
class SourceBRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val param = UInt(3.W)
val tag = UInt(params.tagBits.W)
val set = UInt(params.setBits.W)
val clients = UInt(params.clientBits.W)
}
class SourceB(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val req = Flipped(Decoupled(new SourceBRequest(params)))
val b = Decoupled(new TLBundleB(params.inner.bundle))
})
if (params.firstLevel) {
// Tie off unused ports
io.req.ready := true.B
io.b.valid := false.B
io.b.bits := DontCare
} else {
val remain = RegInit(0.U(params.clientBits.W))
val remain_set = WireInit(init = 0.U(params.clientBits.W))
val remain_clr = WireInit(init = 0.U(params.clientBits.W))
remain := (remain | remain_set) & ~remain_clr
val busy = remain.orR
val todo = Mux(busy, remain, io.req.bits.clients)
val next = ~(leftOR(todo) << 1) & todo
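// e.g. todo = b0110: leftOR(todo) = b1110, shifted left = b1100, inverted = b0011, so next = b0010;
// i.e. `next` isolates the lowest set client bit still left to probe.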
if (params.clientBits > 1) {
params.ccover(PopCount(remain) > 1.U, "SOURCEB_MULTI_PROBE", "Had to probe more than one client")
}
assert (!io.req.valid || io.req.bits.clients =/= 0.U)
io.req.ready := !busy
when (io.req.fire) { remain_set := io.req.bits.clients }
// No restrictions on the type of buffer used here
val b = Wire(chiselTypeOf(io.b))
io.b <> params.micro.innerBuf.b(b)
b.valid := busy || io.req.valid
when (b.fire) { remain_clr := next }
params.ccover(b.valid && !b.ready, "SOURCEB_STALL", "Backpressured when issuing a probe")
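// On the first beat the request fields flow through combinationally; RegEnable captures them on io.req.fire
// so they stay stable while the remaining clients are probed.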
val tag = Mux(!busy, io.req.bits.tag, RegEnable(io.req.bits.tag, io.req.fire))
val set = Mux(!busy, io.req.bits.set, RegEnable(io.req.bits.set, io.req.fire))
val param = Mux(!busy, io.req.bits.param, RegEnable(io.req.bits.param, io.req.fire))
b.bits.opcode := TLMessages.Probe
b.bits.param := param
b.bits.size := params.offsetBits.U
b.bits.source := params.clientSource(next)
b.bits.address := params.expandAddress(tag, set, 0.U)
b.bits.mask := ~0.U(params.inner.manager.beatBytes.W)
b.bits.data := 0.U
b.bits.corrupt := false.B
}
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
case class CacheParameters(
level: Int,
ways: Int,
sets: Int,
blockBytes: Int,
beatBytes: Int, // inner
hintsSkipProbe: Boolean)
{
require (ways > 0)
require (sets > 0)
require (blockBytes > 0 && isPow2(blockBytes))
require (beatBytes > 0 && isPow2(beatBytes))
require (blockBytes >= beatBytes)
val blocks = ways * sets
val sizeBytes = blocks * blockBytes
val blockBeats = blockBytes/beatBytes
}
case class InclusiveCachePortParameters(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
object InclusiveCachePortParameters
{
val none = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.none)
val full = InclusiveCachePortParameters(
a = BufferParams.default,
b = BufferParams.default,
c = BufferParams.default,
d = BufferParams.default,
e = BufferParams.default)
// This removes feed-through paths from C=>A and A=>C
val fullC = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.default,
d = BufferParams.none,
e = BufferParams.none)
val flowAD = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.flow,
e = BufferParams.none)
val flowAE = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.flow)
// For innerBuf:
// SinkA: no restrictions, flows into scheduler+putbuffer
// SourceB: no restrictions, flows out of scheduler
// sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
// SourceD: no restrictions, flows out of bankedStore/regout
// SinkE: no restrictions, flows into scheduler
//
// ... so while none is possible, you probably want at least flowAC to cut ready
// from the scheduler delay and flowD to ease SourceD back-pressure
// For outerBufer:
// SourceA: must not be pipe, flows out of scheduler
// SinkB: no restrictions, flows into scheduler
// SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
// SinkD: no restrictions, flows into scheduler & bankedStore
// SourceE: must not be pipe, flows out of scheduler
//
// ... AE take the channel ready into the scheduler, so you need at least flowAE
}
case class InclusiveCacheMicroParameters(
writeBytes: Int, // backing store update granularity
memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
dirReg: Boolean = false,
innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
require (writeBytes > 0 && isPow2(writeBytes))
require (memCycles > 0)
require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}
case class InclusiveCacheControlParameters(
address: BigInt,
beatBytes: Int,
bankedControl: Boolean)
case class InclusiveCacheParameters(
cache: CacheParameters,
micro: InclusiveCacheMicroParameters,
control: Boolean,
inner: TLEdgeIn,
outer: TLEdgeOut)(implicit val p: Parameters)
{
require (cache.ways > 1)
require (cache.sets > 1 && isPow2(cache.sets))
require (micro.writeBytes <= inner.manager.beatBytes)
require (micro.writeBytes <= outer.manager.beatBytes)
require (inner.manager.beatBytes <= cache.blockBytes)
require (outer.manager.beatBytes <= cache.blockBytes)
// Require that all cached address ranges have contiguous blocks
outer.manager.managers.flatMap(_.address).foreach { a =>
require (a.alignment >= cache.blockBytes)
}
// If we are the first level cache, we do not need to support inner-BCE
val firstLevel = !inner.client.clients.exists(_.supports.probe)
// If we are the last level cache, we do not need to support outer-B
val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
require (lastLevel)
// Provision enough resources to achieve full throughput with missing single-beat accesses
val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
val secondary = max(mshrs, micro.memCycles - mshrs)
val putLists = micro.memCycles // allow every request to be single beat
val putBeats = max(2*cache.blockBeats, micro.memCycles)
val relLists = 2
val relBeats = relLists*cache.blockBeats
val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
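// e.g. bitOffsets(0x16) == List(1, 2, 4), since 0x16 == b10110 has bits 1, 2 and 4 set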
val addressMapping = bitOffsets(pickMask)
val addressBits = addressMapping.size
// println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
val allClients = inner.client.clients.size
val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
val clientBits = max(1, clientBitsRaw)
val stateBits = 2
val wayBits = log2Ceil(cache.ways)
val setBits = log2Ceil(cache.sets)
val offsetBits = log2Ceil(cache.blockBytes)
val tagBits = addressBits - setBits - offsetBits
val putBits = log2Ceil(max(putLists, relLists))
require (tagBits > 0)
require (offsetBits > 0)
val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
def clientBit(source: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
}
}
def clientSource(bit: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
}
}
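// Hedged example: with two probe-supporting inner clients owning source IDs [0,4) and [4,8), clientBit(5.U)
// would yield b10 (the second client) and clientSource(b10) would return 4.U, that client's first source ID.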
def parseAddress(x: UInt): (UInt, UInt, UInt) = {
val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
val set = offset >> offsetBits
val tag = set >> setBits
(tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
}
def widen(x: UInt, width: Int): UInt = {
val y = x | 0.U(width.W)
assert (y >> width === 0.U)
y(width-1, 0)
}
def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
Cat(bits.reverse)
}
def restoreAddress(expanded: UInt): UInt = {
val missingBits = flatAddresses
.map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
.groupBy(_._1)
.view
.mapValues(_.map(_._2))
val muxMask = AddressDecoder(missingBits.values.toList)
val mux = missingBits.toList.map { case (bits, addrs) =>
val widen = addrs.map(_.widen(~muxMask))
val matches = AddressSet
.unify(widen.distinct)
.map(_.contains(expanded))
.reduce(_ || _)
(matches, bits.U)
}
expanded | Mux1H(mux)
}
def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
if (micro.dirReg) RegEnable(x, en) else x
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
object MetaData
{
val stateBits = 2
def INVALID: UInt = 0.U(stateBits.W) // way is empty
def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
// Does a request need trunk?
def needT(opcode: UInt, param: UInt): Bool = {
!opcode(2) ||
(opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
}
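// e.g. Get (opcode 4) has opcode(2) set and is neither a Hint nor an Acquire, so needT is false;
// PutFullData (opcode 0) has opcode(2) clear, so needT is true.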
// Does a request prove the client need not be probed?
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
// Acquire(toB) and Get => is N, so no probe
// Acquire(*toT) => is N or B, but need T, so no probe
// Hint => could be anything, so a probe IS needed; however, if hintsSkipProbe is enabled, skip probing the same client
// Put* => is N or B, so probe IS needed
opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
}
def isToN(param: UInt): Bool = {
param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
}
def isToB(param: UInt): Bool = {
param === TLPermissions.TtoB || param === TLPermissions.BtoB
}
}
object InclusiveCacheParameters
{
val lfsrBits = 10
val L2ControlAddress = 0x2010000
val L2ControlSize = 0x1000
def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
// We need 2-3 normal MSHRs to cover the Directory latency
// To fully exploit the memory bandwidth-delay product, we need memCycles/blockBeats MSHRs
max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
}
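// Hedged arithmetic example: with memCycles = 40 and cache.blockBeats = 8, out_mshrs = max(2, ceil(40/8)) = 5
// (or max(3, 5) = 5 when micro.dirReg is set).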
def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
// We need one dedicated MSHR each for channels B and C
2 + out_mshrs(cache, micro)
}
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
| module SourceB( // @[SourceB.scala:33:7]
input clock, // @[SourceB.scala:33:7]
input reset, // @[SourceB.scala:33:7]
output io_req_ready, // @[SourceB.scala:35:14]
input io_req_valid, // @[SourceB.scala:35:14]
input [2:0] io_req_bits_param, // @[SourceB.scala:35:14]
input [12:0] io_req_bits_tag, // @[SourceB.scala:35:14]
input [9:0] io_req_bits_set, // @[SourceB.scala:35:14]
input io_req_bits_clients, // @[SourceB.scala:35:14]
input io_b_ready, // @[SourceB.scala:35:14]
output io_b_valid, // @[SourceB.scala:35:14]
output [1:0] io_b_bits_param, // @[SourceB.scala:35:14]
output [31:0] io_b_bits_address // @[SourceB.scala:35:14]
);
wire io_req_valid_0 = io_req_valid; // @[SourceB.scala:33:7]
wire [2:0] io_req_bits_param_0 = io_req_bits_param; // @[SourceB.scala:33:7]
wire [12:0] io_req_bits_tag_0 = io_req_bits_tag; // @[SourceB.scala:33:7]
wire [9:0] io_req_bits_set_0 = io_req_bits_set; // @[SourceB.scala:33:7]
wire io_req_bits_clients_0 = io_req_bits_clients; // @[SourceB.scala:33:7]
wire io_b_ready_0 = io_b_ready; // @[SourceB.scala:33:7]
wire _b_bits_address_base_T_2 = reset; // @[Parameters.scala:222:12]
wire _b_bits_address_base_T_8 = reset; // @[Parameters.scala:222:12]
wire _b_bits_address_base_T_14 = reset; // @[Parameters.scala:222:12]
wire [2:0] io_b_bits_opcode = 3'h6; // @[SourceB.scala:33:7]
wire [2:0] io_b_bits_size = 3'h6; // @[SourceB.scala:33:7]
wire [2:0] b_bits_opcode = 3'h6; // @[SourceB.scala:65:17]
wire [2:0] b_bits_size = 3'h6; // @[SourceB.scala:65:17]
wire [7:0] io_b_bits_source = 8'h40; // @[SourceB.scala:33:7]
wire [7:0] b_bits_source = 8'h40; // @[SourceB.scala:65:17]
wire [7:0] io_b_bits_mask = 8'hFF; // @[SourceB.scala:33:7]
wire [7:0] b_bits_mask = 8'hFF; // @[SourceB.scala:65:17]
wire [7:0] _b_bits_mask_T = 8'hFF; // @[SourceB.scala:81:23]
wire [63:0] io_b_bits_data = 64'h0; // @[SourceB.scala:33:7]
wire [63:0] b_bits_data = 64'h0; // @[SourceB.scala:65:17]
wire io_b_bits_corrupt = 1'h0; // @[SourceB.scala:33:7]
wire b_bits_corrupt = 1'h0; // @[SourceB.scala:65:17]
wire _b_bits_address_base_T = 1'h0; // @[Parameters.scala:222:15]
wire _b_bits_address_base_T_4 = 1'h0; // @[Parameters.scala:222:12]
wire _b_bits_address_base_T_6 = 1'h0; // @[Parameters.scala:222:15]
wire _b_bits_address_base_T_10 = 1'h0; // @[Parameters.scala:222:12]
wire _b_bits_address_base_T_12 = 1'h0; // @[Parameters.scala:222:15]
wire _b_bits_address_base_T_16 = 1'h0; // @[Parameters.scala:222:12]
wire [1:0] b_bits_address_hi_hi_hi_lo = 2'h0; // @[Parameters.scala:230:8]
wire [5:0] b_bits_address_base_y_2 = 6'h0; // @[Parameters.scala:221:15]
wire [5:0] _b_bits_address_base_T_17 = 6'h0; // @[Parameters.scala:223:6]
wire _b_bits_address_base_T_1 = 1'h1; // @[Parameters.scala:222:24]
wire _b_bits_address_base_T_7 = 1'h1; // @[Parameters.scala:222:24]
wire _b_bits_address_base_T_13 = 1'h1; // @[Parameters.scala:222:24]
wire _io_req_ready_T; // @[SourceB.scala:61:21]
wire b_ready = io_b_ready_0; // @[SourceB.scala:33:7, :65:17]
wire b_valid; // @[SourceB.scala:65:17]
wire [1:0] b_bits_param; // @[SourceB.scala:65:17]
wire [31:0] b_bits_address; // @[SourceB.scala:65:17]
wire io_req_ready_0; // @[SourceB.scala:33:7]
wire [1:0] io_b_bits_param_0; // @[SourceB.scala:33:7]
wire [31:0] io_b_bits_address_0; // @[SourceB.scala:33:7]
wire io_b_valid_0; // @[SourceB.scala:33:7]
reg remain; // @[SourceB.scala:46:25]
wire busy = remain; // @[SourceB.scala:46:25, :51:23]
wire remain_set; // @[SourceB.scala:47:30]
wire remain_clr; // @[SourceB.scala:48:30]
wire _remain_T = remain | remain_set; // @[SourceB.scala:46:25, :47:30, :49:23]
wire _remain_T_1 = ~remain_clr; // @[SourceB.scala:48:30, :49:39]
wire _remain_T_2 = _remain_T & _remain_T_1; // @[SourceB.scala:49:{23,37,39}]
wire todo = busy ? remain : io_req_bits_clients_0; // @[SourceB.scala:33:7, :46:25, :51:23, :52:19]
wire _next_T = todo; // @[package.scala:254:17]
wire [1:0] _next_T_1 = {_next_T, 1'h0}; // @[package.scala:254:17]
wire [1:0] _next_T_2 = ~_next_T_1; // @[SourceB.scala:53:{16,31}]
wire [1:0] next = {1'h0, _next_T_2[0] & todo}; // @[SourceB.scala:52:19, :53:{16,37}] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
// Like assert, but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
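// i.e. `visible` is true only if every client that could have issued `source` declares `address` within its
// visibility; clients whose sourceId range does not contain `source` pass vacuously.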
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
// The monitor doesn't check for AcquireT vs AcquireB; it assumes that AcquireB implies AcquireT and only checks for AcquireB
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
      monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
    monAssert (IfThen(my_resp_pend, !my_a_first_beat),
      "Request message should not be sent with a source ID, for which a response message " +
      "is already pending (not received until current cycle) for a prior request message " +
      "with the same source ID" + extra)
    assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
      "Response message should be accepted with a source ID only if a request message with the " +
      "same source ID has been accepted or is being accepted in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
      "Response message should be sent with a source ID only if a request message with the " +
      "same source ID has been accepted or is being sent in the current cycle" + extra)
    assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
      "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " +
      "message" + extra)
    assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
      "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " +
      "request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
  // This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
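    // Worked example of the encoding used below (illustrative only, not part of the original file):
    // a Get request (opcode 4) is recorded per source ID as (4 << 1) | 1 = 9, so a stored value
    // of 0 unambiguously means "nothing in flight"; a_opcode_lookup later shifts right by one to
    // strip that low bit before the opcode is compared against the response maps.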
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
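// Hedged usage sketch (illustrative only, not part of the original file). It shows the common
// DecoupledHelper idiom: collect every ready/valid/enable term once, then compute each fire()
// while excluding the signal currently being driven, so ready and valid never depend on
// themselves combinationally. The io names below are assumptions invented for this example.
class DecoupledHelperExample extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
    val allow = Input(Bool())
  })
  val helper = DecoupledHelper(io.in.valid, io.out.ready, io.allow)
  io.out.valid := helper.fire(io.out.ready) // all terms except the one being driven
  io.in.ready := helper.fire(io.in.valid)
  io.out.bits := io.in.bits
}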
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
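// Worked examples for Str (illustrative only, not part of the original file):
//   Str("ab") elaborates to 0x6162.U(16.W)   ('a' = 0x61 in the high byte, 'b' = 0x62 in the low)
//   Str('!')  elaborates to 0x21.U(8.W)
//   Str(x: UInt) / Str(x: SInt) produce the ASCII decimal rendering (leading digits become
//   spaces, negatives get a '-'), which is handy for printf-style debug output.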
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
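// Worked example for Majority (illustrative only, not part of the original file): with
// in = "b110".U the result is true in simulation (two of the three bits are set), while
// in = "b100".U gives false.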
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
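// Hedged usage sketch (illustrative only, not part of the original file): computes the byte
// enables for an access of 2^lgSize bytes at a given beat offset on an 8-byte data bus. The
// io names and widths are assumptions invented for this example.
class MaskGenExample extends Module {
  val io = IO(new Bundle {
    val addr = Input(UInt(3.W))   // byte offset within an 8-byte beat
    val lgSize = Input(UInt(2.W)) // log2 of the access size in bytes
    val mask = Output(UInt(8.W))
  })
  // e.g. addr = 4, lgSize = 2 selects bytes 4-7, giving mask = 0xf0;
  // addr = 0, lgSize = 3 covers the whole beat, giving mask = 0xff
  io.mask := MaskGen(io.addr, io.lgSize, beatBytes = 8)
}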
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
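// Hedged usage sketch (illustrative only, not part of the original file): shows reading a
// runtime plusarg and wiring up a timeout. The plusarg names, the io, and the counter are
// all assumptions invented for this example.
class PlusArgExample extends Module {
  val io = IO(new Bundle { val busy = Input(Bool()) })
  // Running the simulator with +example_verbose_cycles=100 makes this signal 100.U
  val verboseCycles = PlusArg("example_verbose_cycles", default = 0, docstring = "cycles of verbose output")
  val busyCycles = RegInit(0.U(32.W))
  busyCycles := Mux(io.busy, busyCycles + 1.U, 0.U)
  when (busyCycles < verboseCycles) { printf("still busy\n") }
  // Kills the simulation if busyCycles reaches +example_watchdog (the default of 0 disables the check)
  PlusArg.timeout("example_watchdog", default = 0, docstring = "max consecutive busy cycles")(busyCycles)
}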
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
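  // Worked example (illustrative only, not part of the original file):
  //   leftOR ("b01000".U(5.W)) = "b11000".U   // each set bit smears toward the MSB
  //   rightOR("b01000".U(5.W)) = "b01111".U   // each set bit smears toward the LSB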
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
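  // Worked example (illustrative only, not part of the original file): key order follows first
  // appearance in the input, unlike Seq.groupBy, which returns an unordered Map:
  //   groupByIntoSeq(Seq(1, 2, 3, 4, 5))(_ % 2) == Seq(1 -> List(1, 3, 5), 0 -> List(2, 4))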
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
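// Worked example (illustrative only, not part of the original file): IdRange(4, 8) covers the
// source IDs 4, 5, 6 and 7 (the end is exclusive), so
//   IdRange(4, 8).contains(7)               == true
//   IdRange(4, 8).contains(8)               == false
//   IdRange(4, 8).overlaps(IdRange(7, 10))  == true
//   IdRange(4, 8).shift(8)                  == IdRange(12, 16)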
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
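// Worked example (illustrative only, not part of the original file):
//   TransferSizes(4, 64).contains(16)                     == true   // power of 2 within [4, 64]
//   TransferSizes(4, 64).contains(3)                      == false  // not a power of 2
//   TransferSizes(4, 16).intersect(TransferSizes(8, 64))  == TransferSizes(8, 16)
//   TransferSizes(4, 16).mincover(TransferSizes(64, 128)) == TransferSizes(4, 128)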
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
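// Worked example (illustrative only, not part of the original file), using the base/mask pairs
// from the e.g. comments above AddressSet:
//   AddressSet(0x1000, 0xfff).contains(0x1abc)  == true   // one contiguous 4 KiB region
//   AddressSet(0x1000, 0xf0f).contains(0x1104)  == true   // falls in the 0x1100-0x110f stripe
//   AddressSet(0x1000, 0xf0f).contains(0x1014)  == false  // bit 4 is not part of the mask
//   AddressSet(0x1000, 0xfff).alignment         == 0x1000
//   AddressSet(0x1000, 0xf0f).contiguous        == false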
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
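// Illustrative worked example (not in the original source): UIntToOH1(lgSize, maxLgSize)
// yields (1 << lgSize) - 1, so with lgSize = 3 the mask is 0x7 and
//   isAligned(0x18.U, 3.U)  // => true.B  (0x18 & 0x7 == 0)
//   isAligned(0x1c.U, 3.U)  // => false.B (0x1c & 0x7 == 4)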
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
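// Illustrative worked example (not in the original source): assuming beatBytes = 8 and a
// data-carrying message with size = 5 (32 bytes): cutoff = 3, decode = (1 << 5) >> 3 = 4,
// so numBeats = 4 and numBeats1 = 3. Messages without data, and data messages no larger
// than one beat, take a single beat (numBeats = 1, numBeats1 = 0).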
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
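// Illustrative note (not in the original source): the counter is loaded with beats1 on the
// first accepted beat and decremented on every fire, so for a 4-beat burst (beats1 = 3):
//   beat:   0     1     2     3
//   first:  true  false false false
//   last:   false false false true
//   count:  0     1     2     3   (counter1 wraps to all-ones on beat 0, masking count to 0)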
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
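// Illustrative note (not in the original source): per the lookup above, NtoB only asks for
// Branch (read) permission and so does not need T, whereas NtoT/BtoT, all Puts and atomics,
// and PREFETCH_WRITE hints do; Get and PREFETCH_READ never need T.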
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
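// Illustrative usage sketch (not part of the original file): the (legal, bits) pair returned
// by these constructors typically drives a client's A channel, e.g.
//   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
//   out.a.valid := wantRead && legal
//   out.a.bits  := get
// where edge, addr, wantRead and out are hypothetical values inside a client LazyModule impl.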
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_60( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [3:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [3:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [9:0] inflight; // @[Monitor.scala:614:27]
reg [39:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [39:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire [15:0] _GEN_0 = {12'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [15:0] _GEN_3 = {12'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [9:0] inflight_1; // @[Monitor.scala:726:35]
reg [39:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
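// Illustrative note (not in the original source): for each client, the term is true either
// because the source does not belong to that client or because the address falls inside one of
// its visibility sets; ANDing over all clients therefore only constrains the client that
// actually owns the offending source ID.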
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn't check for acquire T vs acquire B; it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address=== address,"'B' channel address changed within multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
      monAssert (c.bits.address=== address,"'C' channel address changed within multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
      assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra)
      assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
  // This is left in for almond, which doesn't adhere to the TileLink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
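    // Illustrative note (added comment, not part of the original source): the
    // bookkeeping below records each in-flight opcode and size as
    // (value << 1) | 1, so an all-zero field means "no request outstanding".
    // For example, with log_a_opcode_bus_size = 2, a Get (opcode 4) from
    // source 2 is stored as (4 << 1) | 1 = 9 at bit offset 2 << 2 = 8 of
    // inflight_opcodes; a_opcode_lookup extracts that 4-bit field with the
    // mask size_to_numfullbits(1.U << 2.U) = 0xf and shifts right by one to
    // recover opcode 4.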
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
    val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add one so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
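// Illustrative note (added comment, not part of the original source): Majority
// is true when strictly more than half of the inputs are set. For a 3-bit
// input n = (3 >> 1) + 1 = 2, so Majority("b110".U) is true while
// Majority("b100".U) is false.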
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 takes 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
   * initial block and thus accessing it from another initial block is racy.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
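// A minimal usage sketch (added comment, not part of the original source; the
// counter and the "max_cycles" plusarg name are hypothetical):
//   val cycles = RegInit(0.U(32.W))
//   cycles := cycles + 1.U
//   // Running the simulator with +max_cycles=1000 kills emulation once the
//   // counter reaches 1000; the default of 0 never asserts.
//   PlusArg.timeout("max_cycles", default = 0, docstring = "stop after N cycles")(cycles)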
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
    /** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
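    // Worked example (added comment, not part of the original source): with a
    // 3-bit shift amount n, the sign bit selects a compensating shift of
    // 1 << 2 = 4. For x = "b1000".U and n = -1 (encoded "b111"):
    //   x << n computes (x << 3) >> 4 = x >> 1, and
    //   x >> n computes (x << 4) >> 3 = x << 1,
    // matching the "negative amount reverses direction" contract above.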
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
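    // Worked example (added comment, not part of the original source): with
    // n = 5, 3.U.addWrap(4.U, 5) forms z = 7 and, since 7 >= 5, returns 2;
    // 1.U.subWrap(3.U, 5) forms a negative z (borrow set), so 5 is added back
    // and the result is 3.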
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
  // Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
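  // Worked example (added comment, not part of the original source): for the
  // 8-bit value "b00010100", leftOR propagates each set bit toward the MSB,
  // giving "b11111100", while rightOR propagates toward the LSB, giving
  // "b00011111".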
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
  // HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A potentially empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
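// Worked example (added comment, not part of the original source): for
// IdRange(4, 6) the members are {4, 5}; start ^ (end-1) = 4 ^ 5 = 1, so
// largestDeltaBit = 0 and smallestCommonBit = 1. contains(x) then reduces to
// (x >> 1) === 2.U with the low-bit bound checks trivially true, i.e. a single
// prefix comparison instead of two full-width magnitude comparators.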
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
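// Worked example (added comment, not part of the original source):
// TransferSizes(4, 16).intersect(TransferSizes(8, 64)) == TransferSizes(8, 16),
// while TransferSizes(1, 2).mincover(TransferSizes(8, 16)) == TransferSizes(1, 16),
// which also admits size 4 even though neither operand contains it (hence
// "mincover" rather than a union).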
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask gives the bits consumed by the manager
// e.g.: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g.: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
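// Worked example (added comment, not part of the original source):
// AddressSet(0x1000, 0xfff) is the contiguous range 0x1000-0x1fff, while
// AddressSet(0x1000, 0xf0f) is the sixteen 16-byte blocks 0x1000-0x100f,
// 0x1100-0x110f, ..., 0x1f00-0x1f0f. contains(0x1104) holds because
// (0x1104 ^ 0x1000) & ~0xf0f == 0, i.e. the address differs from the base only
// in bits the mask marks as don't-care.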
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
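  // Worked example (added comment, not part of the original source):
  // misaligned(0x1004, 0x18) covers the byte range [0x1004, 0x101c) with
  // aligned power-of-two sets: AddressSet(0x1004, 0x3), AddressSet(0x1008, 0x7),
  // AddressSet(0x1010, 0x7), AddressSet(0x1018, 0x3).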
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
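  // Worked example (added comment, not part of the original source):
  // unify(Seq(AddressSet(0x0, 0xff), AddressSet(0x100, 0xff))) pairs the two
  // sets, which differ only in bit 0x100, into the single wider set
  // AddressSet(0x0, 0x1ff).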
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
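  // Worked example (added comment, not part of the original source): with
  // maxLgSize >= 2, isAligned(0x1004.U, 2.U) builds the mask
  // UIntToOH1(2.U, maxLgSize) = 0x3, and 0x1004 & 0x3 == 0, so the 4-byte
  // access is aligned; isAligned(0x1006.U, 2.U) fails because 0x1006 & 0x3 == 0x2.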
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
      // Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
      // Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
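  // Worked example (assumed parameters, not from the original file): with
  // manager.beatBytes = 8 and a data-carrying message of size = 5 (2^5 = 32 bytes),
  // cutoff = 3 and decode = (1 << 5) >> 3 = 4, so numBeats returns 4; any message
  // without data (hasData = false) reports exactly 1 beat.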
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
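  // Worked example (illustrative): for a 4-beat burst, beats1 = 3. On the first fired beat
  // counter = 0, so first is asserted and count = 0; the counter then loads 3 and counts
  // down, yielding count = 1, 2, 3 on the following beats, with last and done asserted
  // together on the final beat (counter = 1).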
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
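  // Example (derived from the lookup table above): an AcquireBlock with param NtoB does not
  // need T permissions, while NtoT/BtoT acquires, all Put/AMO opcodes, and PREFETCH_WRITE
  // hints do.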
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
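  // Usage sketch (hypothetical names and bound, not from the original source):
  //   val (inflight, next_inflight) = edge.inFlight(tlBundle)
  //   assert(next_inflight <= maxOutstanding.U) // maxOutstanding chosen by the integrator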
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
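  // Example (illustrative): myTranspose(Seq(Seq(1, 2), Seq(3, 4))) == Seq(Seq(1, 3), Seq(2, 4));
  // unlike Seq.transpose, empty inner sequences are dropped at each step, so ragged inputs
  // are handled instead of raising an error.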
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_71( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [10:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [13:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [10:0] io_in_d_bits_source // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire a_first_done = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [1:0] size; // @[Monitor.scala:389:22]
reg [10:0] source; // @[Monitor.scala:390:22]
reg [13:0] address; // @[Monitor.scala:391:22]
reg d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] size_1; // @[Monitor.scala:540:22]
reg [10:0] source_1; // @[Monitor.scala:541:22]
reg [1039:0] inflight; // @[Monitor.scala:614:27]
reg [4159:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [4159:0] inflight_sizes; // @[Monitor.scala:618:33]
reg a_first_counter_1; // @[Edges.scala:229:27]
reg d_first_counter_1; // @[Edges.scala:229:27]
wire [2047:0] _GEN = {2037'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_0 = a_first_done & ~a_first_counter_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_1 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [2047:0] _GEN_2 = {2037'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [1039:0] inflight_1; // @[Monitor.scala:726:35]
reg [4159:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg d_first_counter_2; // @[Edges.scala:229:27]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File EgressUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
class EgressUnit(coupleSAVA: Boolean, combineSAST: Boolean, inParams: Seq[ChannelParams], ingressParams: Seq[IngressChannelParams], cParam: EgressChannelParams)
(implicit p: Parameters) extends AbstractOutputUnit(inParams, ingressParams, cParam)(p) {
class EgressUnitIO extends AbstractOutputUnitIO(inParams, ingressParams, cParam) {
val out = Decoupled(new EgressFlit(cParam.payloadBits))
}
val io = IO(new EgressUnitIO)
val channel_empty = RegInit(true.B)
val flow = Reg(new FlowRoutingBundle)
val q = Module(new Queue(new EgressFlit(cParam.payloadBits), 3 - (if (combineSAST) 1 else 0), flow=true))
q.io.enq.valid := io.in(0).valid
q.io.enq.bits.head := io.in(0).bits.head
q.io.enq.bits.tail := io.in(0).bits.tail
val flows = cParam.possibleFlows.toSeq
if (flows.size == 0) {
q.io.enq.bits.ingress_id := 0.U(1.W)
} else {
q.io.enq.bits.ingress_id := Mux1H(
flows.map(f => (f.ingressNode.U === io.in(0).bits.flow.ingress_node &&
f.ingressNodeId.U === io.in(0).bits.flow.ingress_node_id)),
flows.map(f => f.ingressId.U(ingressIdBits.W))
)
}
q.io.enq.bits.payload := io.in(0).bits.payload
io.out <> q.io.deq
assert(!(q.io.enq.valid && !q.io.enq.ready))
io.credit_available(0) := q.io.count === 0.U
io.channel_status(0).occupied := !channel_empty
io.channel_status(0).flow := flow
when (io.credit_alloc(0).alloc && io.credit_alloc(0).tail) {
channel_empty := true.B
if (coupleSAVA) io.channel_status(0).occupied := false.B
}
when (io.allocs(0).alloc) {
channel_empty := false.B
flow := io.allocs(0).flow
}
}
| module EgressUnit_14( // @[EgressUnit.scala:12:7]
input clock, // @[EgressUnit.scala:12:7]
input reset, // @[EgressUnit.scala:12:7]
input io_in_0_valid, // @[EgressUnit.scala:18:14]
input io_in_0_bits_head, // @[EgressUnit.scala:18:14]
input io_in_0_bits_tail, // @[EgressUnit.scala:18:14]
input [72:0] io_in_0_bits_payload, // @[EgressUnit.scala:18:14]
input [3:0] io_in_0_bits_flow_ingress_node, // @[EgressUnit.scala:18:14]
input [1:0] io_in_0_bits_flow_ingress_node_id, // @[EgressUnit.scala:18:14]
output io_credit_available_0, // @[EgressUnit.scala:18:14]
output io_channel_status_0_occupied, // @[EgressUnit.scala:18:14]
input io_allocs_0_alloc, // @[EgressUnit.scala:18:14]
input io_credit_alloc_0_alloc, // @[EgressUnit.scala:18:14]
input io_credit_alloc_0_tail, // @[EgressUnit.scala:18:14]
input io_out_ready, // @[EgressUnit.scala:18:14]
output io_out_valid, // @[EgressUnit.scala:18:14]
output io_out_bits_head, // @[EgressUnit.scala:18:14]
output io_out_bits_tail, // @[EgressUnit.scala:18:14]
output [72:0] io_out_bits_payload // @[EgressUnit.scala:18:14]
);
wire _q_io_enq_ready; // @[EgressUnit.scala:22:17]
wire [1:0] _q_io_count; // @[EgressUnit.scala:22:17]
reg channel_empty; // @[EgressUnit.scala:20:30]
wire _q_io_enq_bits_ingress_id_T_22 = io_in_0_bits_flow_ingress_node_id == 2'h1; // @[EgressUnit.scala:32:27] |
Generate the Verilog code corresponding to the following Chisel files.
File SourceA.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.tilelink._
class SourceARequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params)
{
val tag = UInt(params.tagBits.W)
val set = UInt(params.setBits.W)
val param = UInt(3.W)
val source = UInt(params.outer.bundle.sourceBits.W)
val block = Bool()
}
class SourceA(params: InclusiveCacheParameters) extends Module
{
val io = IO(new Bundle {
val req = Flipped(Decoupled(new SourceARequest(params)))
val a = Decoupled(new TLBundleA(params.outer.bundle))
})
// ready must be a register, because we derive valid from ready
require (!params.micro.outerBuf.a.pipe && params.micro.outerBuf.a.isDefined)
val a = Wire(chiselTypeOf(io.a))
io.a <> params.micro.outerBuf.a(a)
io.req.ready := a.ready
a.valid := io.req.valid
params.ccover(a.valid && !a.ready, "SOURCEA_STALL", "Backpressured when issuing an Acquire")
a.bits.opcode := Mux(io.req.bits.block, TLMessages.AcquireBlock, TLMessages.AcquirePerm)
a.bits.param := io.req.bits.param
a.bits.size := params.offsetBits.U
a.bits.source := io.req.bits.source
a.bits.address := params.expandAddress(io.req.bits.tag, io.req.bits.set, 0.U)
a.bits.mask := ~0.U(params.outer.manager.beatBytes.W)
a.bits.data := 0.U
a.bits.corrupt := false.B
}
File Parameters.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
import freechips.rocketchip.util.property.cover
import scala.math.{min,max}
case class CacheParameters(
level: Int,
ways: Int,
sets: Int,
blockBytes: Int,
beatBytes: Int, // inner
hintsSkipProbe: Boolean)
{
require (ways > 0)
require (sets > 0)
require (blockBytes > 0 && isPow2(blockBytes))
require (beatBytes > 0 && isPow2(beatBytes))
require (blockBytes >= beatBytes)
val blocks = ways * sets
val sizeBytes = blocks * blockBytes
val blockBeats = blockBytes/beatBytes
}
case class InclusiveCachePortParameters(
a: BufferParams,
b: BufferParams,
c: BufferParams,
d: BufferParams,
e: BufferParams)
{
def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e))
}
object InclusiveCachePortParameters
{
val none = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.none)
val full = InclusiveCachePortParameters(
a = BufferParams.default,
b = BufferParams.default,
c = BufferParams.default,
d = BufferParams.default,
e = BufferParams.default)
// This removes feed-through paths from C=>A and A=>C
val fullC = InclusiveCachePortParameters(
a = BufferParams.none,
b = BufferParams.none,
c = BufferParams.default,
d = BufferParams.none,
e = BufferParams.none)
val flowAD = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.flow,
e = BufferParams.none)
val flowAE = InclusiveCachePortParameters(
a = BufferParams.flow,
b = BufferParams.none,
c = BufferParams.none,
d = BufferParams.none,
e = BufferParams.flow)
// For innerBuf:
// SinkA: no restrictions, flows into scheduler+putbuffer
// SourceB: no restrictions, flows out of scheduler
// sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore
// SourceD: no restrictions, flows out of bankedStore/regout
// SinkE: no restrictions, flows into scheduler
//
// ... so while none is possible, you probably want at least flowAC to cut ready
// from the scheduler delay and flowD to ease SourceD back-pressure
  // For outerBuffer:
// SourceA: must not be pipe, flows out of scheduler
// SinkB: no restrictions, flows into scheduler
// SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored
// SinkD: no restrictions, flows into scheduler & bankedStore
// SourceE: must not be pipe, flows out of scheduler
//
// ... AE take the channel ready into the scheduler, so you need at least flowAE
}
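// Illustrative sketch (an assumption, not a configuration defined in this file): following
// the buffering guidance above, an inner-port choice that cuts the ready path on A/C and
// eases SourceD back-pressure with flow buffers could look like:
//
//   val flowACD = InclusiveCachePortParameters(
//     a = BufferParams.flow,
//     b = BufferParams.none,
//     c = BufferParams.flow,
//     d = BufferParams.flow,
//     e = BufferParams.none)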
case class InclusiveCacheMicroParameters(
writeBytes: Int, // backing store update granularity
memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz)
portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes
dirReg: Boolean = false,
innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none
outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE
{
require (writeBytes > 0 && isPow2(writeBytes))
require (memCycles > 0)
  require (portFactor >= 2) // for inner RMW and concurrent outer Release + Grant
}
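// Example instantiation (values are assumptions for illustration only):
//   val micro = InclusiveCacheMicroParameters(writeBytes = 8)
// keeps the defaults of memCycles = 40 and portFactor = 4, giving the backing store
// numSubBanks = (widest TL beatBytes * 4) / 8, as described above.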
case class InclusiveCacheControlParameters(
address: BigInt,
beatBytes: Int,
bankedControl: Boolean)
case class InclusiveCacheParameters(
cache: CacheParameters,
micro: InclusiveCacheMicroParameters,
control: Boolean,
inner: TLEdgeIn,
outer: TLEdgeOut)(implicit val p: Parameters)
{
require (cache.ways > 1)
require (cache.sets > 1 && isPow2(cache.sets))
require (micro.writeBytes <= inner.manager.beatBytes)
require (micro.writeBytes <= outer.manager.beatBytes)
require (inner.manager.beatBytes <= cache.blockBytes)
require (outer.manager.beatBytes <= cache.blockBytes)
// Require that all cached address ranges have contiguous blocks
outer.manager.managers.flatMap(_.address).foreach { a =>
require (a.alignment >= cache.blockBytes)
}
// If we are the first level cache, we do not need to support inner-BCE
val firstLevel = !inner.client.clients.exists(_.supports.probe)
// If we are the last level cache, we do not need to support outer-B
val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED)
require (lastLevel)
// Provision enough resources to achieve full throughput with missing single-beat accesses
val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro)
val secondary = max(mshrs, micro.memCycles - mshrs)
val putLists = micro.memCycles // allow every request to be single beat
val putBeats = max(2*cache.blockBeats, micro.memCycles)
val relLists = 2
val relBeats = relLists*cache.blockBeats
val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address))
val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_))
def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] =
if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail)
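  // Worked example: bitOffsets(0xB) (binary 1011) = List(0, 1, 3), i.e. the positions of the
  // set bits in pickMask -- the physical address bits actually carried through the cache.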
val addressMapping = bitOffsets(pickMask)
val addressBits = addressMapping.size
// println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}")
val allClients = inner.client.clients.size
val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size
val clientBits = max(1, clientBitsRaw)
val stateBits = 2
val wayBits = log2Ceil(cache.ways)
val setBits = log2Ceil(cache.sets)
val offsetBits = log2Ceil(cache.blockBytes)
val tagBits = addressBits - setBits - offsetBits
val putBits = log2Ceil(max(putLists, relLists))
require (tagBits > 0)
require (offsetBits > 0)
val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1
val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1
val innerMaskBits = inner.manager.beatBytes / micro.writeBytes
val outerMaskBits = outer.manager.beatBytes / micro.writeBytes
def clientBit(source: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse)
}
}
def clientSource(bit: UInt): UInt = {
if (clientBitsRaw == 0) {
0.U
} else {
Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U))
}
}
def parseAddress(x: UInt): (UInt, UInt, UInt) = {
val offset = Cat(addressMapping.map(o => x(o,o)).reverse)
val set = offset >> offsetBits
val tag = set >> setBits
(tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0))
}
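  // Worked example (assumed geometry): with offsetBits = 6, setBits = 11 and tagBits = 9,
  // parseAddress first compresses the address to its 26 mapped bits, then returns
  // (bits 25..17 as tag, bits 16..6 as set, bits 5..0 as offset).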
def widen(x: UInt, width: Int): UInt = {
val y = x | 0.U(width.W)
assert (y >> width === 0.U)
y(width-1, 0)
}
def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = {
val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits))
val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) }
addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) }
Cat(bits.reverse)
}
def restoreAddress(expanded: UInt): UInt = {
val missingBits = flatAddresses
.map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match
.groupBy(_._1)
.view
.mapValues(_.map(_._2))
val muxMask = AddressDecoder(missingBits.values.toList)
val mux = missingBits.toList.map { case (bits, addrs) =>
val widen = addrs.map(_.widen(~muxMask))
val matches = AddressSet
.unify(widen.distinct)
.map(_.contains(expanded))
.reduce(_ || _)
(matches, bits.U)
}
expanded | Mux1H(mux)
}
def dirReg[T <: Data](x: T, en: Bool = true.B): T = {
if (micro.dirReg) RegEnable(x, en) else x
}
def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc)
}
object MetaData
{
val stateBits = 2
def INVALID: UInt = 0.U(stateBits.W) // way is empty
def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk
def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk
def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch
// Does a request need trunk?
def needT(opcode: UInt, param: UInt): Bool = {
!opcode(2) ||
(opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) ||
((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB)
}
// Does a request prove the client need not be probed?
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
// Acquire(toB) and Get => is N, so no probe
// Acquire(*toT) => is N or B, but need T, so no probe
// Hint => could be anything, so probe IS needed, if hintsSkipProbe is enabled, skip probe the same client
// Put* => is N or B, so probe IS needed
opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B)
}
def isToN(param: UInt): Bool = {
param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
}
def isToB(param: UInt): Bool = {
param === TLPermissions.TtoB || param === TLPermissions.BtoB
}
}
object InclusiveCacheParameters
{
val lfsrBits = 10
val L2ControlAddress = 0x2010000
val L2ControlSize = 0x1000
def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
// We need 2-3 normal MSHRs to cover the Directory latency
// To fully exploit memory bandwidth-delay-product, we need memCyles/blockBeats MSHRs
max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
}
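  // Worked example (assumed 64-byte blocks on an 8-byte beat, default micro parameters):
  // blockBeats = 8 and memCycles = 40 give (40 + 7) / 8 = 5, so out_mshrs = max(2, 5) = 5;
  // all_mshrs below then adds the dedicated B and C MSHRs, giving 7.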
def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
// We need a dedicated MSHR for B+C each
2 + out_mshrs(cache, micro)
}
class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
| module SourceA_7( // @[SourceA.scala:33:7]
input clock, // @[SourceA.scala:33:7]
input reset, // @[SourceA.scala:33:7]
output io_req_ready, // @[SourceA.scala:35:14]
input io_req_valid, // @[SourceA.scala:35:14]
input [8:0] io_req_bits_tag, // @[SourceA.scala:35:14]
input [10:0] io_req_bits_set, // @[SourceA.scala:35:14]
input [2:0] io_req_bits_param, // @[SourceA.scala:35:14]
input [3:0] io_req_bits_source, // @[SourceA.scala:35:14]
input io_req_bits_block, // @[SourceA.scala:35:14]
input io_a_ready, // @[SourceA.scala:35:14]
output io_a_valid, // @[SourceA.scala:35:14]
output [2:0] io_a_bits_opcode, // @[SourceA.scala:35:14]
output [2:0] io_a_bits_param, // @[SourceA.scala:35:14]
output [2:0] io_a_bits_size, // @[SourceA.scala:35:14]
output [3:0] io_a_bits_source, // @[SourceA.scala:35:14]
output [31:0] io_a_bits_address, // @[SourceA.scala:35:14]
output [7:0] io_a_bits_mask, // @[SourceA.scala:35:14]
output [63:0] io_a_bits_data, // @[SourceA.scala:35:14]
output io_a_bits_corrupt // @[SourceA.scala:35:14]
);
wire io_req_valid_0 = io_req_valid; // @[SourceA.scala:33:7]
wire [8:0] io_req_bits_tag_0 = io_req_bits_tag; // @[SourceA.scala:33:7]
wire [10:0] io_req_bits_set_0 = io_req_bits_set; // @[SourceA.scala:33:7]
wire [2:0] io_req_bits_param_0 = io_req_bits_param; // @[SourceA.scala:33:7]
wire [3:0] io_req_bits_source_0 = io_req_bits_source; // @[SourceA.scala:33:7]
wire io_req_bits_block_0 = io_req_bits_block; // @[SourceA.scala:33:7]
wire io_a_ready_0 = io_a_ready; // @[SourceA.scala:33:7]
wire _a_bits_address_base_T_2 = reset; // @[Parameters.scala:222:12]
wire _a_bits_address_base_T_8 = reset; // @[Parameters.scala:222:12]
wire _a_bits_address_base_T_14 = reset; // @[Parameters.scala:222:12]
wire [2:0] a_bits_size = 3'h6; // @[SourceA.scala:43:15]
wire [63:0] a_bits_data = 64'h0; // @[SourceA.scala:43:15]
wire a_bits_corrupt = 1'h0; // @[SourceA.scala:43:15]
wire _a_bits_address_base_T = 1'h0; // @[Parameters.scala:222:15]
wire _a_bits_address_base_T_4 = 1'h0; // @[Parameters.scala:222:12]
wire _a_bits_address_base_T_6 = 1'h0; // @[Parameters.scala:222:15]
wire _a_bits_address_base_T_10 = 1'h0; // @[Parameters.scala:222:12]
wire _a_bits_address_base_T_12 = 1'h0; // @[Parameters.scala:222:15]
wire _a_bits_address_base_T_16 = 1'h0; // @[Parameters.scala:222:12]
wire _a_bits_address_base_T_1 = 1'h1; // @[Parameters.scala:222:24]
wire _a_bits_address_base_T_7 = 1'h1; // @[Parameters.scala:222:24]
wire _a_bits_address_base_T_13 = 1'h1; // @[Parameters.scala:222:24]
wire [5:0] a_bits_address_base_y_2 = 6'h0; // @[Parameters.scala:221:15]
wire [5:0] _a_bits_address_base_T_17 = 6'h0; // @[Parameters.scala:223:6]
wire [1:0] a_bits_address_lo_lo_hi_hi = 2'h0; // @[Parameters.scala:230:8]
wire [1:0] a_bits_address_hi_hi_hi_lo = 2'h0; // @[Parameters.scala:230:8]
wire a_ready; // @[SourceA.scala:43:15]
wire [7:0] a_bits_mask = 8'hFF; // @[SourceA.scala:43:15]
wire [7:0] _a_bits_mask_T = 8'hFF; // @[SourceA.scala:55:21]
wire a_valid = io_req_valid_0; // @[SourceA.scala:33:7, :43:15]
wire [8:0] a_bits_address_base_y = io_req_bits_tag_0; // @[SourceA.scala:33:7]
wire [10:0] a_bits_address_base_y_1 = io_req_bits_set_0; // @[SourceA.scala:33:7]
wire [2:0] a_bits_param = io_req_bits_param_0; // @[SourceA.scala:33:7, :43:15]
wire [3:0] a_bits_source = io_req_bits_source_0; // @[SourceA.scala:33:7, :43:15]
wire io_req_ready_0; // @[SourceA.scala:33:7]
wire [2:0] io_a_bits_opcode_0; // @[SourceA.scala:33:7]
wire [2:0] io_a_bits_param_0; // @[SourceA.scala:33:7]
wire [2:0] io_a_bits_size_0; // @[SourceA.scala:33:7]
wire [3:0] io_a_bits_source_0; // @[SourceA.scala:33:7]
wire [31:0] io_a_bits_address_0; // @[SourceA.scala:33:7]
wire [7:0] io_a_bits_mask_0; // @[SourceA.scala:33:7]
wire [63:0] io_a_bits_data_0; // @[SourceA.scala:33:7]
wire io_a_bits_corrupt_0; // @[SourceA.scala:33:7]
wire io_a_valid_0; // @[SourceA.scala:33:7]
assign io_req_ready_0 = a_ready; // @[SourceA.scala:33:7, :43:15]
wire [2:0] _a_bits_opcode_T; // @[SourceA.scala:50:24]
wire [31:0] _a_bits_address_T_26; // @[Parameters.scala:230:8]
wire [2:0] a_bits_opcode; // @[SourceA.scala:43:15]
wire [31:0] a_bits_address; // @[SourceA.scala:43:15]
assign _a_bits_opcode_T = {2'h3, ~io_req_bits_block_0}; // @[SourceA.scala:33:7, :50:24]
assign a_bits_opcode = _a_bits_opcode_T; // @[SourceA.scala:43:15, :50:24]
wire [8:0] _a_bits_address_base_T_5 = a_bits_address_base_y; // @[Parameters.scala:221:15, :223:6]
wire _a_bits_address_base_T_3 = ~_a_bits_address_base_T_2; // @[Parameters.scala:222:12]
wire [10:0] _a_bits_address_base_T_11 = a_bits_address_base_y_1; // @[Parameters.scala:221:15, :223:6]
wire _a_bits_address_base_T_9 = ~_a_bits_address_base_T_8; // @[Parameters.scala:222:12]
wire _a_bits_address_base_T_15 = ~_a_bits_address_base_T_14; // @[Parameters.scala:222:12]
wire [19:0] a_bits_address_base_hi = {_a_bits_address_base_T_5, _a_bits_address_base_T_11}; // @[Parameters.scala:223:6, :227:19]
wire [25:0] a_bits_address_base = {a_bits_address_base_hi, 6'h0}; // @[Parameters.scala:227:19]
wire _a_bits_address_T = a_bits_address_base[0]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_1 = a_bits_address_base[1]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_2 = a_bits_address_base[2]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_3 = a_bits_address_base[3]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_4 = a_bits_address_base[4]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_5 = a_bits_address_base[5]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_6 = a_bits_address_base[6]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_7 = a_bits_address_base[7]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_8 = a_bits_address_base[8]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_9 = a_bits_address_base[9]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_10 = a_bits_address_base[10]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_11 = a_bits_address_base[11]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_12 = a_bits_address_base[12]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_13 = a_bits_address_base[13]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_14 = a_bits_address_base[14]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_15 = a_bits_address_base[15]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_16 = a_bits_address_base[16]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_17 = a_bits_address_base[17]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_18 = a_bits_address_base[18]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_19 = a_bits_address_base[19]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_20 = a_bits_address_base[20]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_21 = a_bits_address_base[21]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_22 = a_bits_address_base[22]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_23 = a_bits_address_base[23]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_24 = a_bits_address_base[24]; // @[Parameters.scala:227:19, :229:72]
wire _a_bits_address_T_25 = a_bits_address_base[25]; // @[Parameters.scala:227:19, :229:72]
wire [1:0] a_bits_address_lo_lo_lo_lo = {_a_bits_address_T_1, _a_bits_address_T}; // @[Parameters.scala:229:72, :230:8]
wire [1:0] a_bits_address_lo_lo_lo_hi = {_a_bits_address_T_3, _a_bits_address_T_2}; // @[Parameters.scala:229:72, :230:8]
wire [3:0] a_bits_address_lo_lo_lo = {a_bits_address_lo_lo_lo_hi, a_bits_address_lo_lo_lo_lo}; // @[Parameters.scala:230:8]
wire [1:0] a_bits_address_lo_lo_hi_lo = {_a_bits_address_T_5, _a_bits_address_T_4}; // @[Parameters.scala:229:72, :230:8]
wire [3:0] a_bits_address_lo_lo_hi = {2'h0, a_bits_address_lo_lo_hi_lo}; // @[Parameters.scala:230:8]
wire [7:0] a_bits_address_lo_lo = {a_bits_address_lo_lo_hi, a_bits_address_lo_lo_lo}; // @[Parameters.scala:230:8]
wire [1:0] a_bits_address_lo_hi_lo_lo = {_a_bits_address_T_6, 1'h0}; // @[Parameters.scala:229:72, :230:8]
wire [1:0] a_bits_address_lo_hi_lo_hi = {_a_bits_address_T_8, _a_bits_address_T_7}; // @[Parameters.scala:229:72, :230:8]
wire [3:0] a_bits_address_lo_hi_lo = {a_bits_address_lo_hi_lo_hi, a_bits_address_lo_hi_lo_lo}; // @[Parameters.scala:230:8]
wire [1:0] a_bits_address_lo_hi_hi_lo = {_a_bits_address_T_10, _a_bits_address_T_9}; // @[Parameters.scala:229:72, :230:8]
wire [1:0] a_bits_address_lo_hi_hi_hi = {_a_bits_address_T_12, _a_bits_address_T_11}; // @[Parameters.scala:229:72, :230:8]
wire [3:0] a_bits_address_lo_hi_hi = {a_bits_address_lo_hi_hi_hi, a_bits_address_lo_hi_hi_lo}; // @[Parameters.scala:230:8]
wire [7:0] a_bits_address_lo_hi = {a_bits_address_lo_hi_hi, a_bits_address_lo_hi_lo}; // @[Parameters.scala:230:8]
wire [15:0] a_bits_address_lo = {a_bits_address_lo_hi, a_bits_address_lo_lo}; // @[Parameters.scala:230:8]
wire [1:0] a_bits_address_hi_lo_lo_lo = {_a_bits_address_T_14, _a_bits_address_T_13}; // @[Parameters.scala:229:72, :230:8]
wire [1:0] a_bits_address_hi_lo_lo_hi = {_a_bits_address_T_16, _a_bits_address_T_15}; // @[Parameters.scala:229:72, :230:8]
wire [3:0] a_bits_address_hi_lo_lo = {a_bits_address_hi_lo_lo_hi, a_bits_address_hi_lo_lo_lo}; // @[Parameters.scala:230:8]
wire [1:0] a_bits_address_hi_lo_hi_lo = {_a_bits_address_T_18, _a_bits_address_T_17}; // @[Parameters.scala:229:72, :230:8]
wire [1:0] a_bits_address_hi_lo_hi_hi = {_a_bits_address_T_20, _a_bits_address_T_19}; // @[Parameters.scala:229:72, :230:8]
wire [3:0] a_bits_address_hi_lo_hi = {a_bits_address_hi_lo_hi_hi, a_bits_address_hi_lo_hi_lo}; // @[Parameters.scala:230:8]
wire [7:0] a_bits_address_hi_lo = {a_bits_address_hi_lo_hi, a_bits_address_hi_lo_lo}; // @[Parameters.scala:230:8]
wire [1:0] a_bits_address_hi_hi_lo_lo = {_a_bits_address_T_22, _a_bits_address_T_21}; // @[Parameters.scala:229:72, :230:8]
wire [1:0] a_bits_address_hi_hi_lo_hi = {_a_bits_address_T_24, _a_bits_address_T_23}; // @[Parameters.scala:229:72, :230:8]
wire [3:0] a_bits_address_hi_hi_lo = {a_bits_address_hi_hi_lo_hi, a_bits_address_hi_hi_lo_lo}; // @[Parameters.scala:230:8]
wire [1:0] a_bits_address_hi_hi_hi_hi = {_a_bits_address_T_25, 1'h0}; // @[Parameters.scala:229:72, :230:8]
wire [3:0] a_bits_address_hi_hi_hi = {a_bits_address_hi_hi_hi_hi, 2'h0}; // @[Parameters.scala:230:8]
wire [7:0] a_bits_address_hi_hi = {a_bits_address_hi_hi_hi, a_bits_address_hi_hi_lo}; // @[Parameters.scala:230:8]
wire [15:0] a_bits_address_hi = {a_bits_address_hi_hi, a_bits_address_hi_lo}; // @[Parameters.scala:230:8]
assign _a_bits_address_T_26 = {a_bits_address_hi, a_bits_address_lo}; // @[Parameters.scala:230:8]
assign a_bits_address = _a_bits_address_T_26; // @[SourceA.scala:43:15]
Queue2_TLBundleA_a32d64s4k3z3c_7 io_a_q ( // @[Decoupled.scala:362:21]
.clock (clock),
.reset (reset),
.io_enq_ready (a_ready),
.io_enq_valid (a_valid), // @[SourceA.scala:43:15]
.io_enq_bits_opcode (a_bits_opcode), // @[SourceA.scala:43:15]
.io_enq_bits_param (a_bits_param), // @[SourceA.scala:43:15]
.io_enq_bits_source (a_bits_source), // @[SourceA.scala:43:15]
.io_enq_bits_address (a_bits_address), // @[SourceA.scala:43:15]
.io_deq_ready (io_a_ready_0), // @[SourceA.scala:33:7]
.io_deq_valid (io_a_valid_0),
.io_deq_bits_opcode (io_a_bits_opcode_0),
.io_deq_bits_param (io_a_bits_param_0),
.io_deq_bits_size (io_a_bits_size_0),
.io_deq_bits_source (io_a_bits_source_0),
.io_deq_bits_address (io_a_bits_address_0),
.io_deq_bits_mask (io_a_bits_mask_0),
.io_deq_bits_data (io_a_bits_data_0),
.io_deq_bits_corrupt (io_a_bits_corrupt_0)
); // @[Decoupled.scala:362:21]
assign io_req_ready = io_req_ready_0; // @[SourceA.scala:33:7]
assign io_a_valid = io_a_valid_0; // @[SourceA.scala:33:7]
assign io_a_bits_opcode = io_a_bits_opcode_0; // @[SourceA.scala:33:7]
assign io_a_bits_param = io_a_bits_param_0; // @[SourceA.scala:33:7]
assign io_a_bits_size = io_a_bits_size_0; // @[SourceA.scala:33:7]
assign io_a_bits_source = io_a_bits_source_0; // @[SourceA.scala:33:7]
assign io_a_bits_address = io_a_bits_address_0; // @[SourceA.scala:33:7]
assign io_a_bits_mask = io_a_bits_mask_0; // @[SourceA.scala:33:7]
assign io_a_bits_data = io_a_bits_data_0; // @[SourceA.scala:33:7]
assign io_a_bits_corrupt = io_a_bits_corrupt_0; // @[SourceA.scala:33:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File TilelinkAdapters.scala:
package constellation.protocol
import chisel3._
import chisel3.util._
import constellation.channel._
import constellation.noc._
import constellation.soc.{CanAttachToGlobalNoC}
import org.chipsalliance.cde.config._
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.util._
import freechips.rocketchip.tilelink._
import scala.collection.immutable.{ListMap}
abstract class TLChannelToNoC[T <: TLChannel](gen: => T, edge: TLEdge, idToEgress: Int => Int)(implicit val p: Parameters) extends Module with TLFieldHelper {
val flitWidth = minTLPayloadWidth(gen)
val io = IO(new Bundle {
val protocol = Flipped(Decoupled(gen))
val flit = Decoupled(new IngressFlit(flitWidth))
})
def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B
// convert decoupled to irrevocable
val q = Module(new Queue(gen, 1, pipe=true, flow=true))
val protocol = q.io.deq
val has_body = Wire(Bool())
val body_fields = getBodyFields(protocol.bits)
val const_fields = getConstFields(protocol.bits)
val head = edge.first(protocol.bits, protocol.fire)
val tail = edge.last(protocol.bits, protocol.fire)
def requestOH: Seq[Bool]
val body = Cat( body_fields.filter(_.getWidth > 0).map(_.asUInt))
val const = Cat(const_fields.filter(_.getWidth > 0).map(_.asUInt))
val is_body = RegInit(false.B)
io.flit.valid := protocol.valid
protocol.ready := io.flit.ready && (is_body || !has_body)
io.flit.bits.head := head && !is_body
io.flit.bits.tail := tail && (is_body || !has_body)
io.flit.bits.egress_id := Mux1H(requestOH.zipWithIndex.map { case (r, i) =>
r -> idToEgress(i).U
})
io.flit.bits.payload := Mux(is_body, body, const)
when (io.flit.fire && io.flit.bits.head) { is_body := true.B }
when (io.flit.fire && io.flit.bits.tail) { is_body := false.B }
}
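// Flit framing (a summary derived from the code above, not original documentation): the head
// flit of each TileLink message carries the packed constant fields (opcode, size, source, ...),
// each following flit carries the packed body fields (data/mask) for one beat, and messages
// with no body are sent as a single flit with head and tail both set; is_body tracks the phase.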
abstract class TLChannelFromNoC[T <: TLChannel](gen: => T)(implicit val p: Parameters) extends Module with TLFieldHelper {
val flitWidth = minTLPayloadWidth(gen)
val io = IO(new Bundle {
val protocol = Decoupled(gen)
val flit = Flipped(Decoupled(new EgressFlit(flitWidth)))
})
// Handle size = 1 gracefully (Chisel3 empty range is broken)
def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0)
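  // e.g. trim(source, 4) keeps source(1, 0); trim(source, 1) collapses to 0.U, which is the
  // size = 1 case the comment above refers to.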
val protocol = Wire(Decoupled(gen))
val body_fields = getBodyFields(protocol.bits)
val const_fields = getConstFields(protocol.bits)
val is_const = RegInit(true.B)
val const_reg = Reg(UInt(const_fields.map(_.getWidth).sum.W))
val const = Mux(io.flit.bits.head, io.flit.bits.payload, const_reg)
io.flit.ready := (is_const && !io.flit.bits.tail) || protocol.ready
protocol.valid := (!is_const || io.flit.bits.tail) && io.flit.valid
def assign(i: UInt, sigs: Seq[Data]) = {
var t = i
for (s <- sigs.reverse) {
s := t.asTypeOf(s.cloneType)
t = t >> s.getWidth
}
}
assign(const, const_fields)
assign(io.flit.bits.payload, body_fields)
when (io.flit.fire && io.flit.bits.head) { is_const := false.B; const_reg := io.flit.bits.payload }
when (io.flit.fire && io.flit.bits.tail) { is_const := true.B }
}
trait HasAddressDecoder {
// Filter a list to only those elements selected
def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1)
val edgeIn: TLEdge
val edgesOut: Seq[TLEdge]
lazy val reacheableIO = edgesOut.map { mp =>
edgeIn.client.clients.exists { c => mp.manager.managers.exists { m =>
c.visibility.exists { ca => m.address.exists { ma =>
ca.overlaps(ma)
}}
}}
}.toVector
lazy val releaseIO = (edgesOut zip reacheableIO).map { case (mp, reachable) =>
reachable && edgeIn.client.anySupportProbe && mp.manager.anySupportAcquireB
}.toVector
def outputPortFn(connectIO: Seq[Boolean]) = {
val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address))
val routingMask = AddressDecoder(filter(port_addrs, connectIO))
val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct))
route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_||_))
}
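// outputPortFn returns one address-match predicate per output edge: AddressDecoder
// chooses a minimal routingMask of address bits that distinguishes the selected
// ports, each port's address sets are widened over the ignored bits, and the
// resulting per-port check only compares the surviving routing bits.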
}
class TLAToNoC(
val edgeIn: TLEdge,
val edgesOut: Seq[TLEdge],
bundle: TLBundleParameters,
slaveToAEgress: Int => Int,
sourceStart: Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleA(bundle), edgeIn, slaveToAEgress)(p) with HasAddressDecoder {
has_body := edgeIn.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
lazy val connectAIO = reacheableIO
lazy val requestOH = outputPortFn(connectAIO).zipWithIndex.map { case (o, j) =>
connectAIO(j).B && (unique(connectAIO) || o(protocol.bits.address))
}
q.io.enq <> io.protocol
q.io.enq.bits.source := io.protocol.bits.source | sourceStart.U
}
class TLAFromNoC(edgeOut: TLEdge, bundle: TLBundleParameters)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleA(bundle))(p) {
io.protocol <> protocol
when (io.flit.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLBToNoC(
edgeOut: TLEdge,
edgesIn: Seq[TLEdge],
bundle: TLBundleParameters,
masterToBIngress: Int => Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleB(bundle), edgeOut, masterToBIngress)(p) {
has_body := edgeOut.hasData(protocol.bits) || (~protocol.bits.mask =/= 0.U)
lazy val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
lazy val requestOH = inputIdRanges.map { i => i.contains(protocol.bits.source) }
q.io.enq <> io.protocol
}
class TLBFromNoC(edgeIn: TLEdge, bundle: TLBundleParameters, sourceSize: Int)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleB(bundle))(p) {
io.protocol <> protocol
io.protocol.bits.source := trim(protocol.bits.source, sourceSize)
when (io.flit.bits.head) { io.protocol.bits.mask := ~(0.U(io.protocol.bits.mask.getWidth.W)) }
}
class TLCToNoC(
val edgeIn: TLEdge,
val edgesOut: Seq[TLEdge],
bundle: TLBundleParameters,
slaveToCEgress: Int => Int,
sourceStart: Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleC(bundle), edgeIn, slaveToCEgress)(p) with HasAddressDecoder {
has_body := edgeIn.hasData(protocol.bits)
lazy val connectCIO = releaseIO
lazy val requestOH = outputPortFn(connectCIO).zipWithIndex.map {
case (o, j) => connectCIO(j).B && (unique(connectCIO) || o(protocol.bits.address))
}
q.io.enq <> io.protocol
q.io.enq.bits.source := io.protocol.bits.source | sourceStart.U
}
class TLCFromNoC(edgeOut: TLEdge, bundle: TLBundleParameters)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleC(bundle))(p) {
io.protocol <> protocol
}
class TLDToNoC(
edgeOut: TLEdge,
edgesIn: Seq[TLEdge],
bundle: TLBundleParameters,
masterToDIngress: Int => Int,
sourceStart: Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleD(bundle), edgeOut, masterToDIngress)(p) {
has_body := edgeOut.hasData(protocol.bits)
lazy val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client))
lazy val requestOH = inputIdRanges.map { i => i.contains(protocol.bits.source) }
q.io.enq <> io.protocol
q.io.enq.bits.sink := io.protocol.bits.sink | sourceStart.U
}
class TLDFromNoC(edgeIn: TLEdge, bundle: TLBundleParameters, sourceSize: Int)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleD(bundle))(p)
{
io.protocol <> protocol
io.protocol.bits.source := trim(protocol.bits.source, sourceSize)
}
class TLEToNoC(
val edgeIn: TLEdge,
val edgesOut: Seq[TLEdge],
bundle: TLBundleParameters,
slaveToEEgress: Int => Int
)(implicit p: Parameters) extends TLChannelToNoC(new TLBundleE(bundle), edgeIn, slaveToEEgress)(p) {
has_body := edgeIn.hasData(protocol.bits)
lazy val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager))
lazy val requestOH = outputIdRanges.map { o => o.contains(protocol.bits.sink) }
q.io.enq <> io.protocol
}
class TLEFromNoC(edgeOut: TLEdge, bundle: TLBundleParameters, sourceSize: Int)(implicit p: Parameters) extends TLChannelFromNoC(new TLBundleE(bundle))(p) {
io.protocol <> protocol
io.protocol.bits.sink := trim(protocol.bits.sink, sourceSize)
}
| module TLEFromNoC_5( // @[TilelinkAdapters.scala:202:7]
input clock, // @[TilelinkAdapters.scala:202:7]
input reset, // @[TilelinkAdapters.scala:202:7]
output io_flit_ready, // @[TilelinkAdapters.scala:56:14]
input io_flit_valid, // @[TilelinkAdapters.scala:56:14]
input io_flit_bits_head, // @[TilelinkAdapters.scala:56:14]
input io_flit_bits_tail // @[TilelinkAdapters.scala:56:14]
);
reg is_const; // @[TilelinkAdapters.scala:68:25]
wire io_flit_ready_0 = is_const & ~io_flit_bits_tail; // @[TilelinkAdapters.scala:68:25, :71:{30,33}]
wire _GEN = io_flit_ready_0 & io_flit_valid; // @[Decoupled.scala:51:35]
always @(posedge clock) begin // @[TilelinkAdapters.scala:202:7]
if (reset) // @[TilelinkAdapters.scala:202:7]
is_const <= 1'h1; // @[TilelinkAdapters.scala:68:25, :202:7]
else // @[TilelinkAdapters.scala:202:7]
is_const <= _GEN & io_flit_bits_tail | ~(_GEN & io_flit_bits_head) & is_const; // @[Decoupled.scala:51:35]
end // always @(posedge)
assign io_flit_ready = io_flit_ready_0; // @[TilelinkAdapters.scala:202:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ListBuffer.scala:
/*
* Copyright 2019 SiFive, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You should have received a copy of LICENSE.Apache2 along with
* this software. If not, you may obtain a copy at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sifive.blocks.inclusivecache
import chisel3._
import chisel3.util._
import freechips.rocketchip.util._
case class ListBufferParameters[T <: Data](gen: T, queues: Int, entries: Int, bypass: Boolean)
{
val queueBits = log2Up(queues)
val entryBits = log2Up(entries)
}
class ListBufferPush[T <: Data](params: ListBufferParameters[T]) extends Bundle
{
val index = UInt(params.queueBits.W)
val data = Output(params.gen)
}
class ListBuffer[T <: Data](params: ListBufferParameters[T]) extends Module
{
override def desiredName = s"ListBuffer_${params.gen.typeName}_q${params.queues}_e${params.entries}"
val io = IO(new Bundle {
// push is visible on the same cycle; flow queues
val push = Flipped(Decoupled(new ListBufferPush(params)))
val valid = UInt(params.queues.W)
val pop = Flipped(Valid(UInt(params.queueBits.W)))
val data = Output(params.gen)
})
val valid = RegInit(0.U(params.queues.W))
val head = Mem(params.queues, UInt(params.entryBits.W))
val tail = Mem(params.queues, UInt(params.entryBits.W))
val used = RegInit(0.U(params.entries.W))
val next = Mem(params.entries, UInt(params.entryBits.W))
val data = Mem(params.entries, params.gen)
val freeOH = ~(leftOR(~used) << 1) & ~used
val freeIdx = OHToUInt(freeOH)
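// freeOH selects the lowest-numbered free entry as a one-hot: ~used marks the free
// slots, leftOR fills 1s upward from the lowest free slot, and shifting left by one
// before inverting keeps only that position. E.g. used = b0101 gives ~used = b1010,
// leftOR = b1110, so freeOH = b0011 & b1010 = b0010 (entry 1 is allocated next).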
val valid_set = WireDefault(0.U(params.queues.W))
val valid_clr = WireDefault(0.U(params.queues.W))
val used_set = WireDefault(0.U(params.entries.W))
val used_clr = WireDefault(0.U(params.entries.W))
val push_tail = tail.read(io.push.bits.index)
val push_valid = valid(io.push.bits.index)
io.push.ready := !used.andR
when (io.push.fire) {
valid_set := UIntToOH(io.push.bits.index, params.queues)
used_set := freeOH
data.write(freeIdx, io.push.bits.data)
when (push_valid) {
next.write(push_tail, freeIdx)
} .otherwise {
head.write(io.push.bits.index, freeIdx)
}
tail.write(io.push.bits.index, freeIdx)
}
val pop_head = head.read(io.pop.bits)
val pop_valid = valid(io.pop.bits)
// Bypass push data to the peek port
io.data := (if (!params.bypass) data.read(pop_head) else Mux(!pop_valid, io.push.bits.data, data.read(pop_head)))
io.valid := (if (!params.bypass) valid else (valid | valid_set))
// It is an error to pop something that is not valid
assert (!io.pop.fire || (io.valid)(io.pop.bits))
when (io.pop.fire) {
used_clr := UIntToOH(pop_head, params.entries)
when (pop_head === tail.read(io.pop.bits)) {
valid_clr := UIntToOH(io.pop.bits, params.queues)
}
head.write(io.pop.bits, Mux(io.push.fire && push_valid && push_tail === pop_head, freeIdx, next.read(pop_head)))
}
// Empty bypass changes no state
when ((!params.bypass).B || !io.pop.valid || pop_valid) {
used := (used & ~used_clr) | used_set
valid := (valid & ~valid_clr) | valid_set
}
}
| module tail_21x6( // @[ListBuffer.scala:49:18]
input [4:0] R0_addr,
input R0_en,
input R0_clk,
output [5:0] R0_data,
input [4:0] R1_addr,
input R1_en,
input R1_clk,
output [5:0] R1_data,
input [4:0] W0_addr,
input W0_en,
input W0_clk,
input [5:0] W0_data
);
reg [5:0] Memory[0:20]; // @[ListBuffer.scala:49:18]
always @(posedge W0_clk) begin // @[ListBuffer.scala:49:18]
if (W0_en & 1'h1) // @[ListBuffer.scala:49:18]
Memory[W0_addr] <= W0_data; // @[ListBuffer.scala:49:18]
end // always @(posedge)
assign R0_data = R0_en ? Memory[R0_addr] : 6'bx; // @[ListBuffer.scala:49:18]
assign R1_data = R1_en ? Memory[R1_addr] : 6'bx; // @[ListBuffer.scala:49:18]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
// implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }
package gemmini
import chisel3._
import chisel3.util._
import hardfloat._
// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
val bits = UInt((expWidth + sigWidth).W)
val bias: Int = (1 << (expWidth-1)) - 1
}
case class DummySInt(w: Int) extends Bundle {
val bits = UInt(w.W)
def dontCare: DummySInt = {
val o = Wire(new DummySInt(w))
o.bits := 0.U
o
}
}
// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
implicit def cast(t: T): ArithmeticOps[T]
}
abstract class ArithmeticOps[T <: Data](self: T) {
def *(t: T): T
def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
def +(t: T): T
def -(t: T): T
def >>(u: UInt): T // This is a rounding shift! Rounds to nearest, ties to even
def >(t: T): Bool
def identity: T
def withWidthOf(t: T): T
def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
def relu: T
def zero: T
def minimum: T
// Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}
object Arithmetic {
implicit object UIntArithmetic extends Arithmetic[UInt] {
override implicit def cast(self: UInt) = new ArithmeticOps(self) {
override def *(t: UInt) = self * t
override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
override def +(t: UInt) = self + t
override def -(t: UInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = point_five & (zeros | ones_digit)
(self >> u).asUInt + r
}
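// Worked example of the rounding above: for self = 11 (b1011) and u = 2 the truncated
// quotient is 2, point_five = 1 and zeros = 1, so r = 1 and the result is 3
// (11/4 = 2.75 rounds up); for self = 10 (b1010) and u = 2 the fraction is exactly
// one half and the truncated quotient is even, so r = 0 and the result stays 2.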
override def >(t: UInt): Bool = self > t
override def withWidthOf(t: UInt) = self.asTypeOf(t)
override def clippedToWidthOf(t: UInt) = {
val sat = ((1 << (t.getWidth-1))-1).U
Mux(self > sat, sat, self)(t.getWidth-1, 0)
}
override def relu: UInt = self
override def zero: UInt = 0.U
override def identity: UInt = 1.U
override def minimum: UInt = 0.U
}
}
implicit object SIntArithmetic extends Arithmetic[SInt] {
override implicit def cast(self: SInt) = new ArithmeticOps(self) {
override def *(t: SInt) = self * t
override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
override def +(t: SInt) = self + t
override def -(t: SInt) = self - t
override def >>(u: UInt) = {
// The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
// TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
val ones_digit = self(u)
val r = (point_five & (zeros | ones_digit)).asBool
(self >> u).asSInt + Mux(r, 1.S, 0.S)
}
override def >(t: SInt): Bool = self > t
override def withWidthOf(t: SInt) = {
if (self.getWidth >= t.getWidth)
self(t.getWidth-1, 0).asSInt
else {
val sign_bits = t.getWidth - self.getWidth
val sign = self(self.getWidth-1)
Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
}
}
override def clippedToWidthOf(t: SInt): SInt = {
val maxsat = ((1 << (t.getWidth-1))-1).S
val minsat = (-(1 << (t.getWidth-1))).S
MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
}
override def relu: SInt = Mux(self >= 0.S, self, 0.S)
override def zero: SInt = 0.S
override def identity: SInt = 1.S
override def minimum: SInt = (-(1 << (self.getWidth-1))).S
override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point divider, but we should use an integer divider instead
val input = Wire(Decoupled(denom_t.cloneType))
val output = Wire(Decoupled(self.cloneType))
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def sin_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def uin_to_float(x: UInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := x
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = sin_to_float(self)
val denom_rec = uin_to_float(input.bits)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := self_rec
divider.io.b := denom_rec
divider.io.roundingMode := consts.round_minMag
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := float_to_in(divider.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
// TODO this uses a floating point sqrt unit, but we should use an integer sqrt instead
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(self.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
val expWidth = log2Up(self.getWidth) + 1
val sigWidth = self.getWidth
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
// Instantiate the hardfloat sqrt
val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))
input.ready := sqrter.io.inReady
sqrter.io.inValid := input.valid
sqrter.io.sqrtOp := true.B
sqrter.io.a := self_rec
sqrter.io.b := DontCare
sqrter.io.roundingMode := consts.round_minMag
sqrter.io.detectTininess := consts.tininess_afterRounding
output.valid := sqrter.io.outValid_sqrt
output.bits := float_to_in(sqrter.io.out)
assert(!output.valid || output.ready)
Some((input, output))
}
override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
case Float(expWidth, sigWidth) =>
val input = Wire(Decoupled(UInt(0.W)))
val output = Wire(Decoupled(u.cloneType))
input.bits := DontCare
// We translate our integer to floating-point form so that we can use the hardfloat divider
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
val self_rec = in_to_float(self)
val one_rec = in_to_float(1.S)
// Instantiate the hardfloat divider
val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))
input.ready := divider.io.inReady
divider.io.inValid := input.valid
divider.io.sqrtOp := false.B
divider.io.a := one_rec
divider.io.b := self_rec
divider.io.roundingMode := consts.round_near_even
divider.io.detectTininess := consts.tininess_afterRounding
output.valid := divider.io.outValid_div
output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)
assert(!output.valid || output.ready)
Some((input, output))
case _ => None
}
override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
case recip @ Float(expWidth, sigWidth) =>
def in_to_float(x: SInt) = {
val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
in_to_rec_fn.io.signedIn := true.B
in_to_rec_fn.io.in := x.asUInt
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
in_to_rec_fn.io.out
}
def float_to_in(x: UInt) = {
val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
rec_fn_to_in.io.signedOut := true.B
rec_fn_to_in.io.in := x
rec_fn_to_in.io.roundingMode := consts.round_minMag
rec_fn_to_in.io.out.asSInt
}
val self_rec = in_to_float(self)
val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)
// Instantiate the hardfloat multiplier
val muladder = Module(new MulRecFN(expWidth, sigWidth))
muladder.io.roundingMode := consts.round_near_even
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := reciprocal_rec
float_to_in(muladder.io.out)
case _ => self
}
}
}
implicit object FloatArithmetic extends Arithmetic[Float] {
// TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array
override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
override def *(t: Float): Float = {
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := t_rec_resized
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def mac(m1: Float, m2: Float): Float = {
// Recode all operands
val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize m1 to self's width
val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
m1_resizer.io.in := m1_rec
m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m1_resizer.io.detectTininess := consts.tininess_afterRounding
val m1_rec_resized = m1_resizer.io.out
// Resize m2 to self's width
val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
m2_resizer.io.in := m2_rec
m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
m2_resizer.io.detectTininess := consts.tininess_afterRounding
val m2_rec_resized = m2_resizer.io.out
// Perform multiply-add
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := m1_rec_resized
muladder.io.b := m2_rec_resized
muladder.io.c := self_rec
// Convert result to standard format // TODO remove these intermediate recodings
val out = Wire(Float(self.expWidth, self.sigWidth))
out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
out
}
override def +(t: Float): Float = {
require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Generate 1 as a float
val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
in_to_rec_fn.io.signedIn := false.B
in_to_rec_fn.io.in := 1.U
in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
val one_rec = in_to_rec_fn.io.out
// Resize t
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
// Perform addition
val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))
muladder.io.op := 0.U
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := t_rec_resized
muladder.io.b := one_rec
muladder.io.c := self_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
override def -(t: Float): Float = {
val t_sgn = t.bits(t.getWidth-1)
val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
self + neg_t
}
override def >>(u: UInt): Float = {
// Recode self
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Get 2^(-u) as a recoded float
val shift_exp = Wire(UInt(self.expWidth.W))
shift_exp := self.bias.U - u
val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)
assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")
// Multiply self and 2^(-u)
val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))
muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
muladder.io.detectTininess := consts.tininess_afterRounding
muladder.io.a := self_rec
muladder.io.b := shift_rec
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
result
}
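// The shift above is a multiply by 2^(-u): shift_fn encodes a float with a zero
// mantissa and exponent field (bias - u), so e.g. with an 8-bit exponent
// (bias = 127) and u = 3, shift_exp = 124, which is exactly 2^(-3) = 0.125.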
override def >(t: Float): Bool = {
// Recode all operands
val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
// Resize t to self's width
val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
t_resizer.io.in := t_rec
t_resizer.io.roundingMode := consts.round_near_even
t_resizer.io.detectTininess := consts.tininess_afterRounding
val t_rec_resized = t_resizer.io.out
val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
comparator.io.a := self_rec
comparator.io.b := t_rec_resized
comparator.io.signaling := false.B
comparator.io.gt
}
override def withWidthOf(t: Float): Float = {
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def clippedToWidthOf(t: Float): Float = {
// TODO check for overflow. Right now, we just assume that overflow doesn't happen
val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)
val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
resizer.io.in := self_rec
resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
resizer.io.detectTininess := consts.tininess_afterRounding
val result = Wire(Float(t.expWidth, t.sigWidth))
result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
result
}
override def relu: Float = {
val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)
val result = Wire(Float(self.expWidth, self.sigWidth))
result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
result
}
override def zero: Float = 0.U.asTypeOf(self)
override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
}
}
implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
override def *(t: DummySInt) = self.dontCare
override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
override def +(t: DummySInt) = self.dontCare
override def -(t: DummySInt) = self.dontCare
override def >>(t: UInt) = self.dontCare
override def >(t: DummySInt): Bool = false.B
override def identity = self.dontCare
override def withWidthOf(t: DummySInt) = self.dontCare
override def clippedToWidthOf(t: DummySInt) = self.dontCare
override def relu = self.dontCare
override def zero = self.dontCare
override def minimum: DummySInt = self.dontCare
}
}
}
File AccumulatorMem.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
class AccumulatorReadReq[T <: Data: Arithmetic, U <: Data](n: Int, acc_t: T, scale_t: U) extends Bundle {
val addr = UInt(log2Ceil(n).W)
val scale = scale_t
val igelu_qb = acc_t.cloneType
val igelu_qc = acc_t.cloneType
val iexp_qln2 = acc_t.cloneType
val iexp_qln2_inv = acc_t.cloneType
val act = UInt(Activation.bitwidth.W) // TODO magic number
val full = Bool() // Whether or not we return the full bitwidth output
val fromDMA = Bool()
}
class AccumulatorReadResp[T <: Data: Arithmetic, U <: Data](fullDataType: Vec[Vec[T]], scale_t: U) extends Bundle {
val data = fullDataType.cloneType
val fromDMA = Bool()
val scale = scale_t.cloneType
val igelu_qb = fullDataType.head.head.cloneType
val igelu_qc = fullDataType.head.head.cloneType
val iexp_qln2 = fullDataType.head.head.cloneType
val iexp_qln2_inv = fullDataType.head.head.cloneType
val act = UInt(Activation.bitwidth.W) // TODO magic number
val acc_bank_id = UInt(2.W) // TODO magic number
}
class AccumulatorReadIO[T <: Data: Arithmetic, U <: Data](n: Int, fullDataType: Vec[Vec[T]], scale_t: U) extends Bundle {
val req = Decoupled(new AccumulatorReadReq[T, U](n, fullDataType.head.head.cloneType, scale_t))
val resp = Flipped(Decoupled(new AccumulatorReadResp[T, U](fullDataType, scale_t)))
}
class AccumulatorWriteReq[T <: Data: Arithmetic](n: Int, t: Vec[Vec[T]]) extends Bundle {
val addr = UInt(log2Up(n).W)
val data = t.cloneType
val acc = Bool()
val mask = Vec(t.getWidth / 8, Bool()) // TODO Use aligned_to here
}
class AccumulatorMemIO [T <: Data: Arithmetic, U <: Data](n: Int, t: Vec[Vec[T]], scale_t: U,
acc_sub_banks: Int, use_shared_ext_mem: Boolean
) extends Bundle {
val read = Flipped(new AccumulatorReadIO(n, t, scale_t))
val write = Flipped(Decoupled(new AccumulatorWriteReq(n, t)))
val ext_mem = if (use_shared_ext_mem) Some(Vec(acc_sub_banks, new ExtMemIO)) else None
val adder = new Bundle {
val valid = Output(Bool())
val op1 = Output(t.cloneType)
val op2 = Output(t.cloneType)
val sum = Input(t.cloneType)
}
}
class AccPipe[T <: Data : Arithmetic](latency: Int, t: T)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val op1 = Input(t.cloneType)
val op2 = Input(t.cloneType)
val sum = Output(t.cloneType)
})
import ev._
io.sum := ShiftRegister(io.op1 + io.op2, latency)
}
class AccPipeShared[T <: Data : Arithmetic](latency: Int, t: Vec[Vec[T]], banks: Int) extends Module {
val io = IO(new Bundle {
val in_sel = Input(Vec(banks, Bool()))
val ina = Input(Vec(banks, t.cloneType))
val inb = Input(Vec(banks, t.cloneType))
val out = Output(t.cloneType)
})
val ina = Mux1H(io.in_sel, io.ina)
val inb = Mux1H(io.in_sel, io.inb)
io.out := VecInit((ina zip inb).map { case (rv, wv) =>
VecInit((rv zip wv).map { case (re, we) =>
val m = Module(new AccPipe(latency, t.head.head.cloneType))
m.io.op1 := re
m.io.op2 := we
m.io.sum
})
})
}
class AccumulatorMem[T <: Data, U <: Data](
n: Int, t: Vec[Vec[T]], scale_func: (T, U) => T, scale_t: U,
acc_singleported: Boolean, acc_sub_banks: Int,
use_shared_ext_mem: Boolean,
acc_latency: Int, acc_type: T, is_dummy: Boolean
)
(implicit ev: Arithmetic[T]) extends Module {
// TODO Do writes in this module work with matrices of size 2? If we try to read from an address right after writing
// to it, then we might not get the written data. We might need some kind of cooldown counter after addresses in the
// accumulator have been written to for configurations with such small matrices
// TODO make a new aligned_to variable specifically for AccumulatorMem. We should assume that inputs are at least
// accType.getWidth/8 aligned, because it won't make sense to do matrix additions directly in the DMA otherwise.
import ev._
// TODO unify this with TwoPortSyncMemIO
val io = IO(new AccumulatorMemIO(n, t, scale_t, acc_sub_banks, use_shared_ext_mem))
require (acc_latency >= 2)
val pipelined_writes = Reg(Vec(acc_latency, Valid(new AccumulatorWriteReq(n, t))))
val oldest_pipelined_write = pipelined_writes(acc_latency-1)
pipelined_writes(0).valid := io.write.fire
pipelined_writes(0).bits := io.write.bits
for (i <- 1 until acc_latency) {
pipelined_writes(i) := pipelined_writes(i-1)
}
val rdata_for_adder = Wire(t)
rdata_for_adder := DontCare
val rdata_for_read_resp = Wire(t)
rdata_for_read_resp := DontCare
val adder_sum = io.adder.sum
io.adder.valid := pipelined_writes(0).valid && pipelined_writes(0).bits.acc
io.adder.op1 := rdata_for_adder
io.adder.op2 := pipelined_writes(0).bits.data
val block_read_req = WireInit(false.B)
val block_write_req = WireInit(false.B)
val mask_len = t.getWidth / 8
val mask_elem = UInt((t.getWidth / mask_len).W)
if (!acc_singleported && !is_dummy) {
require(!use_shared_ext_mem)
val mem = TwoPortSyncMem(n, t, mask_len) // TODO We assume byte-alignment here. Use aligned_to instead
mem.io.waddr := oldest_pipelined_write.bits.addr
mem.io.wen := oldest_pipelined_write.valid
mem.io.wdata := Mux(oldest_pipelined_write.bits.acc, adder_sum, oldest_pipelined_write.bits.data)
mem.io.mask := oldest_pipelined_write.bits.mask
rdata_for_adder := mem.io.rdata
rdata_for_read_resp := mem.io.rdata
mem.io.raddr := Mux(io.write.fire && io.write.bits.acc, io.write.bits.addr, io.read.req.bits.addr)
mem.io.ren := io.read.req.fire || (io.write.fire && io.write.bits.acc)
} else if (!is_dummy) {
val rmw_req = Wire(Decoupled(UInt()))
rmw_req.valid := io.write.valid && io.write.bits.acc
rmw_req.bits := io.write.bits.addr
rmw_req.ready := true.B
block_write_req := !rmw_req.ready
val only_read_req = Wire(Decoupled(UInt()))
only_read_req.valid := io.read.req.valid
only_read_req.bits := io.read.req.bits.addr
only_read_req.ready := true.B
block_read_req := !only_read_req.ready
for (i <- 0 until acc_sub_banks) {
def isThisBank(addr: UInt) = addr(log2Ceil(acc_sub_banks)-1,0) === i.U
def getBankIdx(addr: UInt) = addr >> log2Ceil(acc_sub_banks)
val (read, write) = if (use_shared_ext_mem) {
def read(addr: UInt, ren: Bool): Data = {
io.ext_mem.get(i).read_en := ren
io.ext_mem.get(i).read_addr := addr
io.ext_mem.get(i).read_data
}
io.ext_mem.get(i).write_en := false.B
io.ext_mem.get(i).write_addr := DontCare
io.ext_mem.get(i).write_data := DontCare
io.ext_mem.get(i).write_mask := DontCare
def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]) = {
io.ext_mem.get(i).write_en := true.B
io.ext_mem.get(i).write_addr := addr
io.ext_mem.get(i).write_data := wdata.asUInt
io.ext_mem.get(i).write_mask := wmask.asUInt
}
(read _, write _)
} else {
val mem = SyncReadMem(n / acc_sub_banks, Vec(mask_len, mask_elem))
def read(addr: UInt, ren: Bool): Data = mem.read(addr, ren)
def write(addr: UInt, wdata: Vec[UInt], wmask: Vec[Bool]) = mem.write(addr, wdata, wmask)
(read _, write _)
}
val ren = WireInit(false.B)
val raddr = WireInit(getBankIdx(rmw_req.bits))
val nEntries = 3
// Writes coming 2 cycles after a read lead to bad bank behavior
// Add another buffer here
class W_Q_Entry[T <: Data](mask_len: Int, mask_elem: T) extends Bundle {
val valid = Bool()
val data = Vec(mask_len, mask_elem)
val mask = Vec(mask_len, Bool())
val addr = UInt(log2Ceil(n/acc_sub_banks).W)
}
val w_q = Reg(Vec(nEntries, new W_Q_Entry(mask_len, mask_elem)))
for (e <- w_q) {
when (e.valid) {
assert(!(
io.write.fire && io.write.bits.acc &&
isThisBank(io.write.bits.addr) && getBankIdx(io.write.bits.addr) === e.addr &&
((io.write.bits.mask.asUInt & e.mask.asUInt) =/= 0.U)
), "you cannot accumulate to an AccumulatorMem address until previous writes to that address have completed")
when (io.write.bits.acc && isThisBank(io.write.bits.addr) && getBankIdx(io.write.bits.addr) === e.addr) {
rmw_req.ready := false.B
}
when (isThisBank(io.read.req.bits.addr) && getBankIdx(io.read.req.bits.addr) === e.addr) {
only_read_req.ready := false.B
}
}
}
val w_q_head = RegInit(1.U(nEntries.W))
val w_q_tail = RegInit(1.U(nEntries.W))
val w_q_full = (w_q_tail.asBools zip w_q.map(_.valid)).map({ case (h,v) => h && v }).reduce(_||_)
val w_q_empty = !(w_q_head.asBools zip w_q.map(_.valid)).map({ case (h,v) => h && v }).reduce(_||_)
val wen = WireInit(false.B)
val wdata = Mux1H(w_q_head.asBools, w_q.map(_.data))
val wmask = Mux1H(w_q_head.asBools, w_q.map(_.mask))
val waddr = Mux1H(w_q_head.asBools, w_q.map(_.addr))
when (wen) {
w_q_head := (w_q_head << 1).asUInt | w_q_head(nEntries-1)
for (i <- 0 until nEntries) {
when (w_q_head(i)) {
w_q(i).valid := false.B
}
}
}
val w_q_push = oldest_pipelined_write.valid && isThisBank(oldest_pipelined_write.bits.addr)
when (w_q_push) {
assert(!w_q_full || wen, "we ran out of acc-sub-bank write q entries")
w_q_tail := (w_q_tail << 1).asUInt | w_q_tail(nEntries-1)
for (i <- 0 until nEntries) {
when (w_q_tail(i)) {
w_q(i).valid := true.B
w_q(i).data := Mux(oldest_pipelined_write.bits.acc, adder_sum, oldest_pipelined_write.bits.data).asTypeOf(Vec(mask_len, mask_elem))
w_q(i).mask := oldest_pipelined_write.bits.mask
w_q(i).addr := getBankIdx(oldest_pipelined_write.bits.addr)
}
}
}
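// w_q_head and w_q_tail are one-hot ring pointers over the nEntries slots: each
// drain or push rotates them left by one bit with wrap-around (for nEntries = 3,
// b001 -> b010 -> b100 -> b001), which lets Mux1H pick the head entry without a
// binary-to-one-hot decode.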
val bank_rdata = read(raddr, ren && !wen).asTypeOf(t)
when (RegNext(ren && rmw_req.valid && isThisBank(rmw_req.bits))) {
rdata_for_adder := bank_rdata
} .elsewhen (RegNext(ren)) {
rdata_for_read_resp := bank_rdata
}
when (wen) {
write(waddr, wdata, wmask)
}
// Three requestors, 1 slot
// Priority is (in descending order):
// 1. incoming reads for RMW
// 2. writes from RMW
// 3. incoming reads
when (rmw_req.fire && isThisBank(rmw_req.bits)) {
ren := true.B
when (isThisBank(only_read_req.bits)) {
only_read_req.ready := false.B
}
} .elsewhen (!w_q_empty) {
wen := true.B
when (isThisBank(only_read_req.bits)) {
only_read_req.ready := false.B
}
} .otherwise {
ren := isThisBank(only_read_req.bits) && only_read_req.fire
raddr := getBankIdx(only_read_req.bits)
}
when (reset.asBool) {
w_q.foreach(_.valid := false.B)
}
}
}
val q = Module(new Queue(new AccumulatorReadResp(t, scale_t), 1, true, true))
q.io.enq.bits.data := rdata_for_read_resp
if (is_dummy) {
rdata_for_read_resp := DontCare
rdata_for_adder := DontCare
}
q.io.enq.bits.scale := RegNext(io.read.req.bits.scale)
q.io.enq.bits.igelu_qb := RegNext(io.read.req.bits.igelu_qb)
q.io.enq.bits.igelu_qc := RegNext(io.read.req.bits.igelu_qc)
q.io.enq.bits.iexp_qln2 := RegNext(io.read.req.bits.iexp_qln2)
q.io.enq.bits.iexp_qln2_inv := RegNext(io.read.req.bits.iexp_qln2_inv)
q.io.enq.bits.act := RegNext(io.read.req.bits.act)
q.io.enq.bits.fromDMA := RegNext(io.read.req.bits.fromDMA)
q.io.enq.bits.acc_bank_id := DontCare
q.io.enq.valid := RegNext(io.read.req.fire)
val p = q.io.deq
io.read.resp.bits.data := p.bits.data
io.read.resp.bits.fromDMA := p.bits.fromDMA
io.read.resp.bits.igelu_qb := p.bits.igelu_qb
io.read.resp.bits.igelu_qc := p.bits.igelu_qc
io.read.resp.bits.iexp_qln2 := p.bits.iexp_qln2
io.read.resp.bits.iexp_qln2_inv := p.bits.iexp_qln2_inv
io.read.resp.bits.act := p.bits.act
io.read.resp.bits.scale := p.bits.scale
io.read.resp.bits.acc_bank_id := DontCare // This is set in Scratchpad
io.read.resp.valid := p.valid
p.ready := io.read.resp.ready
val q_will_be_empty = (q.io.count +& q.io.enq.fire) - q.io.deq.fire === 0.U
io.read.req.ready := q_will_be_empty && (
// Make sure we aren't accumulating, which would take over both ports
!(io.write.valid && io.write.bits.acc) &&
!pipelined_writes.map(r => r.valid && r.bits.addr === io.read.req.bits.addr).reduce(_||_) &&
!block_read_req
)
io.write.ready := !block_write_req &&
!pipelined_writes.map(r => r.valid && r.bits.addr === io.write.bits.addr && io.write.bits.acc).reduce(_||_)
when (reset.asBool) {
pipelined_writes.foreach(_.valid := false.B)
}
// assert(!(io.read.req.valid && io.write.en && io.write.acc), "reading and accumulating simultaneously is not supported")
assert(!(io.read.req.fire && io.write.fire && io.read.req.bits.addr === io.write.bits.addr), "reading from and writing to same address is not supported")
}
| module AccPipe_7( // @[AccumulatorMem.scala:63:7]
input clock, // @[AccumulatorMem.scala:63:7]
input reset, // @[AccumulatorMem.scala:63:7]
input [31:0] io_op1, // @[AccumulatorMem.scala:64:14]
input [31:0] io_op2, // @[AccumulatorMem.scala:64:14]
output [31:0] io_sum // @[AccumulatorMem.scala:64:14]
);
wire [31:0] io_op1_0 = io_op1; // @[AccumulatorMem.scala:63:7]
wire [31:0] io_op2_0 = io_op2; // @[AccumulatorMem.scala:63:7]
wire [31:0] io_sum_0; // @[AccumulatorMem.scala:63:7]
wire [32:0] _io_sum_T = {io_op1_0[31], io_op1_0} + {io_op2_0[31], io_op2_0}; // @[Arithmetic.scala:94:38]
wire [31:0] _io_sum_T_1 = _io_sum_T[31:0]; // @[Arithmetic.scala:94:38]
wire [31:0] _io_sum_T_2 = _io_sum_T_1; // @[Arithmetic.scala:94:38]
reg [31:0] io_sum_r; // @[AccumulatorMem.scala:70:26]
assign io_sum_0 = io_sum_r; // @[AccumulatorMem.scala:63:7, :70:26]
always @(posedge clock) // @[AccumulatorMem.scala:63:7]
io_sum_r <= _io_sum_T_2; // @[Arithmetic.scala:94:38]
assign io_sum = io_sum_0; // @[AccumulatorMem.scala:63:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
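// readAndHold sketch: on the cycle after "enable" fires, the fresh SyncReadMem read
// data is both returned and captured by holdUnless's RegEnable; on later cycles the
// captured value is returned instead, so consumers may sample the result late.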
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
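// E.g. for a 5-bit value with only bit 2 set: leftOR(b00100) = b11100 (ones spread
// toward the MSB) while rightOR(b00100) = b00111 (ones spread toward the LSB).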
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File SingleVCAllocator.scala:
package constellation.router
import chisel3._
import chisel3.util._
import chisel3.util.random.{LFSR}
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{ChannelRoutingInfo, FlowRoutingBundle}
// Allocates 1 VC per cycle
abstract class SingleVCAllocator(vP: VCAllocatorParams)(implicit p: Parameters) extends VCAllocator(vP)(p) {
// get single input
val mask = RegInit(0.U(allInParams.size.W))
val in_arb_reqs = Wire(Vec(allInParams.size, MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })))
val in_arb_vals = Wire(Vec(allInParams.size, Bool()))
val in_arb_filter = PriorityEncoderOH(Cat(in_arb_vals.asUInt, in_arb_vals.asUInt & ~mask))
val in_arb_sel = (in_arb_filter(allInParams.size-1,0) | (in_arb_filter >> allInParams.size))
when (in_arb_vals.orR) {
mask := Mux1H(in_arb_sel, (0 until allInParams.size).map { w => ~(0.U((w+1).W)) })
}
for (i <- 0 until allInParams.size) {
(0 until allOutParams.size).map { m =>
(0 until allOutParams(m).nVirtualChannels).map { n =>
in_arb_reqs(i)(m)(n) := io.req(i).bits.vc_sel(m)(n) && !io.channel_status(m)(n).occupied
}
}
in_arb_vals(i) := io.req(i).valid && in_arb_reqs(i).map(_.orR).toSeq.orR
}
// Input arbitration
io.req.foreach(_.ready := false.B)
val in_alloc = Wire(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val in_flow = Mux1H(in_arb_sel, io.req.map(_.bits.flow).toSeq)
val in_vc = Mux1H(in_arb_sel, io.req.map(_.bits.in_vc).toSeq)
val in_vc_sel = Mux1H(in_arb_sel, in_arb_reqs)
in_alloc := Mux(in_arb_vals.orR,
inputAllocPolicy(in_flow, in_vc_sel, OHToUInt(in_arb_sel), in_vc, io.req.map(_.fire).toSeq.orR),
0.U.asTypeOf(in_alloc))
// send allocation to inputunits
for (i <- 0 until allInParams.size) {
io.req(i).ready := in_arb_sel(i)
for (m <- 0 until allOutParams.size) {
(0 until allOutParams(m).nVirtualChannels).map { n =>
io.resp(i).vc_sel(m)(n) := in_alloc(m)(n)
}
}
assert(PopCount(io.resp(i).vc_sel.asUInt) <= 1.U)
}
// send allocation to output units
for (i <- 0 until allOutParams.size) {
(0 until allOutParams(i).nVirtualChannels).map { j =>
io.out_allocs(i)(j).alloc := in_alloc(i)(j)
io.out_allocs(i)(j).flow := in_flow
}
}
}
File VCAllocator.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import freechips.rocketchip.rocket.{DecodeLogic}
import constellation.channel._
import constellation.noc.{HasNoCParams}
import constellation.routing.{FlowRoutingBundle, FlowRoutingInfo, ChannelRoutingInfo}
class VCAllocReq(
val inParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams])
(implicit val p: Parameters) extends Bundle
with HasRouterOutputParams
with HasNoCParams {
val flow = new FlowRoutingBundle
val in_vc = UInt(log2Ceil(inParam.nVirtualChannels).W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
}
class VCAllocResp(val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
}
case class VCAllocatorParams(
routerParams: RouterParams,
inParams: Seq[ChannelParams],
outParams: Seq[ChannelParams],
ingressParams: Seq[IngressChannelParams],
egressParams: Seq[EgressChannelParams])
abstract class VCAllocator(val vP: VCAllocatorParams)(implicit val p: Parameters) extends Module
with HasRouterParams
with HasRouterInputParams
with HasRouterOutputParams
with HasNoCParams {
val routerParams = vP.routerParams
val inParams = vP.inParams
val outParams = vP.outParams
val ingressParams = vP.ingressParams
val egressParams = vP.egressParams
val io = IO(new Bundle {
val req = MixedVec(allInParams.map { u =>
Flipped(Decoupled(new VCAllocReq(u, outParams, egressParams)))
})
val resp = MixedVec(allInParams.map { u =>
Output(new VCAllocResp(outParams, egressParams))
})
val channel_status = MixedVec(allOutParams.map { u =>
Vec(u.nVirtualChannels, Input(new OutputChannelStatus)) })
val out_allocs = MixedVec(allOutParams.map { u =>
Vec(u.nVirtualChannels, Output(new OutputChannelAlloc)) })
})
val nOutChannels = allOutParams.map(_.nVirtualChannels).sum
def inputAllocPolicy(
flow: FlowRoutingBundle, vc_sel: MixedVec[Vec[Bool]],
inId: UInt, inVId: UInt, fire: Bool): MixedVec[Vec[Bool]]
def outputAllocPolicy(
out: ChannelRoutingInfo,
flows: Seq[FlowRoutingBundle], reqs: Seq[Bool], fire: Bool): Vec[Bool]
}
File ISLIP.scala:
package constellation.router
import chisel3._
import chisel3.util._
import chisel3.util.random.{LFSR}
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{ChannelRoutingInfo, FlowRoutingBundle}
trait ISLIP { this: VCAllocator =>
def islip(in: UInt, fire: Bool): UInt = {
val w = in.getWidth
if (w > 1) {
val mask = RegInit(0.U(w.W))
val full = Cat(in, in & ~mask)
val oh = PriorityEncoderOH(full)
val sel = (oh(w-1,0) | (oh >> w))
when (fire) {
mask := MuxCase(0.U, (0 until w).map { i =>
sel(i) -> ~(0.U((i+1).W))
})
}
sel
} else {
in
}
}
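// Rotating-priority example with four requesters: if in = b0110 and mask = b0001
// (the previous grant was index 0), then in & ~mask = b0110 and the priority encoder
// grants index 1, updating mask to b0011; with the same request vector the next
// grant goes to index 2, and only once the masked copy is empty does it wrap back.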
def inputAllocPolicy(flow: FlowRoutingBundle, vc_sel: MixedVec[Vec[Bool]], inId: UInt, inVId: UInt, fire: Bool) = {
islip(vc_sel.asUInt, fire).asTypeOf(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool())}))
}
def outputAllocPolicy(channel: ChannelRoutingInfo, flows: Seq[FlowRoutingBundle], reqs: Seq[Bool], fire: Bool) = {
islip(VecInit(reqs).asUInt, fire).asTypeOf(Vec(allInParams.size, Bool()))
}
}
class ISLIPMultiVCAllocator(vP: VCAllocatorParams)(implicit p: Parameters) extends MultiVCAllocator(vP)(p)
with ISLIP
class RotatingSingleVCAllocator(vP: VCAllocatorParams)(implicit p: Parameters) extends SingleVCAllocator(vP)(p)
with ISLIP
| module RotatingSingleVCAllocator_8( // @[ISLIP.scala:43:7]
input clock, // @[ISLIP.scala:43:7]
input reset, // @[ISLIP.scala:43:7]
output io_req_3_ready, // @[VCAllocator.scala:49:14]
input io_req_3_valid, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_2_0, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_1_0, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_0, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_1, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_2, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_3, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_4, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_5, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_6, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_7, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_8, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_9, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_10, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_11, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_12, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_13, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_14, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_15, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_16, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_17, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_18, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_19, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_20, // @[VCAllocator.scala:49:14]
input io_req_3_bits_vc_sel_0_21, // @[VCAllocator.scala:49:14]
output io_req_2_ready, // @[VCAllocator.scala:49:14]
input io_req_2_valid, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_2_0, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_1_0, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_0, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_1, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_2, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_3, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_4, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_5, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_6, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_7, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_8, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_9, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_10, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_11, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_12, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_13, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_14, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_15, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_16, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_17, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_18, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_19, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_20, // @[VCAllocator.scala:49:14]
input io_req_2_bits_vc_sel_0_21, // @[VCAllocator.scala:49:14]
output io_req_1_ready, // @[VCAllocator.scala:49:14]
input io_req_1_valid, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_2_0, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_1_0, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_0, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_1, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_2, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_3, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_4, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_5, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_6, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_7, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_8, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_9, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_10, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_11, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_12, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_13, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_14, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_15, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_16, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_17, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_18, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_19, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_20, // @[VCAllocator.scala:49:14]
input io_req_1_bits_vc_sel_0_21, // @[VCAllocator.scala:49:14]
output io_req_0_ready, // @[VCAllocator.scala:49:14]
input io_req_0_valid, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_2_0, // @[VCAllocator.scala:49:14]
input io_req_0_bits_vc_sel_1_0, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_2_0, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_1_0, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_0, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_1, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_2, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_3, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_4, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_5, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_6, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_7, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_8, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_9, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_10, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_11, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_12, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_13, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_14, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_15, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_16, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_17, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_18, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_19, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_20, // @[VCAllocator.scala:49:14]
output io_resp_3_vc_sel_0_21, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_2_0, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_1_0, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_0, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_1, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_2, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_3, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_4, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_5, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_6, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_7, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_8, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_9, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_10, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_11, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_12, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_13, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_14, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_15, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_16, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_17, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_18, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_19, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_20, // @[VCAllocator.scala:49:14]
output io_resp_2_vc_sel_0_21, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_2_0, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_1_0, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_0, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_1, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_2, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_3, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_4, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_5, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_6, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_7, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_8, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_9, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_10, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_11, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_12, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_13, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_14, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_15, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_16, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_17, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_18, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_19, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_20, // @[VCAllocator.scala:49:14]
output io_resp_1_vc_sel_0_21, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_2_0, // @[VCAllocator.scala:49:14]
output io_resp_0_vc_sel_1_0, // @[VCAllocator.scala:49:14]
input io_channel_status_2_0_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_1_0_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_10_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_11_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_14_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_15_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_18_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_19_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_20_occupied, // @[VCAllocator.scala:49:14]
input io_channel_status_0_21_occupied, // @[VCAllocator.scala:49:14]
output io_out_allocs_2_0_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_1_0_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_10_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_11_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_14_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_15_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_18_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_19_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_20_alloc, // @[VCAllocator.scala:49:14]
output io_out_allocs_0_21_alloc // @[VCAllocator.scala:49:14]
);
wire in_arb_vals_3; // @[SingleVCAllocator.scala:32:39]
wire in_arb_vals_2; // @[SingleVCAllocator.scala:32:39]
wire in_arb_vals_1; // @[SingleVCAllocator.scala:32:39]
wire in_arb_vals_0; // @[SingleVCAllocator.scala:32:39]
reg [3:0] mask; // @[SingleVCAllocator.scala:16:21]
wire [3:0] _in_arb_filter_T_3 = {in_arb_vals_3, in_arb_vals_2, in_arb_vals_1, in_arb_vals_0} & ~mask; // @[SingleVCAllocator.scala:16:21, :19:{77,84,86}, :32:39]
wire [7:0] in_arb_filter = _in_arb_filter_T_3[0] ? 8'h1 : _in_arb_filter_T_3[1] ? 8'h2 : _in_arb_filter_T_3[2] ? 8'h4 : _in_arb_filter_T_3[3] ? 8'h8 : in_arb_vals_0 ? 8'h10 : in_arb_vals_1 ? 8'h20 : in_arb_vals_2 ? 8'h40 : {in_arb_vals_3, 7'h0}; // @[OneHot.scala:85:71]
wire [3:0] in_arb_sel = in_arb_filter[3:0] | in_arb_filter[7:4]; // @[Mux.scala:50:70]
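  // in_arb_sel is the rotating-priority (round-robin) grant over the four input
  // requests: requests above the previously granted index win first; if none are
  // present, the pick wraps around through the unmasked copy of the request vector.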
wire _GEN = in_arb_vals_0 | in_arb_vals_1 | in_arb_vals_2 | in_arb_vals_3; // @[package.scala:81:59]
wire in_arb_reqs_0_1_0 = io_req_0_bits_vc_sel_1_0 & ~io_channel_status_1_0_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_0_2_0 = io_req_0_bits_vc_sel_2_0 & ~io_channel_status_2_0_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
assign in_arb_vals_0 = io_req_0_valid & (in_arb_reqs_0_1_0 | in_arb_reqs_0_2_0); // @[package.scala:81:59]
wire in_arb_reqs_1_0_10 = io_req_1_bits_vc_sel_0_10 & ~io_channel_status_0_10_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_11 = io_req_1_bits_vc_sel_0_11 & ~io_channel_status_0_11_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_14 = io_req_1_bits_vc_sel_0_14 & ~io_channel_status_0_14_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_15 = io_req_1_bits_vc_sel_0_15 & ~io_channel_status_0_15_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_18 = io_req_1_bits_vc_sel_0_18 & ~io_channel_status_0_18_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_19 = io_req_1_bits_vc_sel_0_19 & ~io_channel_status_0_19_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_20 = io_req_1_bits_vc_sel_0_20 & ~io_channel_status_0_20_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_0_21 = io_req_1_bits_vc_sel_0_21 & ~io_channel_status_0_21_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_1_0 = io_req_1_bits_vc_sel_1_0 & ~io_channel_status_1_0_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_1_2_0 = io_req_1_bits_vc_sel_2_0 & ~io_channel_status_2_0_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
assign in_arb_vals_1 = io_req_1_valid & (io_req_1_bits_vc_sel_0_0 | io_req_1_bits_vc_sel_0_1 | io_req_1_bits_vc_sel_0_2 | io_req_1_bits_vc_sel_0_3 | io_req_1_bits_vc_sel_0_4 | io_req_1_bits_vc_sel_0_5 | io_req_1_bits_vc_sel_0_6 | io_req_1_bits_vc_sel_0_7 | io_req_1_bits_vc_sel_0_8 | io_req_1_bits_vc_sel_0_9 | in_arb_reqs_1_0_10 | in_arb_reqs_1_0_11 | io_req_1_bits_vc_sel_0_12 | io_req_1_bits_vc_sel_0_13 | in_arb_reqs_1_0_14 | in_arb_reqs_1_0_15 | io_req_1_bits_vc_sel_0_16 | io_req_1_bits_vc_sel_0_17 | in_arb_reqs_1_0_18 | in_arb_reqs_1_0_19 | in_arb_reqs_1_0_20 | in_arb_reqs_1_0_21 | in_arb_reqs_1_1_0 | in_arb_reqs_1_2_0); // @[package.scala:81:59]
wire in_arb_reqs_2_0_10 = io_req_2_bits_vc_sel_0_10 & ~io_channel_status_0_10_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_2_0_11 = io_req_2_bits_vc_sel_0_11 & ~io_channel_status_0_11_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_2_0_14 = io_req_2_bits_vc_sel_0_14 & ~io_channel_status_0_14_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_2_0_15 = io_req_2_bits_vc_sel_0_15 & ~io_channel_status_0_15_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_2_0_18 = io_req_2_bits_vc_sel_0_18 & ~io_channel_status_0_18_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_2_0_19 = io_req_2_bits_vc_sel_0_19 & ~io_channel_status_0_19_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_2_0_20 = io_req_2_bits_vc_sel_0_20 & ~io_channel_status_0_20_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_2_0_21 = io_req_2_bits_vc_sel_0_21 & ~io_channel_status_0_21_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_2_1_0 = io_req_2_bits_vc_sel_1_0 & ~io_channel_status_1_0_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_2_2_0 = io_req_2_bits_vc_sel_2_0 & ~io_channel_status_2_0_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
assign in_arb_vals_2 = io_req_2_valid & (io_req_2_bits_vc_sel_0_0 | io_req_2_bits_vc_sel_0_1 | io_req_2_bits_vc_sel_0_2 | io_req_2_bits_vc_sel_0_3 | io_req_2_bits_vc_sel_0_4 | io_req_2_bits_vc_sel_0_5 | io_req_2_bits_vc_sel_0_6 | io_req_2_bits_vc_sel_0_7 | io_req_2_bits_vc_sel_0_8 | io_req_2_bits_vc_sel_0_9 | in_arb_reqs_2_0_10 | in_arb_reqs_2_0_11 | io_req_2_bits_vc_sel_0_12 | io_req_2_bits_vc_sel_0_13 | in_arb_reqs_2_0_14 | in_arb_reqs_2_0_15 | io_req_2_bits_vc_sel_0_16 | io_req_2_bits_vc_sel_0_17 | in_arb_reqs_2_0_18 | in_arb_reqs_2_0_19 | in_arb_reqs_2_0_20 | in_arb_reqs_2_0_21 | in_arb_reqs_2_1_0 | in_arb_reqs_2_2_0); // @[package.scala:81:59]
wire in_arb_reqs_3_0_10 = io_req_3_bits_vc_sel_0_10 & ~io_channel_status_0_10_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_3_0_11 = io_req_3_bits_vc_sel_0_11 & ~io_channel_status_0_11_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_3_0_14 = io_req_3_bits_vc_sel_0_14 & ~io_channel_status_0_14_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_3_0_15 = io_req_3_bits_vc_sel_0_15 & ~io_channel_status_0_15_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_3_0_18 = io_req_3_bits_vc_sel_0_18 & ~io_channel_status_0_18_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_3_0_19 = io_req_3_bits_vc_sel_0_19 & ~io_channel_status_0_19_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_3_0_20 = io_req_3_bits_vc_sel_0_20 & ~io_channel_status_0_20_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_3_0_21 = io_req_3_bits_vc_sel_0_21 & ~io_channel_status_0_21_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_3_1_0 = io_req_3_bits_vc_sel_1_0 & ~io_channel_status_1_0_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
wire in_arb_reqs_3_2_0 = io_req_3_bits_vc_sel_2_0 & ~io_channel_status_2_0_occupied; // @[SingleVCAllocator.scala:28:{61,64}]
assign in_arb_vals_3 = io_req_3_valid & (io_req_3_bits_vc_sel_0_0 | io_req_3_bits_vc_sel_0_1 | io_req_3_bits_vc_sel_0_2 | io_req_3_bits_vc_sel_0_3 | io_req_3_bits_vc_sel_0_4 | io_req_3_bits_vc_sel_0_5 | io_req_3_bits_vc_sel_0_6 | io_req_3_bits_vc_sel_0_7 | io_req_3_bits_vc_sel_0_8 | io_req_3_bits_vc_sel_0_9 | in_arb_reqs_3_0_10 | in_arb_reqs_3_0_11 | io_req_3_bits_vc_sel_0_12 | io_req_3_bits_vc_sel_0_13 | in_arb_reqs_3_0_14 | in_arb_reqs_3_0_15 | io_req_3_bits_vc_sel_0_16 | io_req_3_bits_vc_sel_0_17 | in_arb_reqs_3_0_18 | in_arb_reqs_3_0_19 | in_arb_reqs_3_0_20 | in_arb_reqs_3_0_21 | in_arb_reqs_3_1_0 | in_arb_reqs_3_2_0); // @[package.scala:81:59]
wire _in_vc_sel_T_10 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_0 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_0 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_0; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_17 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_1 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_1 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_1; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_24 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_2 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_2 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_2; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_31 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_3 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_3 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_3; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_38 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_4 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_4 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_4; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_45 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_5 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_5 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_5; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_52 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_6 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_6 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_6; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_59 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_7 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_7 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_7; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_66 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_8 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_8 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_8; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_73 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_9 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_9 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_9; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_80 = in_arb_sel[1] & in_arb_reqs_1_0_10 | in_arb_sel[2] & in_arb_reqs_2_0_10 | in_arb_sel[3] & in_arb_reqs_3_0_10; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_87 = in_arb_sel[1] & in_arb_reqs_1_0_11 | in_arb_sel[2] & in_arb_reqs_2_0_11 | in_arb_sel[3] & in_arb_reqs_3_0_11; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_94 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_12 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_12 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_12; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_101 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_13 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_13 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_13; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_108 = in_arb_sel[1] & in_arb_reqs_1_0_14 | in_arb_sel[2] & in_arb_reqs_2_0_14 | in_arb_sel[3] & in_arb_reqs_3_0_14; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_115 = in_arb_sel[1] & in_arb_reqs_1_0_15 | in_arb_sel[2] & in_arb_reqs_2_0_15 | in_arb_sel[3] & in_arb_reqs_3_0_15; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_122 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_16 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_16 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_16; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_129 = in_arb_sel[1] & io_req_1_bits_vc_sel_0_17 | in_arb_sel[2] & io_req_2_bits_vc_sel_0_17 | in_arb_sel[3] & io_req_3_bits_vc_sel_0_17; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_136 = in_arb_sel[1] & in_arb_reqs_1_0_18 | in_arb_sel[2] & in_arb_reqs_2_0_18 | in_arb_sel[3] & in_arb_reqs_3_0_18; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_143 = in_arb_sel[1] & in_arb_reqs_1_0_19 | in_arb_sel[2] & in_arb_reqs_2_0_19 | in_arb_sel[3] & in_arb_reqs_3_0_19; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_150 = in_arb_sel[1] & in_arb_reqs_1_0_20 | in_arb_sel[2] & in_arb_reqs_2_0_20 | in_arb_sel[3] & in_arb_reqs_3_0_20; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_157 = in_arb_sel[1] & in_arb_reqs_1_0_21 | in_arb_sel[2] & in_arb_reqs_2_0_21 | in_arb_sel[3] & in_arb_reqs_3_0_21; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_164 = in_arb_sel[0] & in_arb_reqs_0_1_0 | in_arb_sel[1] & in_arb_reqs_1_1_0 | in_arb_sel[2] & in_arb_reqs_2_1_0 | in_arb_sel[3] & in_arb_reqs_3_1_0; // @[Mux.scala:30:73, :32:36]
wire _in_vc_sel_T_171 = in_arb_sel[0] & in_arb_reqs_0_2_0 | in_arb_sel[1] & in_arb_reqs_1_2_0 | in_arb_sel[2] & in_arb_reqs_2_2_0 | in_arb_sel[3] & in_arb_reqs_3_2_0; // @[Mux.scala:30:73, :32:36]
reg [23:0] mask_1; // @[ISLIP.scala:17:25]
wire [23:0] _full_T_1 = {_in_vc_sel_T_171, _in_vc_sel_T_164, _in_vc_sel_T_157, _in_vc_sel_T_150, _in_vc_sel_T_143, _in_vc_sel_T_136, _in_vc_sel_T_129, _in_vc_sel_T_122, _in_vc_sel_T_115, _in_vc_sel_T_108, _in_vc_sel_T_101, _in_vc_sel_T_94, _in_vc_sel_T_87, _in_vc_sel_T_80, _in_vc_sel_T_73, _in_vc_sel_T_66, _in_vc_sel_T_59, _in_vc_sel_T_52, _in_vc_sel_T_45, _in_vc_sel_T_38, _in_vc_sel_T_31, _in_vc_sel_T_24, _in_vc_sel_T_17, _in_vc_sel_T_10} & ~mask_1; // @[Mux.scala:30:73]
wire [47:0] oh = _full_T_1[0] ? 48'h1 : _full_T_1[1] ? 48'h2 : _full_T_1[2] ? 48'h4 : _full_T_1[3] ? 48'h8 : _full_T_1[4] ? 48'h10 : _full_T_1[5] ? 48'h20 : _full_T_1[6] ? 48'h40 : _full_T_1[7] ? 48'h80 : _full_T_1[8] ? 48'h100 : _full_T_1[9] ? 48'h200 : _full_T_1[10] ? 48'h400 : _full_T_1[11] ? 48'h800 : _full_T_1[12] ? 48'h1000 : _full_T_1[13] ? 48'h2000 : _full_T_1[14] ? 48'h4000 : _full_T_1[15] ? 48'h8000 : _full_T_1[16] ? 48'h10000 : _full_T_1[17] ? 48'h20000 : _full_T_1[18] ? 48'h40000 : _full_T_1[19] ? 48'h80000 : _full_T_1[20] ? 48'h100000 : _full_T_1[21] ? 48'h200000 : _full_T_1[22] ? 48'h400000 : _full_T_1[23] ? 48'h800000 : _in_vc_sel_T_10 ? 48'h1000000 : _in_vc_sel_T_17 ? 48'h2000000 : _in_vc_sel_T_24 ? 48'h4000000 : _in_vc_sel_T_31 ? 48'h8000000 : _in_vc_sel_T_38 ? 48'h10000000 : _in_vc_sel_T_45 ? 48'h20000000 : _in_vc_sel_T_52 ? 48'h40000000 : _in_vc_sel_T_59 ? 48'h80000000 : _in_vc_sel_T_66 ? 48'h100000000 : _in_vc_sel_T_73 ? 48'h200000000 : _in_vc_sel_T_80 ? 48'h400000000 : _in_vc_sel_T_87 ? 48'h800000000 : _in_vc_sel_T_94 ? 48'h1000000000 : _in_vc_sel_T_101 ? 48'h2000000000 : _in_vc_sel_T_108 ? 48'h4000000000 : _in_vc_sel_T_115 ? 48'h8000000000 : _in_vc_sel_T_122 ? 48'h10000000000 : _in_vc_sel_T_129 ? 48'h20000000000 : _in_vc_sel_T_136 ? 48'h40000000000 : _in_vc_sel_T_143 ? 48'h80000000000 : _in_vc_sel_T_150 ? 48'h100000000000 : _in_vc_sel_T_157 ? 48'h200000000000 : _in_vc_sel_T_164 ? 48'h400000000000 : {_in_vc_sel_T_171, 47'h0}; // @[OneHot.scala:85:71]
wire [23:0] sel = oh[23:0] | oh[47:24]; // @[Mux.scala:50:70]
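  // sel is the (at most one-hot) output-VC grant, produced by the same rotating-priority
  // scheme over the winning input's 24 candidate output VCs (22 VCs on output 0 plus one
  // VC each on outputs 1 and 2).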
wire in_alloc_2_0 = _GEN & sel[23]; // @[package.scala:81:59]
wire in_alloc_1_0 = _GEN & sel[22]; // @[package.scala:81:59]
wire in_alloc_0_0 = _GEN & sel[0]; // @[package.scala:81:59]
wire in_alloc_0_1 = _GEN & sel[1]; // @[package.scala:81:59]
wire in_alloc_0_2 = _GEN & sel[2]; // @[package.scala:81:59]
wire in_alloc_0_3 = _GEN & sel[3]; // @[package.scala:81:59]
wire in_alloc_0_4 = _GEN & sel[4]; // @[package.scala:81:59]
wire in_alloc_0_5 = _GEN & sel[5]; // @[package.scala:81:59]
wire in_alloc_0_6 = _GEN & sel[6]; // @[package.scala:81:59]
wire in_alloc_0_7 = _GEN & sel[7]; // @[package.scala:81:59]
wire in_alloc_0_8 = _GEN & sel[8]; // @[package.scala:81:59]
wire in_alloc_0_9 = _GEN & sel[9]; // @[package.scala:81:59]
wire in_alloc_0_10 = _GEN & sel[10]; // @[package.scala:81:59]
wire in_alloc_0_11 = _GEN & sel[11]; // @[package.scala:81:59]
wire in_alloc_0_12 = _GEN & sel[12]; // @[package.scala:81:59]
wire in_alloc_0_13 = _GEN & sel[13]; // @[package.scala:81:59]
wire in_alloc_0_14 = _GEN & sel[14]; // @[package.scala:81:59]
wire in_alloc_0_15 = _GEN & sel[15]; // @[package.scala:81:59]
wire in_alloc_0_16 = _GEN & sel[16]; // @[package.scala:81:59]
wire in_alloc_0_17 = _GEN & sel[17]; // @[package.scala:81:59]
wire in_alloc_0_18 = _GEN & sel[18]; // @[package.scala:81:59]
wire in_alloc_0_19 = _GEN & sel[19]; // @[package.scala:81:59]
wire in_alloc_0_20 = _GEN & sel[20]; // @[package.scala:81:59]
wire in_alloc_0_21 = _GEN & sel[21]; // @[package.scala:81:59] |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
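// Hypothetical usage sketch (illustration only; raw_in and raw_bit are assumed Bool
// signals, not part of the original file):
//   val pipe_out = ShiftRegInit(raw_in, n = 2, init = false.B, name = Some("pipe"))
//   val cdc_out  = AsyncResetShiftReg(raw_bit, depth = 3, init = 0, name = Some("cdc"))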
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
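// Hypothetical usage sketch (illustration only; async_in is an assumed Bool signal,
// not part of the original file): bring an asynchronous input into the local clock
// domain through a 3-deep, asynchronously reset synchronizer chain:
//   val sync_in = AsyncResetSynchronizerShiftReg(async_in, sync = 3, init = 0)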
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_370( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0;
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
 * W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
| module AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_65( // @[SynchronizerReg.scala:68:19]
input clock, // @[SynchronizerReg.scala:68:19]
input reset, // @[SynchronizerReg.scala:68:19]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire _sync_2_T = 1'h1; // @[SynchronizerReg.scala:54:22, :68:19]
wire io_q_0; // @[SynchronizerReg.scala:68:19]
reg sync_0; // @[SynchronizerReg.scala:51:87]
assign io_q_0 = sync_0; // @[SynchronizerReg.scala:51:87, :68:19]
reg sync_1; // @[SynchronizerReg.scala:51:87]
reg sync_2; // @[SynchronizerReg.scala:51:87]
always @(posedge clock or posedge reset) begin // @[SynchronizerReg.scala:68:19]
if (reset) begin // @[SynchronizerReg.scala:68:19]
sync_0 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_1 <= 1'h0; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h0; // @[SynchronizerReg.scala:51:87]
end
else begin // @[SynchronizerReg.scala:68:19]
sync_0 <= sync_1; // @[SynchronizerReg.scala:51:87]
sync_1 <= sync_2; // @[SynchronizerReg.scala:51:87]
sync_2 <= 1'h1; // @[SynchronizerReg.scala:51:87, :54:22, :68:19]
end
  end // always @(posedge, posedge)
  assign io_q = io_q_0;
endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Transposer.scala:
package gemmini
import chisel3._
import chisel3.util._
import Util._
trait Transposer[T <: Data] extends Module {
def dim: Int
def dataType: T
val io = IO(new Bundle {
val inRow = Flipped(Decoupled(Vec(dim, dataType)))
val outCol = Decoupled(Vec(dim, dataType))
})
}
class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
val sMoveUp :: sMoveLeft :: Nil = Enum(2)
val state = RegInit(sMoveUp)
val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1)
val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1)
io.outCol.valid := 0.U
io.inRow.ready := 0.U
switch(state) {
is(sMoveUp) {
io.inRow.ready := upCounter <= dim.U
io.outCol.valid := leftCounter > 0.U
when(io.inRow.fire) {
upCounter := upCounter + 1.U
}
when(upCounter === (dim-1).U) {
state := sMoveLeft
leftCounter := 0.U
}
when(io.outCol.fire) {
leftCounter := leftCounter - 1.U
}
}
is(sMoveLeft) {
io.inRow.ready := leftCounter <= dim.U // TODO: this is naive
io.outCol.valid := upCounter > 0.U
when(leftCounter === (dim-1).U) {
state := sMoveUp
}
when(io.inRow.fire) {
leftCounter := leftCounter + 1.U
upCounter := 0.U
}
when(io.outCol.fire) {
upCounter := upCounter - 1.U
}
}
}
// Propagate input from bottom row to top row systolically in the move up phase
// TODO: need to iterate over columns to connect Chisel values of type T
// Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?)
for (colIdx <- 0 until dim) {
regArray.foldRight(io.inRow.bits(colIdx)) {
case (regRow, prevReg) =>
when (state === sMoveUp) {
regRow(colIdx) := prevReg
}
regRow(colIdx)
}
}
// Propagate input from right side to left side systolically in the move left phase
for (rowIdx <- 0 until dim) {
regArrayT.foldRight(io.inRow.bits(rowIdx)) {
case (regCol, prevReg) =>
when (state === sMoveLeft) {
regCol(rowIdx) := prevReg
}
regCol(rowIdx)
}
}
// Pull from the left side or the top side based on the state
for (idx <- 0 until dim) {
when (state === sMoveUp) {
io.outCol.bits(idx) := regArray(0)(idx)
}.elsewhen(state === sMoveLeft) {
io.outCol.bits(idx) := regArrayT(0)(idx)
}.otherwise {
io.outCol.bits(idx) := DontCare
}
}
}
class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
require(isPow2(dim))
val LEFT_DIR = 0.U(1.W)
val UP_DIR = 1.U(1.W)
class PE extends Module {
val io = IO(new Bundle {
val inR = Input(dataType)
val inD = Input(dataType)
val outL = Output(dataType)
val outU = Output(dataType)
val dir = Input(UInt(1.W))
val en = Input(Bool())
})
val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en)
io.outU := reg
io.outL := reg
}
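  // The PE above is a single retiming element: whenever io.en is asserted it captures
  // io.inR when io.dir is LEFT_DIR and io.inD otherwise, and drives the captured value
  // on both io.outL and io.outU.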
val pes = Seq.fill(dim,dim)(Module(new PE))
val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter
val dir = RegInit(LEFT_DIR)
// Wire up horizontal signals
for (row <- 0 until dim; col <- 0 until dim) {
val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL
pes(row)(col).io.inR := right_in
}
// Wire up vertical signals
for (row <- 0 until dim; col <- 0 until dim) {
val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU
pes(row)(col).io.inD := down_in
}
// Wire up global signals
pes.flatten.foreach(_.io.dir := dir)
pes.flatten.foreach(_.io.en := io.inRow.fire)
io.outCol.valid := true.B
io.inRow.ready := true.B
val left_out = VecInit(pes.transpose.head.map(_.io.outL))
val up_out = VecInit(pes.head.map(_.io.outU))
io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out)
when (io.inRow.fire) {
counter := wrappingAdd(counter, 1.U, dim)
}
when (counter === (dim-1).U && io.inRow.fire) {
dir := ~dir
}
}
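// Rough dataflow note (informal summary, not part of the original source): every PE
// shares one direction bit, which toggles after each group of `dim` accepted rows.
// While the array shifts left, a new tile streams in from the right edge and the
// previously loaded tile is read out through the left column; while it shifts up, the
// input moves to the bottom edge and the output is taken from the top row. Alternating
// the two directions lets a row-major input stream emerge column-major (transposed)
// with no idle output beats, hence the name AlwaysOutTransposer.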
class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] {
val regArray = Seq.fill(dim, dim)(Reg(dataType))
val regArrayT = regArray.transpose
// state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise
val state = RegInit(0.U(1.W))
val countInc = io.inRow.fire || io.outCol.fire
val (countValue, countWrap) = Counter(countInc, dim)
io.inRow.ready := state === 0.U
io.outCol.valid := state === 1.U
for (i <- 0 until dim) {
for (j <- 0 until dim) {
when(countValue === i.U && io.inRow.fire) {
regArray(i)(j) := io.inRow.bits(j)
}
}
}
for (i <- 0 until dim) {
io.outCol.bits(i) := 0.U
for (j <- 0 until dim) {
when(countValue === j.U) {
io.outCol.bits(i) := regArrayT(j)(i)
}
}
}
when (io.inRow.fire && countWrap) {
state := 1.U
}
when (io.outCol.fire && countWrap) {
state := 0.U
}
assert(!(state === 0.U) || !io.outCol.fire)
assert(!(state === 1.U) || !io.inRow.fire)
}
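// Hypothetical usage sketch (illustration only; producer and consumer are assumed
// Decoupled[Vec[SInt]] endpoints, not part of the original file):
//   val xposer = Module(new AlwaysOutTransposer(8, SInt(8.W)))
//   xposer.io.inRow <> producer
//   consumer <> xposer.io.outCol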
| module PE_162( // @[Transposer.scala:100:9]
input clock, // @[Transposer.scala:100:9]
input reset, // @[Transposer.scala:100:9]
input [7:0] io_inR, // @[Transposer.scala:101:16]
input [7:0] io_inD, // @[Transposer.scala:101:16]
output [7:0] io_outL, // @[Transposer.scala:101:16]
output [7:0] io_outU, // @[Transposer.scala:101:16]
input io_dir, // @[Transposer.scala:101:16]
input io_en // @[Transposer.scala:101:16]
);
wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9]
wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9]
wire io_dir_0 = io_dir; // @[Transposer.scala:100:9]
wire io_en_0 = io_en; // @[Transposer.scala:100:9]
wire [7:0] io_outL_0; // @[Transposer.scala:100:9]
wire [7:0] io_outU_0; // @[Transposer.scala:100:9]
wire _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36]
wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}]
reg [7:0] reg_0; // @[Transposer.scala:110:24]
assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
always @(posedge clock) begin // @[Transposer.scala:100:9]
if (io_en_0) // @[Transposer.scala:100:9]
reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}]
  end // always @(posedge)
assign io_outL = io_outL_0; // @[Transposer.scala:100:9]
assign io_outU = io_outU_0; // @[Transposer.scala:100:9]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Monitor.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg
case class TLMonitorArgs(edge: TLEdge)
abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
val io = IO(new Bundle {
val in = Input(new TLBundle(args.edge.bundle))
})
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
legalize(io.in, args.edge, reset)
}
object TLMonitor {
def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
if (enable) {
EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
} else { node }
}
}
class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))
val cover_prop_class = PropertyClass.Default
//Like assert but can flip to being an assumption for formal verification
def monAssert(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir, cond, message, PropertyClass.Default)
}
def assume(cond: Bool, message: String): Unit =
if (monitorDir == MonitorDirection.Monitor) {
assert(cond, message)
} else {
Property(monitorDir.flip, cond, message, PropertyClass.Default)
}
def extra = {
args.edge.sourceInfo match {
case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
case _ => ""
}
}
def visible(address: UInt, source: UInt, edge: TLEdge) =
edge.client.clients.map { c =>
!c.sourceId.contains(source) ||
c.visibility.map(_.contains(address)).reduce(_ || _)
}.reduce(_ && _)
def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
//switch this flag to turn on diplomacy in error messages
def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"
monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)
// Reuse these subexpressions to save some firrtl lines
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")
//The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
//TODO: check for acquireT?
when (bundle.opcode === TLMessages.AcquireBlock) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AcquirePerm) {
monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
}
}
def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")
// Reuse these subexpressions to save some firrtl lines
val address_ok = edge.manager.containsSafe(edge.address(bundle))
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val mask = edge.full_mask(bundle)
val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source
when (bundle.opcode === TLMessages.Probe) {
assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
}
when (bundle.opcode === TLMessages.Get) {
monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
}
when (bundle.opcode === TLMessages.PutFullData) {
monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.PutPartialData) {
monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.ArithmeticData) {
monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.LogicalData) {
monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
}
when (bundle.opcode === TLMessages.Hint) {
monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
}
}
def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val is_aligned = edge.isAligned(bundle.address, bundle.size)
val address_ok = edge.manager.containsSafe(edge.address(bundle))
monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")
when (bundle.opcode === TLMessages.ProbeAck) {
monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ProbeAckData) {
monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.Release) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
}
when (bundle.opcode === TLMessages.ReleaseData) {
monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
}
}
def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)
val source_ok = edge.client.contains(bundle.source)
val sink_ok = bundle.sink < edge.manager.endSinkId.U
val deny_put_ok = edge.manager.mayDenyPut.B
val deny_get_ok = edge.manager.mayDenyGet.B
when (bundle.opcode === TLMessages.ReleaseAck) {
assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
}
when (bundle.opcode === TLMessages.Grant) {
assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
assume (sink_ok, "'D' channel Grant carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra)
assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra)
}
when (bundle.opcode === TLMessages.GrantData) {
assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra)
assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra)
assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra)
assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra)
assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAck) {
assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra)
}
when (bundle.opcode === TLMessages.AccessAckData) {
assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra)
assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra)
assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra)
}
when (bundle.opcode === TLMessages.HintAck) {
assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra)
// size is ignored
assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra)
assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra)
assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra)
}
}
def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = {
val sink_ok = bundle.sink < edge.manager.endSinkId.U
    monAssert (sink_ok, "'E' channel carries invalid sink ID" + extra)
}
def legalizeFormat(bundle: TLBundle, edge: TLEdge) = {
when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) }
when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) }
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) }
when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) }
when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) }
} else {
monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra)
monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra)
monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra)
}
}
def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = {
val a_first = edge.first(a.bits, a.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (a.valid && !a_first) {
monAssert (a.bits.opcode === opcode, "'A' channel opcode changed within multibeat operation" + extra)
monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra)
monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra)
monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra)
monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra)
}
when (a.fire && a_first) {
opcode := a.bits.opcode
param := a.bits.param
size := a.bits.size
source := a.bits.source
address := a.bits.address
}
}
def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = {
val b_first = edge.first(b.bits, b.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (b.valid && !b_first) {
monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra)
monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra)
monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra)
monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra)
      monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra)
}
when (b.fire && b_first) {
opcode := b.bits.opcode
param := b.bits.param
size := b.bits.size
source := b.bits.source
address := b.bits.address
}
}
def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = {
// Symbolic variable
val sym_source = Wire(UInt(edge.client.endSourceId.W))
// TODO: Connect sym_source to a fixed value for simulation and to a
// free wire in formal
sym_source := 0.U
// Type casting Int to UInt
val maxSourceId = Wire(UInt(edge.client.endSourceId.W))
maxSourceId := edge.client.endSourceId.U
    // Delayed version of sym_source
val sym_source_d = Reg(UInt(edge.client.endSourceId.W))
sym_source_d := sym_source
// These will be constraints for FV setup
Property(
MonitorDirection.Monitor,
(sym_source === sym_source_d),
"sym_source should remain stable",
PropertyClass.Default)
Property(
MonitorDirection.Monitor,
(sym_source <= maxSourceId),
"sym_source should take legal value",
PropertyClass.Default)
val my_resp_pend = RegInit(false.B)
val my_opcode = Reg(UInt())
val my_size = Reg(UInt())
val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire)
val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire)
val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source)
val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source)
val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat)
val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend)
when (my_set_resp_pend) {
my_resp_pend := true.B
} .elsewhen (my_clr_resp_pend) {
my_resp_pend := false.B
}
when (my_a_first_beat) {
my_opcode := bundle.a.bits.opcode
my_size := bundle.a.bits.size
}
val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size)
val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode)
val my_resp_opcode_legal = Wire(Bool())
when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) ||
(my_resp_opcode === TLMessages.LogicalData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData)
} .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAck)
} .otherwise {
my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck)
}
monAssert (IfThen(my_resp_pend, !my_a_first_beat),
"Request message should not be sent with a source ID, for which a response message" +
"is already pending (not received until current cycle) for a prior request message" +
"with the same source ID" + extra)
assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)),
"Response message should be accepted with a source ID only if a request message with the" +
"same source ID has been accepted or is being accepted in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)),
"Response message should be sent with a source ID only if a request message with the" +
"same source ID has been accepted or is being sent in the current cycle" + extra)
assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)),
"If d_valid is 1, then d_size should be same as a_size of the corresponding request" +
"message" + extra)
assume (IfThen(my_d_first_beat, my_resp_opcode_legal),
"If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" +
"request message" + extra)
}
def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = {
val c_first = edge.first(c.bits, c.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val address = Reg(UInt())
when (c.valid && !c_first) {
monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra)
monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra)
monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra)
monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra)
monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra)
}
when (c.fire && c_first) {
opcode := c.bits.opcode
param := c.bits.param
size := c.bits.size
source := c.bits.source
address := c.bits.address
}
}
def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = {
val d_first = edge.first(d.bits, d.fire)
val opcode = Reg(UInt())
val param = Reg(UInt())
val size = Reg(UInt())
val source = Reg(UInt())
val sink = Reg(UInt())
val denied = Reg(Bool())
when (d.valid && !d_first) {
assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra)
assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra)
assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra)
assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra)
assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra)
assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra)
}
when (d.fire && d_first) {
opcode := d.bits.opcode
param := d.bits.param
size := d.bits.size
source := d.bits.source
sink := d.bits.sink
denied := d.bits.denied
}
}
def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = {
legalizeMultibeatA(bundle.a, edge)
legalizeMultibeatD(bundle.d, edge)
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
legalizeMultibeatB(bundle.b, edge)
legalizeMultibeatC(bundle.c, edge)
}
}
//This is left in for almond which doesn't adhere to the tilelink protocol
@deprecated("Use legalizeADSource instead if possible","")
def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.client.endSourceId.W))
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val a_set = WireInit(0.U(edge.client.endSourceId.W))
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra)
}
if (edge.manager.minLatency > 0) {
assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = {
val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size)
val log_a_size_bus_size = log2Ceil(a_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
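    // Worked example (illustrative, not part of the original source): with sizeBits = 4,
    // a_size_bus_size is 5, and a request of size 2.U is recorded as (2 << 1) | 1 = 5, so an
    // all-zero field means "nothing in flight" for that source. Likewise
    // size_to_numfullbits(3.U) = (1.U << 3) - 1.U = 7.U = 0b111, a mask of three ones.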
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error
inflight.suggestName("inflight")
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
inflight_opcodes.suggestName("inflight_opcodes")
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
inflight_sizes.suggestName("inflight_sizes")
val a_first = edge.first(bundle.a.bits, bundle.a.fire)
a_first.suggestName("a_first")
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
d_first.suggestName("d_first")
val a_set = WireInit(0.U(edge.client.endSourceId.W))
val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
a_set.suggestName("a_set")
a_set_wo_ready.suggestName("a_set_wo_ready")
val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
a_opcodes_set.suggestName("a_opcodes_set")
val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
a_sizes_set.suggestName("a_sizes_set")
val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W))
a_opcode_lookup.suggestName("a_opcode_lookup")
a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U
val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W))
a_size_lookup.suggestName("a_size_lookup")
a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U
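    // Descriptive note: each source ID owns a fixed-width field in the flattened
    // inflight_sizes/inflight_opcodes vectors; the two lookups above shift that field down
    // to bit 0, mask it off, and drop the LSB "in-flight" flag, recovering the size and
    // opcode recorded when the matching 'A' request fired.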
val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant))
val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant))
val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W))
a_opcodes_set_interm.suggestName("a_opcodes_set_interm")
val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W))
a_sizes_set_interm.suggestName("a_sizes_set_interm")
when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) {
a_set_wo_ready := UIntToOH(bundle.a.bits.source)
}
when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) {
a_set := UIntToOH(bundle.a.bits.source)
a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U
a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U
a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U)
a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U)
monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra)
}
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W))
d_opcodes_clr.suggestName("d_opcodes_clr")
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W))
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) {
val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) ||
(bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra)
assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) ||
(bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra)
assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) {
assume((!bundle.d.ready) || bundle.a.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra)
}
inflight := (inflight | a_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = {
val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset)
val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything
val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size)
val log_c_size_bus_size = log2Ceil(c_size_bus_size)
def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits
val inflight = RegInit(0.U((2 max edge.client.endSourceId).W))
val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
inflight.suggestName("inflight")
inflight_opcodes.suggestName("inflight_opcodes")
inflight_sizes.suggestName("inflight_sizes")
val c_first = edge.first(bundle.c.bits, bundle.c.fire)
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
c_first.suggestName("c_first")
d_first.suggestName("d_first")
val c_set = WireInit(0.U(edge.client.endSourceId.W))
val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
c_set.suggestName("c_set")
c_set_wo_ready.suggestName("c_set_wo_ready")
c_opcodes_set.suggestName("c_opcodes_set")
c_sizes_set.suggestName("c_sizes_set")
val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W))
val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W))
c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U
c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U
c_opcode_lookup.suggestName("c_opcode_lookup")
c_size_lookup.suggestName("c_size_lookup")
val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W))
val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W))
c_opcodes_set_interm.suggestName("c_opcodes_set_interm")
c_sizes_set_interm.suggestName("c_sizes_set_interm")
when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) {
c_set_wo_ready := UIntToOH(bundle.c.bits.source)
}
when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) {
c_set := UIntToOH(bundle.c.bits.source)
c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U
c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U
c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U)
c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U)
monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra)
}
val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData
val d_clr = WireInit(0.U(edge.client.endSourceId.W))
val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W))
val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W))
val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W))
d_clr.suggestName("d_clr")
d_clr_wo_ready.suggestName("d_clr_wo_ready")
d_opcodes_clr.suggestName("d_opcodes_clr")
d_sizes_clr.suggestName("d_sizes_clr")
val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr_wo_ready := UIntToOH(bundle.d.bits.source)
}
when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
d_clr := UIntToOH(bundle.d.bits.source)
d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U)
d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U)
}
when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) {
val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source)
assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra)
when (same_cycle_resp) {
assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra)
} .otherwise {
assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra)
}
}
when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) {
assume((!bundle.d.ready) || bundle.c.ready, "ready check")
}
if (edge.manager.minLatency > 0) {
when (c_set_wo_ready.orR) {
assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra)
}
}
inflight := (inflight | c_set) & ~d_clr
inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr
inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr
val watchdog = RegInit(0.U(32.W))
val limit = PlusArg("tilelink_timeout",
docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.")
monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra)
watchdog := watchdog + 1.U
when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U }
}
def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = {
val inflight = RegInit(0.U(edge.manager.endSinkId.W))
val d_first = edge.first(bundle.d.bits, bundle.d.fire)
val e_first = true.B
val d_set = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) {
d_set := UIntToOH(bundle.d.bits.sink)
assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra)
}
val e_clr = WireInit(0.U(edge.manager.endSinkId.W))
when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) {
e_clr := UIntToOH(bundle.e.bits.sink)
monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra)
}
// edge.client.minLatency applies to BC, not DE
inflight := (inflight | d_set) & ~e_clr
}
def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = {
val sourceBits = log2Ceil(edge.client.endSourceId)
val tooBig = 14 // >16kB worth of flight information gets to be too much
if (sourceBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked")
} else {
if (args.edge.params(TestplanTestType).simulation) {
if (args.edge.params(TLMonitorStrictMode)) {
legalizeADSource(bundle, edge)
legalizeCDSource(bundle, edge)
} else {
legalizeADSourceOld(bundle, edge)
}
}
if (args.edge.params(TestplanTestType).formal) {
legalizeADSourceFormal(bundle, edge)
}
}
if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) {
// legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize...
val sinkBits = log2Ceil(edge.manager.endSinkId)
if (sinkBits > tooBig) {
println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked")
} else {
legalizeDESink(bundle, edge)
}
}
}
def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = {
legalizeFormat (bundle, edge)
legalizeMultibeat (bundle, edge)
legalizeUnique (bundle, edge)
}
}
File Misc.scala:
// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util._
import chisel3.util.random.LFSR
import org.chipsalliance.cde.config.Parameters
import scala.math._
class ParameterizedBundle(implicit p: Parameters) extends Bundle
trait Clocked extends Bundle {
val clock = Clock()
val reset = Bool()
}
object DecoupledHelper {
def apply(rvs: Bool*) = new DecoupledHelper(rvs)
}
class DecoupledHelper(val rvs: Seq[Bool]) {
def fire(exclude: Bool, includes: Bool*) = {
require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!")
(rvs.filter(_ ne exclude) ++ includes).reduce(_ && _)
}
def fire() = {
rvs.reduce(_ && _)
}
}
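// Usage sketch (signal names are illustrative, not from this file):
//   val helper = DecoupledHelper(in.valid, out.ready, resource_free)
//   in.ready  := helper.fire(in.valid)   // AND of every term except the excluded one
//   out.valid := helper.fire(out.ready)
// Excluding a signal from its own ready/valid expression avoids combinational loops.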
object MuxT {
def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2))
def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3))
def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) =
(Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4))
}
/** Creates a cascade of n MuxTs to search for a key value. */
object MuxTLookup {
def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = {
var res = default
for ((k, v) <- mapping.reverse)
res = MuxT(k === key, v, res)
res
}
}
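// Usage sketch (keys and payloads are illustrative):
//   val (size, signed) = MuxTLookup(sel, (0.U, false.B),
//     Seq(1.U -> (1.U, true.B), 2.U -> (4.U, false.B)))
// returns the tuple whose key matches sel, falling back to the default otherwise.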
object ValidMux {
def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = {
apply(v1 +: v2.toSeq)
}
def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = {
val out = Wire(Valid(valids.head.bits.cloneType))
out.valid := valids.map(_.valid).reduce(_ || _)
out.bits := MuxCase(valids.head.bits,
valids.map(v => (v.valid -> v.bits)))
out
}
}
object Str
{
def apply(s: String): UInt = {
var i = BigInt(0)
require(s.forall(validChar _))
for (c <- s)
i = (i << 8) | c
i.U((s.length*8).W)
}
def apply(x: Char): UInt = {
require(validChar(x))
x.U(8.W)
}
def apply(x: UInt): UInt = apply(x, 10)
def apply(x: UInt, radix: Int): UInt = {
val rad = radix.U
val w = x.getWidth
require(w > 0)
var q = x
var s = digit(q % rad)
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s)
}
s
}
def apply(x: SInt): UInt = apply(x, 10)
def apply(x: SInt, radix: Int): UInt = {
val neg = x < 0.S
val abs = x.abs.asUInt
if (radix != 10) {
Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix))
} else {
val rad = radix.U
val w = abs.getWidth
require(w > 0)
var q = abs
var s = digit(q % rad)
var needSign = neg
for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) {
q = q / rad
val placeSpace = q === 0.U
val space = Mux(needSign, Str('-'), Str(' '))
needSign = needSign && !placeSpace
s = Cat(Mux(placeSpace, space, digit(q % rad)), s)
}
Cat(Mux(needSign, Str('-'), Str(' ')), s)
}
}
private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0)
private def validChar(x: Char) = x == (x & 0xFF)
}
object Split
{
def apply(x: UInt, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
def apply(x: UInt, n2: Int, n1: Int, n0: Int) = {
val w = x.getWidth
(x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0))
}
}
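// e.g. Split(0xAB.U(8.W), 4) yields (0xA, 0xB); the three- and four-argument overloads
// peel additional fields off the top in the same way (illustrative note, not from the source).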
object Random
{
def apply(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0)
else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod))
}
def apply(mod: Int): UInt = apply(mod, randomizer)
def oneHot(mod: Int, random: UInt): UInt = {
if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0))
else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt
}
def oneHot(mod: Int): UInt = oneHot(mod, randomizer)
private def randomizer = LFSR(16)
private def partition(value: UInt, slices: Int) =
Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U)
}
object Majority {
def apply(in: Set[Bool]): Bool = {
val n = (in.size >> 1) + 1
val clauses = in.subsets(n).map(_.reduce(_ && _))
clauses.reduce(_ || _)
}
def apply(in: Seq[Bool]): Bool = apply(in.toSet)
def apply(in: UInt): Bool = apply(in.asBools.toSet)
}
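// e.g. Majority(Seq(a, b, c)) expands to (a && b) || (a && c) || (b && c): every subset of
// floor(n/2) + 1 inputs is AND-ed together and the resulting clauses are OR-ed.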
object PopCountAtLeast {
private def two(x: UInt): (Bool, Bool) = x.getWidth match {
case 1 => (x.asBool, false.B)
case n =>
val half = x.getWidth / 2
val (leftOne, leftTwo) = two(x(half - 1, 0))
val (rightOne, rightTwo) = two(x(x.getWidth - 1, half))
(leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne))
}
def apply(x: UInt, n: Int): Bool = n match {
case 0 => true.B
case 1 => x.orR
case 2 => two(x)._2
case 3 => PopCount(x) >= n.U
}
}
// This gets used everywhere, so make the smallest circuit possible ...
// Given an address and size, create a mask of beatBytes size
// eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111
// groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01
object MaskGen {
def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = {
require (groupBy >= 1 && beatBytes >= groupBy)
require (isPow2(beatBytes) && isPow2(groupBy))
val lgBytes = log2Ceil(beatBytes)
val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U
def helper(i: Int): Seq[(Bool, Bool)] = {
if (i == 0) {
Seq((lgSize >= lgBytes.asUInt, true.B))
} else {
val sub = helper(i-1)
val size = sizeOH(lgBytes - i)
val bit = addr_lo(lgBytes - i)
val nbit = !bit
Seq.tabulate (1 << i) { j =>
val (sub_acc, sub_eq) = sub(j/2)
val eq = sub_eq && (if (j % 2 == 1) bit else nbit)
val acc = sub_acc || (size && eq)
(acc, eq)
}
}
}
if (groupBy == beatBytes) 1.U else
Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse)
}
}
File PlusArg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.experimental._
import chisel3.util.HasBlackBoxResource
@deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05")
case class PlusArgInfo(default: BigInt, docstring: String)
/** Case class for PlusArg information
*
* @tparam A scala type of the PlusArg value
* @param default optional default value
* @param docstring text to include in the help
* @param doctype description of the Verilog type of the PlusArg value (e.g. STRING, INT)
*/
private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String)
/** Typeclass for converting a type to a doctype string
* @tparam A some type
*/
trait Doctypeable[A] {
/** Return the doctype string for some option */
def toDoctype(a: Option[A]): String
}
/** Object containing implementations of the Doctypeable typeclass */
object Doctypes {
/** Converts an Int => "INT" */
implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" }
/** Converts a BigInt => "INT" */
implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" }
/** Converts a String => "STRING" */
implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" }
}
class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map(
"FORMAT" -> StringParam(format),
"DEFAULT" -> IntParam(default),
"WIDTH" -> IntParam(width)
)) with HasBlackBoxResource {
val io = IO(new Bundle {
val out = Output(UInt(width.W))
})
addResource("/vsrc/plusarg_reader.v")
}
/* This wrapper class has no outputs, making it clear it is a simulation-only construct */
class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module {
val io = IO(new Bundle {
val count = Input(UInt(width.W))
})
val max = Module(new plusarg_reader(format, default, docstring, width)).io.out
when (max > 0.U) {
assert (io.count < max, s"Timeout exceeded: $docstring")
}
}
import Doctypes._
object PlusArg
{
/** PlusArg("foo") will return 42.U if the simulation is run with +foo=42
* Do not use this as an initial register value. The value is set in an
* initial block and thus accessing it from another initial is racey.
* Add a docstring to document the arg, which can be dumped in an elaboration
* pass.
*/
def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out
}
/** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert
* to kill the simulation when count exceeds the specified integer argument.
* Default 0 will never assert.
*/
def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = {
PlusArgArtefacts.append(name, Some(default), docstring)
Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count
}
}
object PlusArgArtefacts {
private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty
/* Add a new PlusArg */
@deprecated(
"Use `Some(BigInt)` to specify a `default` value. This will be removed in Rocket Chip 2020.08",
"Rocket Chip 2020.05"
)
def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring)
/** Add a new PlusArg
*
* @tparam A scala type of the PlusArg value
* @param name name for the PlusArg
* @param default optional default value
* @param docstring text to include in the help
*/
def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit =
artefacts = artefacts ++
Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default)))
/* From plus args, generate help text */
private def serializeHelp_cHeader(tab: String = ""): String = artefacts
.map{ case(arg, info) =>
s"""|$tab+$arg=${info.doctype}\\n\\
|$tab${" "*20}${info.docstring}\\n\\
|""".stripMargin ++ info.default.map{ case default =>
s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("")
}.toSeq.mkString("\\n\\\n") ++ "\""
/* From plus args, generate a char array of their names */
private def serializeArray_cHeader(tab: String = ""): String = {
val prettyTab = tab + " " * 44 // Length of 'static const ...'
s"${tab}static const char * verilog_plusargs [] = {\\\n" ++
artefacts
.map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" }
.mkString("")++
s"${prettyTab}0};"
}
/* Generate C code to be included in emulator.cc that helps with
* argument parsing based on available Verilog PlusArgs */
def serialize_cHeader(): String =
s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\
|${serializeHelp_cHeader(" "*7)}
|${serializeArray_cHeader()}
|""".stripMargin
}
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
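// e.g., with a 4-bit n = -2.S(4.W): the low three bits encode 6, so x is shifted left by 6 and then right by 8, a net right shift by 2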
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
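// e.g., with n = 10: 7.U.addWrap(5.U, 10) yields 2.U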
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
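// e.g., with n = 10: 2.U.subWrap(5.U, 10) yields 7.U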
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
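// "OH1" below is a thermometer encoding: the value k is represented as k low-order ones
// e.g., UIntToOH1(3.U, 8) = "h07".U, OH1ToOH("b0111".U) = "b1000".U, OH1ToUInt("b0111".U) = 3.U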
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
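// e.g., leftOR("b00100".U) = "b11100".U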
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
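// e.g., rightOR("b00100".U) = "b00111".U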
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.diplomacy
import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}
import freechips.rocketchip.util.ShiftQueue
/** Options for describing the attributes of memory regions */
object RegionType {
// Define the 'more relaxed than' ordering
val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
sealed trait T extends Ordered[T] {
def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
}
case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}
// A potentially empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange]
{
require (start >= 0, s"Ids cannot be negative, but got: $start.")
require (start <= end, "Id ranges cannot be negative.")
def compare(x: IdRange) = {
val primary = (this.start - x.start).signum
val secondary = (x.end - this.end).signum
if (primary != 0) primary else secondary
}
def overlaps(x: IdRange) = start < x.end && x.start < end
def contains(x: IdRange) = start <= x.start && x.end <= end
def contains(x: Int) = start <= x && x < end
def contains(x: UInt) =
if (size == 0) {
false.B
} else if (size == 1) { // simple comparison
x === start.U
} else {
// find index of largest different bit
val largestDeltaBit = log2Floor(start ^ (end-1))
val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
val uncommonMask = (1 << smallestCommonBit) - 1
val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
// the prefix must match exactly (note: may shift ALL bits away)
(x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
// firrtl constant prop range analysis can eliminate these two:
(start & uncommonMask).U <= uncommonBits &&
uncommonBits <= ((end-1) & uncommonMask).U
}
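// e.g., after constant propagation, IdRange(4, 8).contains(x) reduces to (x >> 2) === 1.U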
def shift(x: Int) = IdRange(start+x, end+x)
def size = end - start
def isEmpty = end == start
def range = start until end
}
object IdRange
{
def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
val ranges = s.sorted
(ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
}
}
// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
def this(x: Int) = this(x, x)
require (min <= max, s"Min transfer $min > max transfer $max")
require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")
def none = min == 0
def contains(x: Int) = isPow2(x) && min <= x && x <= max
def containsLg(x: Int) = contains(1 << x)
def containsLg(x: UInt) =
if (none) false.B
else if (min == max) { log2Ceil(min).U === x }
else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }
def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)
def intersect(x: TransferSizes) =
if (x.max < min || max < x.min) TransferSizes.none
else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))
// Not a union, because the result may contain sizes contained by neither term
// NOT TO BE CONFUSED WITH COVERPOINTS
def mincover(x: TransferSizes) = {
if (none) {
x
} else if (x.none) {
this
} else {
TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
}
}
override def toString() = "TransferSizes[%d, %d]".format(min, max)
}
object TransferSizes {
def apply(x: Int) = new TransferSizes(x)
val none = new TransferSizes(0)
def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)
implicit def asBool(x: TransferSizes) = !x.none
}
// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
{
// Forbid misaligned base address (and empty sets)
require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}")
require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous
// We do allow negative mask (=> ignore all high bits)
def contains(x: BigInt) = ((x ^ base) & ~mask) == 0
def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S
// turn x into an address contained in this set
def legalize(x: UInt): UInt = base.U | (mask.U & x)
// overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1)
def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0
// contains iff bitwise: x.mask => mask && contains(x.base)
def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0
// The number of bytes to which the manager must be aligned
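// e.g., mask = 0xff gives alignment = 0x100, i.e. a 256-byte-aligned region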
def alignment = ((mask + 1) & ~mask)
// Is this a contiguous memory range
def contiguous = alignment == mask+1
def finite = mask >= 0
def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask }
// Widen the match function to ignore all bits in imask
def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)
// Return an AddressSet that only contains the addresses both sets contain
def intersect(x: AddressSet): Option[AddressSet] = {
if (!overlaps(x)) {
None
} else {
val r_mask = mask & x.mask
val r_base = base | x.base
Some(AddressSet(r_base, r_mask))
}
}
def subtract(x: AddressSet): Seq[AddressSet] = {
intersect(x) match {
case None => Seq(this)
case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit =>
val nmask = (mask & (bit-1)) | remove.mask
val nbase = (remove.base ^ bit) & ~nmask
AddressSet(nbase, nmask)
}
}
}
// AddressSets have one natural Ordering (the containment order, if contiguous)
def compare(x: AddressSet) = {
val primary = (this.base - x.base).signum // smallest address first
val secondary = (x.mask - this.mask).signum // largest mask first
if (primary != 0) primary else secondary
}
// We always want to see things in hex
override def toString() = {
if (mask >= 0) {
"AddressSet(0x%x, 0x%x)".format(base, mask)
} else {
"AddressSet(0x%x, ~0x%x)".format(base, ~mask)
}
}
def toRanges = {
require (finite, "Ranges cannot be calculated on infinite mask")
val size = alignment
val fragments = mask & ~(size-1)
val bits = bitIndexes(fragments)
(BigInt(0) until (BigInt(1) << bits.size)).map { i =>
val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) }
AddressRange(off, size)
}
}
}
object AddressSet
{
val everything = AddressSet(0, -1)
def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = {
if (size == 0) tail.reverse else {
val maxBaseAlignment = base & (-base) // 0 for infinite (LSB)
val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size
val step =
if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment)
maxSizeAlignment else maxBaseAlignment
misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
}
}
def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = {
// Pair terms up by ignoring 'bit'
seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) =>
if (seq.size == 1) {
seq.head // singleton -> unaffected
} else {
key.copy(mask = key.mask | bit) // pair - widen mask by bit
}
}.toList
}
def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _)
AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted
}
def enumerateMask(mask: BigInt): Seq[BigInt] = {
def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] =
if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail)
helper(0, Nil)
}
def enumerateBits(mask: BigInt): Seq[BigInt] = {
def helper(x: BigInt): Seq[BigInt] = {
if (x == 0) {
Nil
} else {
val bit = x & (-x)
bit +: helper(x & ~bit)
}
}
helper(mask)
}
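// e.g., enumerateBits(0xa) = Seq(0x2, 0x8); enumerateMask(0x5) = Seq(0x0, 0x1, 0x4, 0x5)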
}
case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean)
{
require (depth >= 0, "Buffer depth must be >= 0")
def isDefined = depth > 0
def latency = if (isDefined && !flow) 1 else 0
def apply[T <: Data](x: DecoupledIO[T]) =
if (isDefined) Queue(x, depth, flow=flow, pipe=pipe)
else x
def irrevocable[T <: Data](x: ReadyValidIO[T]) =
if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe)
else x
def sq[T <: Data](x: DecoupledIO[T]) =
if (!isDefined) x else {
val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe))
sq.io.enq <> x
sq.io.deq
}
override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "")
}
object BufferParams
{
implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false)
val default = BufferParams(2)
val none = BufferParams(0)
val flow = BufferParams(1, true, false)
val pipe = BufferParams(1, false, true)
}
case class TriStateValue(value: Boolean, set: Boolean)
{
def update(orig: Boolean) = if (set) value else orig
}
object TriStateValue
{
implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true)
def unset = TriStateValue(false, false)
}
trait DirectedBuffers[T] {
def copyIn(x: BufferParams): T
def copyOut(x: BufferParams): T
def copyInOut(x: BufferParams): T
}
trait IdMapEntry {
def name: String
def from: IdRange
def to: IdRange
def isCache: Boolean
def requestFifo: Boolean
def maxTransactionsInFlight: Option[Int]
def pretty(fmt: String) =
if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5
fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
} else {
fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "")
}
}
abstract class IdMap[T <: IdMapEntry] {
protected val fmt: String
val mapping: Seq[T]
def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n")
}
File Edges.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.tilelink
import chisel3._
import chisel3.util._
import chisel3.experimental.SourceInfo
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.util._
class TLEdge(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdgeParameters(client, manager, params, sourceInfo)
{
def isAligned(address: UInt, lgSize: UInt): Bool = {
if (maxLgSize == 0) true.B else {
val mask = UIntToOH1(lgSize, maxLgSize)
(address & mask) === 0.U
}
}
def mask(address: UInt, lgSize: UInt): UInt =
MaskGen(address, lgSize, manager.beatBytes)
def staticHasData(bundle: TLChannel): Option[Boolean] = {
bundle match {
case _:TLBundleA => {
// Do there exist A messages with Data?
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
// Do there exist A messages without Data?
val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
// Statically optimize the case where hasData is a constant
if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
}
case _:TLBundleB => {
// Do there exist B messages with Data?
val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
// Do there exist B messages without Data?
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
// Statically optimize the case where hasData is a constant
if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
}
case _:TLBundleC => {
// Do there exist C messages with Data?
val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
// Do there exist C messages without Data?
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
}
case _:TLBundleD => {
// Do there exist D messages with Data?
val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
// Do there exist D messages without Data?
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
}
case _:TLBundleE => Some(false)
}
}
def isRequest(x: TLChannel): Bool = {
x match {
case a: TLBundleA => true.B
case b: TLBundleB => true.B
case c: TLBundleC => c.opcode(2) && c.opcode(1)
// opcode === TLMessages.Release ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(2) && !d.opcode(1)
// opcode === TLMessages.Grant ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
}
def isResponse(x: TLChannel): Bool = {
x match {
case a: TLBundleA => false.B
case b: TLBundleB => false.B
case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
// opcode =/= TLMessages.Release &&
// opcode =/= TLMessages.ReleaseData
case d: TLBundleD => true.B // Grant isResponse + isRequest
case e: TLBundleE => true.B
}
}
def hasData(x: TLChannel): Bool = {
val opdata = x match {
case a: TLBundleA => !a.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case b: TLBundleB => !b.opcode(2)
// opcode === TLMessages.PutFullData ||
// opcode === TLMessages.PutPartialData ||
// opcode === TLMessages.ArithmeticData ||
// opcode === TLMessages.LogicalData
case c: TLBundleC => c.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.ProbeAckData ||
// opcode === TLMessages.ReleaseData
case d: TLBundleD => d.opcode(0)
// opcode === TLMessages.AccessAckData ||
// opcode === TLMessages.GrantData
case e: TLBundleE => false.B
}
staticHasData(x).map(_.B).getOrElse(opdata)
}
def opcode(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.opcode
case b: TLBundleB => b.opcode
case c: TLBundleC => c.opcode
case d: TLBundleD => d.opcode
}
}
def param(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.param
case b: TLBundleB => b.param
case c: TLBundleC => c.param
case d: TLBundleD => d.param
}
}
def size(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.size
case b: TLBundleB => b.size
case c: TLBundleC => c.size
case d: TLBundleD => d.size
}
}
def data(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.data
case b: TLBundleB => b.data
case c: TLBundleC => c.data
case d: TLBundleD => d.data
}
}
def corrupt(x: TLDataChannel): Bool = {
x match {
case a: TLBundleA => a.corrupt
case b: TLBundleB => b.corrupt
case c: TLBundleC => c.corrupt
case d: TLBundleD => d.corrupt
}
}
def mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.mask
case b: TLBundleB => b.mask
case c: TLBundleC => mask(c.address, c.size)
}
}
def full_mask(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => mask(a.address, a.size)
case b: TLBundleB => mask(b.address, b.size)
case c: TLBundleC => mask(c.address, c.size)
}
}
def address(x: TLAddrChannel): UInt = {
x match {
case a: TLBundleA => a.address
case b: TLBundleB => b.address
case c: TLBundleC => c.address
}
}
def source(x: TLDataChannel): UInt = {
x match {
case a: TLBundleA => a.source
case b: TLBundleB => b.source
case c: TLBundleC => c.source
case d: TLBundleD => d.source
}
}
def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes)
def addr_lo(x: UInt): UInt =
if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0)
def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x))
def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x))
def numBeats(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 1.U
case bundle: TLDataChannel => {
val hasData = this.hasData(bundle)
val size = this.size(bundle)
val cutoff = log2Ceil(manager.beatBytes)
val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U
val decode = UIntToOH(size, maxLgSize+1) >> cutoff
Mux(hasData, decode | small.asUInt, 1.U)
}
}
}
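// numBeats1(x) = numBeats(x) - 1: the number of beats after the first, handy as a down-counter reload value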
def numBeats1(x: TLChannel): UInt = {
x match {
case _: TLBundleE => 0.U
case bundle: TLDataChannel => {
if (maxLgSize == 0) {
0.U
} else {
val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes)
Mux(hasData(bundle), decode, 0.U)
}
}
}
}
def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val beats1 = numBeats1(bits)
val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W))
val counter1 = counter - 1.U
val first = counter === 0.U
val last = counter === 1.U || beats1 === 0.U
val done = last && fire
val count = (beats1 & ~counter1)
when (fire) {
counter := Mux(first, beats1, counter1)
}
(first, last, done, count)
}
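// (first, last, done, count): first/last flag the first/last beat of a burst, done pulses on the final beat's fire,
// and count gives the zero-based index of the current beat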
def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1
def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire)
def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid)
def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2
def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire)
def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid)
def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3
def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire)
def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid)
def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3)
}
def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire)
def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid)
def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4)
}
def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire)
def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid)
def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = {
val r = firstlastHelper(bits, fire)
(r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes))
}
def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire)
def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid)
// Does the request need T permissions to be executed?
def needT(a: TLBundleA): Bool = {
val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLPermissions.NtoB -> false.B,
TLPermissions.NtoT -> true.B,
TLPermissions.BtoT -> true.B))
MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array(
TLMessages.PutFullData -> true.B,
TLMessages.PutPartialData -> true.B,
TLMessages.ArithmeticData -> true.B,
TLMessages.LogicalData -> true.B,
TLMessages.Get -> false.B,
TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array(
TLHints.PREFETCH_READ -> false.B,
TLHints.PREFETCH_WRITE -> true.B)),
TLMessages.AcquireBlock -> acq_needT,
TLMessages.AcquirePerm -> acq_needT))
}
// This is a very expensive circuit; use only if you really mean it!
def inFlight(x: TLBundle): (UInt, UInt) = {
val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W))
val bce = manager.anySupportAcquireB && client.anySupportProbe
val (a_first, a_last, _) = firstlast(x.a)
val (b_first, b_last, _) = firstlast(x.b)
val (c_first, c_last, _) = firstlast(x.c)
val (d_first, d_last, _) = firstlast(x.d)
val (e_first, e_last, _) = firstlast(x.e)
val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits))
val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits))
val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits))
val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits))
val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits))
val a_inc = x.a.fire && a_first && a_request
val b_inc = x.b.fire && b_first && b_request
val c_inc = x.c.fire && c_first && c_request
val d_inc = x.d.fire && d_first && d_request
val e_inc = x.e.fire && e_first && e_request
val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil))
val a_dec = x.a.fire && a_last && a_response
val b_dec = x.b.fire && b_last && b_response
val c_dec = x.c.fire && c_last && c_response
val d_dec = x.d.fire && d_last && d_response
val e_dec = x.e.fire && e_last && e_response
val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil))
val next_flight = flight + PopCount(inc) - PopCount(dec)
flight := next_flight
(flight, next_flight)
}
def prettySourceMapping(context: String): String = {
s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n"
}
}
class TLEdgeOut(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
// Transfers
def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquireBlock
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.AcquirePerm
a.param := growPermissions
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.Release
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = {
require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsAcquireBFast(toAddress, lgSize)
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ReleaseData
c.param := shrinkPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
(legal, c)
}
def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) =
Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B)
def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAck
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(b.source, b.address, b.size, reportPermissions, data)
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.ProbeAckData
c.param := reportPermissions
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC =
ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B)
def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink)
def GrantAck(toSink: UInt): TLBundleE = {
val e = Wire(new TLBundleE(bundle))
e.sink := toSink
e
}
// Accesses
def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsGetFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Get
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutFullFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutFullData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) =
Put(fromSource, toAddress, lgSize, data, mask, false.B)
def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = {
require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsPutPartialFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.PutPartialData
a.param := 0.U
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = {
require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsArithmeticFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.ArithmeticData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsLogicalFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.LogicalData
a.param := atomic
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := data
a.corrupt := corrupt
(legal, a)
}
def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = {
require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}")
val legal = manager.supportsHintFast(toAddress, lgSize)
val a = Wire(new TLBundleA(bundle))
a.opcode := TLMessages.Hint
a.param := param
a.size := lgSize
a.source := fromSource
a.address := toAddress
a.user := DontCare
a.echo := DontCare
a.mask := mask(toAddress, lgSize)
a.data := DontCare
a.corrupt := false.B
(legal, a)
}
def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data)
def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B)
def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.AccessAckData
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := data
c.corrupt := corrupt
c
}
def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size)
def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = {
val c = Wire(new TLBundleC(bundle))
c.opcode := TLMessages.HintAck
c.param := 0.U
c.size := lgSize
c.source := fromSource
c.address := toAddress
c.user := DontCare
c.echo := DontCare
c.data := DontCare
c.corrupt := false.B
c
}
}
class TLEdgeIn(
client: TLClientPortParameters,
manager: TLManagerPortParameters,
params: Parameters,
sourceInfo: SourceInfo)
extends TLEdge(client, manager, params, sourceInfo)
{
private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = {
val todo = x.filter(!_.isEmpty)
val heads = todo.map(_.head)
val tails = todo.map(_.tail)
if (todo.isEmpty) Nil else { heads +: myTranspose(tails) }
}
// Transfers
def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = {
require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}")
val legal = client.supportsProbe(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Probe
b.param := capPermissions
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.Grant
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B)
def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.GrantData
d.param := capPermissions
d.size := lgSize
d.source := toSource
d.sink := fromSink
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B)
def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.ReleaseAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
// Accesses
def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsGet(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Get
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
val legal = client.supportsPutFull(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutFullData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
Put(fromAddress, toSource, lgSize, data, mask, false.B)
def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsPutPartial(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.PutPartialData
b.param := 0.U
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsArithmetic(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.ArithmeticData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsLogical(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.LogicalData
b.param := atomic
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := data
b.corrupt := corrupt
(legal, b)
}
def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
val legal = client.supportsHint(toSource, lgSize)
val b = Wire(new TLBundleB(bundle))
b.opcode := TLMessages.Hint
b.param := param
b.size := lgSize
b.source := toSource
b.address := fromAddress
b.mask := mask(fromAddress, lgSize)
b.data := DontCare
b.corrupt := false.B
(legal, b)
}
def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.AccessAckData
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := data
d.corrupt := corrupt
d
}
def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
val d = Wire(new TLBundleD(bundle))
d.opcode := TLMessages.HintAck
d.param := 0.U
d.size := lgSize
d.source := toSource
d.sink := 0.U
d.denied := denied
d.user := DontCare
d.echo := DontCare
d.data := DontCare
d.corrupt := false.B
d
}
}
| module TLMonitor_65( // @[Monitor.scala:36:7]
input clock, // @[Monitor.scala:36:7]
input reset, // @[Monitor.scala:36:7]
input io_in_a_ready, // @[Monitor.scala:20:14]
input io_in_a_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14]
input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14]
input io_in_a_bits_corrupt, // @[Monitor.scala:20:14]
input io_in_d_ready, // @[Monitor.scala:20:14]
input io_in_d_valid, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14]
input io_in_d_bits_sink, // @[Monitor.scala:20:14]
input io_in_d_bits_denied, // @[Monitor.scala:20:14]
input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);
wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71]
wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
reg [2:0] a_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode; // @[Monitor.scala:387:22]
reg [2:0] param; // @[Monitor.scala:388:22]
reg [2:0] size; // @[Monitor.scala:389:22]
reg [3:0] source; // @[Monitor.scala:390:22]
reg [31:0] address; // @[Monitor.scala:391:22]
reg [2:0] d_first_counter; // @[Edges.scala:229:27]
reg [2:0] opcode_1; // @[Monitor.scala:538:22]
reg [1:0] param_1; // @[Monitor.scala:539:22]
reg [2:0] size_1; // @[Monitor.scala:540:22]
reg [3:0] source_1; // @[Monitor.scala:541:22]
reg sink; // @[Monitor.scala:542:22]
reg denied; // @[Monitor.scala:543:22]
reg [9:0] inflight; // @[Monitor.scala:614:27]
reg [39:0] inflight_opcodes; // @[Monitor.scala:616:35]
reg [39:0] inflight_sizes; // @[Monitor.scala:618:33]
reg [2:0] a_first_counter_1; // @[Edges.scala:229:27]
wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [2:0] d_first_counter_1; // @[Edges.scala:229:27]
wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25]
wire [15:0] _GEN_0 = {12'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35]
wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35]
wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
wire [15:0] _GEN_3 = {12'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35]
reg [31:0] watchdog; // @[Monitor.scala:709:27]
reg [9:0] inflight_1; // @[Monitor.scala:726:35]
reg [39:0] inflight_sizes_1; // @[Monitor.scala:728:35]
reg [2:0] d_first_counter_2; // @[Edges.scala:229:27]
wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25]
reg [31:0] watchdog_1; // @[Monitor.scala:818:27] |
Generate the Verilog code corresponding to the following Chisel files.
File InputUnit.scala:
package constellation.router
import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._
import constellation.channel._
import constellation.routing.{FlowRoutingBundle}
import constellation.noc.{HasNoCParams}
class AbstractInputUnitIO(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams],
)(implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
val nodeId = cParam.destId
val router_req = Decoupled(new RouteComputerReq)
val router_resp = Input(new RouteComputerResp(outParams, egressParams))
val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams))
val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams))
val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }))
val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams)))
val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams)))
val debug = Output(new Bundle {
val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W)
})
val block = Input(Bool())
}
abstract class AbstractInputUnit(
val cParam: BaseChannelParams,
val outParams: Seq[ChannelParams],
val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams {
val nodeId = cParam.destId
def io: AbstractInputUnitIO
}
class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module {
val nVirtualChannels = cParam.nVirtualChannels
val io = IO(new Bundle {
val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits))))
val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits)))
})
val useOutputQueues = cParam.useOutputQueues
val delims = if (useOutputQueues) {
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_)
} else {
// If no queuing, have to add an additional slot since head == tail implies empty
// TODO this should be fixed, should use all slots available
cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_)
}
val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val ends = delims.tail.zipWithIndex.map { case (s,i) =>
if (cParam.virtualChannelParams(i).traversable) s else 0
}
val fullSize = delims.last
// Ugly case. Use multiple queues
if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) {
require(useOutputQueues)
val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize)))
qs.zipWithIndex.foreach { case (q,i) =>
val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U)
q.io.enq.valid := sel.orR
q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head))
q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail))
q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload))
io.deq(i) <> q.io.deq
}
} else {
val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits))
val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W))))
val empty = (heads zip tails).map(t => t._1 === t._2)
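// Unified-buffer case: a single Mem is partitioned into per-VC circular regions [starts(i), ends(i)),
// tracked by per-VC head/tail pointers; head == tail means that VC's region is empty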
val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) }
qs.foreach(_.io.enq.valid := false.B)
qs.foreach(_.io.enq.bits := DontCare)
val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id)
val flit = Wire(new BaseFlit(cParam.payloadBits))
val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B
flit.head := io.enq(0).bits.head
flit.tail := io.enq(0).bits.tail
flit.payload := io.enq(0).bits.payload
when (io.enq(0).valid && !direct_to_q) {
val tail = tails(io.enq(0).bits.virt_channel_id)
mem.write(tail, flit)
tails(io.enq(0).bits.virt_channel_id) := Mux(
tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(vc_sel, starts.map(_.U)),
tail + 1.U)
} .elsewhen (io.enq(0).valid && direct_to_q) {
for (i <- 0 until nVirtualChannels) {
when (io.enq(0).bits.virt_channel_id === i.U) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := flit
}
}
}
if (useOutputQueues) {
val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready }
val to_q_oh = PriorityEncoderOH(can_to_q)
val to_q = OHToUInt(to_q_oh)
when (can_to_q.orR) {
val head = Mux1H(to_q_oh, heads)
heads(to_q) := Mux(
head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(to_q_oh, starts.map(_.U)),
head + 1.U)
for (i <- 0 until nVirtualChannels) {
when (to_q_oh(i)) {
qs(i).io.enq.valid := true.B
qs(i).io.enq.bits := mem.read(head)
}
}
}
for (i <- 0 until nVirtualChannels) {
io.deq(i) <> qs(i).io.deq
}
} else {
qs.map(_.io.deq.ready := false.B)
val ready_sel = io.deq.map(_.ready)
val fire = io.deq.map(_.fire)
assert(PopCount(fire) <= 1.U)
val head = Mux1H(fire, heads)
when (fire.orR) {
val fire_idx = OHToUInt(fire)
heads(fire_idx) := Mux(
head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)),
Mux1H(fire, starts.map(_.U)),
head + 1.U)
}
val read_flit = mem.read(head)
for (i <- 0 until nVirtualChannels) {
io.deq(i).valid := !empty(i)
io.deq(i).bits := read_flit
}
}
}
}
class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams],
egressParams: Seq[EgressChannelParams],
combineRCVA: Boolean, combineSAST: Boolean
)
(implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) {
val nVirtualChannels = cParam.nVirtualChannels
val virtualChannelParams = cParam.virtualChannelParams
class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) {
val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams]))
}
val io = IO(new InputUnitIO)
val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5)
class InputState extends Bundle {
val g = UInt(3.W)
val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
val flow = new FlowRoutingBundle
val fifo_deps = UInt(nVirtualChannels.W)
}
val input_buffer = Module(new InputBuffer(cParam))
for (i <- 0 until cParam.srcSpeedup) {
input_buffer.io.enq(i) := io.in.flit(i)
}
input_buffer.io.deq.foreach(_.ready := false.B)
val route_arbiter = Module(new Arbiter(
new RouteComputerReq, nVirtualChannels
))
io.router_req <> route_arbiter.io.out
val states = Reg(Vec(nVirtualChannels, new InputState))
val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_)
val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_)
if (anyFifo) {
val idle_mask = VecInit(states.map(_.g === g_i)).asUInt
for (s <- states)
for (i <- 0 until nVirtualChannels)
s.fifo_deps := s.fifo_deps & ~idle_mask
}
for (i <- 0 until cParam.srcSpeedup) {
when (io.in.flit(i).fire && io.in.flit(i).bits.head) {
val id = io.in.flit(i).bits.virt_channel_id
assert(id < nVirtualChannels.U)
assert(states(id).g === g_i)
val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U
states(id).g := Mux(at_dest, g_v, g_r)
states(id).vc_sel.foreach(_.foreach(_ := false.B))
for (o <- 0 until nEgress) {
when (o.U === io.in.flit(i).bits.flow.egress_node_id) {
states(id).vc_sel(o+nOutputs)(0) := true.B
}
}
states(id).flow := io.in.flit(i).bits.flow
if (anyFifo) {
val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR
states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) =>
s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id
}).asUInt
}
}
}
(route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) =>
if (virtualChannelParams(idx).traversable) {
i.valid := s.g === g_r
i.bits.flow := s.flow
i.bits.src_virt_id := idx.U
when (i.fire) { s.g := g_v }
} else {
i.valid := false.B
i.bits := DontCare
}
}
when (io.router_req.fire) {
val id = io.router_req.bits.src_virt_id
assert(states(id).g === g_r)
states(id).g := g_v
for (i <- 0 until nVirtualChannels) {
when (i.U === id) {
states(i).vc_sel := io.router_resp.vc_sel
}
}
}
val mask = RegInit(0.U(nVirtualChannels.W))
val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams)))
val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool()))
val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask))
val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels)
// Prioritize incoming packets
when (io.router_req.fire) {
mask := (1.U << io.router_req.bits.src_virt_id) - 1.U
} .elsewhen (vcalloc_vals.orR) {
mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) })
}
io.vcalloc_req.valid := vcalloc_vals.orR
io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs)
states.zipWithIndex.map { case (s,idx) =>
if (virtualChannelParams(idx).traversable) {
vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U
vcalloc_reqs(idx).in_vc := idx.U
vcalloc_reqs(idx).vc_sel := s.vc_sel
vcalloc_reqs(idx).flow := s.flow
when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a }
if (combineRCVA) {
when (route_arbiter.io.in(idx).fire) {
vcalloc_vals(idx) := true.B
vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel
}
}
} else {
vcalloc_vals(idx) := false.B
vcalloc_reqs(idx) := DontCare
}
}
io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready
when (io.vcalloc_req.fire) {
for (i <- 0 until nVirtualChannels) {
when (vcalloc_sel(i)) {
states(i).vc_sel := io.vcalloc_resp.vc_sel
states(i).g := g_a
if (!combineRCVA) {
assert(states(i).g === g_v)
}
}
}
}
val salloc_arb = Module(new SwitchArbiter(
nVirtualChannels,
cParam.destSpeedup,
outParams, egressParams
))
(states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) =>
if (virtualChannelParams(i).traversable) {
val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U
r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid
r.bits.vc_sel := s.vc_sel
val deq_tail = input_buffer.io.deq(i).bits.tail
r.bits.tail := deq_tail
when (r.fire && deq_tail) {
s.g := g_i
}
input_buffer.io.deq(i).ready := r.ready
} else {
r.valid := false.B
r.bits := DontCare
}
}
io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready))
io.salloc_req <> salloc_arb.io.out
when (io.block) {
salloc_arb.io.out.foreach(_.ready := false.B)
io.salloc_req.foreach(_.valid := false.B)
}
class OutBundle extends Bundle {
val valid = Bool()
val vid = UInt(virtualChannelBits.W)
val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W)
val flit = new Flit(cParam.payloadBits)
}
val salloc_outs = if (combineSAST) {
Wire(Vec(cParam.destSpeedup, new OutBundle))
} else {
Reg(Vec(cParam.destSpeedup, new OutBundle))
}
io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) =>
Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)),
salloc_arb.io.chosen_oh(i), 0.U)
}.reduce(_|_)
for (i <- 0 until cParam.destSpeedup) {
val salloc_out = salloc_outs(i)
salloc_out.valid := salloc_arb.io.out(i).fire
salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i))
val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel))
val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq
val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq)
when (salloc_arb.io.out(i).fire) {
salloc_out.out_vid := virt_channel
salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload))
salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head))
salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail))
salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow))
} .otherwise {
salloc_out.out_vid := DontCare
salloc_out.flit := DontCare
}
salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch
io.out(i).valid := salloc_out.valid
io.out(i).bits.flit := salloc_out.flit
io.out(i).bits.out_virt_channel := salloc_out.out_vid
}
def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = {
if (virtualChannelParams(srcV).traversable) {
outParams.zipWithIndex.map { case (oP, oI) =>
(0 until oP.nVirtualChannels).map { oV =>
var allow = false
virtualChannelParams(srcV).possibleFlows.foreach { pI =>
allow = allow || routingRelation(
cParam.channelRoutingInfos(srcV),
oP.channelRoutingInfos(oV),
pI
)
}
if (!allow)
sel(oI)(oV) := false.B
}
}
}
}
(0 until nVirtualChannels).map { i =>
if (!virtualChannelParams(i).traversable) states(i) := DontCare
filterVCSel(states(i).vc_sel, i)
}
when (reset.asBool) {
states.foreach(_.g := g_i)
}
}
| module mem_56x75( // @[InputUnit.scala:85:18]
input [5:0] R0_addr,
input R0_en,
input R0_clk,
output [74:0] R0_data,
input [5:0] R1_addr,
input R1_en,
input R1_clk,
output [74:0] R1_data,
input [5:0] R2_addr,
input R2_en,
input R2_clk,
output [74:0] R2_data,
input [5:0] R3_addr,
input R3_en,
input R3_clk,
output [74:0] R3_data,
input [5:0] R4_addr,
input R4_en,
input R4_clk,
output [74:0] R4_data,
input [5:0] R5_addr,
input R5_en,
input R5_clk,
output [74:0] R5_data,
input [5:0] R6_addr,
input R6_en,
input R6_clk,
output [74:0] R6_data,
input [5:0] R7_addr,
input R7_en,
input R7_clk,
output [74:0] R7_data,
input [5:0] R8_addr,
input R8_en,
input R8_clk,
output [74:0] R8_data,
input [5:0] R9_addr,
input R9_en,
input R9_clk,
output [74:0] R9_data,
input [5:0] R10_addr,
input R10_en,
input R10_clk,
output [74:0] R10_data,
input [5:0] R11_addr,
input R11_en,
input R11_clk,
output [74:0] R11_data,
input [5:0] R12_addr,
input R12_en,
input R12_clk,
output [74:0] R12_data,
input [5:0] R13_addr,
input R13_en,
input R13_clk,
output [74:0] R13_data,
input [5:0] R14_addr,
input R14_en,
input R14_clk,
output [74:0] R14_data,
input [5:0] R15_addr,
input R15_en,
input R15_clk,
output [74:0] R15_data,
input [5:0] R16_addr,
input R16_en,
input R16_clk,
output [74:0] R16_data,
input [5:0] R17_addr,
input R17_en,
input R17_clk,
output [74:0] R17_data,
input [5:0] R18_addr,
input R18_en,
input R18_clk,
output [74:0] R18_data,
input [5:0] R19_addr,
input R19_en,
input R19_clk,
output [74:0] R19_data,
input [5:0] R20_addr,
input R20_en,
input R20_clk,
output [74:0] R20_data,
input [5:0] R21_addr,
input R21_en,
input R21_clk,
output [74:0] R21_data,
input [5:0] W0_addr,
input W0_en,
input W0_clk,
input [74:0] W0_data
);
reg [74:0] Memory[0:55]; // @[InputUnit.scala:85:18]
always @(posedge W0_clk) begin // @[InputUnit.scala:85:18]
if (W0_en & 1'h1) // @[InputUnit.scala:85:18]
Memory[W0_addr] <= W0_data; // @[InputUnit.scala:85:18]
  end // always @(posedge)
  assign R0_data = Memory[R0_addr]; // @[InputUnit.scala:85:18]
  assign R1_data = Memory[R1_addr]; // @[InputUnit.scala:85:18]
  assign R2_data = Memory[R2_addr]; // @[InputUnit.scala:85:18]
  assign R3_data = Memory[R3_addr]; // @[InputUnit.scala:85:18]
  assign R4_data = Memory[R4_addr]; // @[InputUnit.scala:85:18]
  assign R5_data = Memory[R5_addr]; // @[InputUnit.scala:85:18]
  assign R6_data = Memory[R6_addr]; // @[InputUnit.scala:85:18]
  assign R7_data = Memory[R7_addr]; // @[InputUnit.scala:85:18]
  assign R8_data = Memory[R8_addr]; // @[InputUnit.scala:85:18]
  assign R9_data = Memory[R9_addr]; // @[InputUnit.scala:85:18]
  assign R10_data = Memory[R10_addr]; // @[InputUnit.scala:85:18]
  assign R11_data = Memory[R11_addr]; // @[InputUnit.scala:85:18]
  assign R12_data = Memory[R12_addr]; // @[InputUnit.scala:85:18]
  assign R13_data = Memory[R13_addr]; // @[InputUnit.scala:85:18]
  assign R14_data = Memory[R14_addr]; // @[InputUnit.scala:85:18]
  assign R15_data = Memory[R15_addr]; // @[InputUnit.scala:85:18]
  assign R16_data = Memory[R16_addr]; // @[InputUnit.scala:85:18]
  assign R17_data = Memory[R17_addr]; // @[InputUnit.scala:85:18]
  assign R18_data = Memory[R18_addr]; // @[InputUnit.scala:85:18]
  assign R19_data = Memory[R19_addr]; // @[InputUnit.scala:85:18]
  assign R20_data = Memory[R20_addr]; // @[InputUnit.scala:85:18]
  assign R21_data = Memory[R21_addr]; // @[InputUnit.scala:85:18]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File ShiftReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
// Similar to the Chisel ShiftRegister but allows the user to suggest a
// name to the registers that get instantiated, and
// to provide a reset value.
object ShiftRegInit {
def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T =
(0 until n).foldRight(in) {
case (i, next) => {
val r = RegNext(next, init)
name.foreach { na => r.suggestName(s"${na}_${i}") }
r
}
}
}
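// --- Illustrative usage sketch (not part of the original file) ---
// Shows how ShiftRegInit might be used, relying on the imports at the top of
// this file; the module and signal names here are hypothetical.
class ShiftRegInitExample extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  // Two registers, each reset to false.B, with suggested names "sync_0"/"sync_1".
  io.out := ShiftRegInit(io.in, n = 2, init = false.B, name = Some("sync"))
}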
/** These wrap behavioral
* shift registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
* The different types vary in their reset behavior:
* AsyncResetShiftReg -- Asynchronously reset register array
* A W(width) x D(depth) sized array is constructed from D instantiations of a
* W-wide register vector. Functionally identical to AsyncResetSynchronizerShiftReg,
* but only used for timing applications
*/
abstract class AbstractPipelineReg(w: Int = 1) extends Module {
val io = IO(new Bundle {
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
}
)
}
object AbstractPipelineReg {
def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = {
val chain = Module(gen)
name.foreach{ chain.suggestName(_) }
chain.io.d := in.asUInt
chain.io.q.asTypeOf(in)
}
}
class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) {
require(depth > 0, "Depth must be greater than 0.")
override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}"
val chain = List.tabulate(depth) { i =>
Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}")
}
chain.last.io.d := io.d
chain.last.io.en := true.B
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink.io.d := source.io.q
sink.io.en := true.B
}
io.q := chain.head.io.q
}
object AsyncResetShiftReg {
def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name)
def apply [T <: Data](in: T, depth: Int, name: Option[String]): T =
apply(in, depth, 0, name)
def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T =
apply(in, depth, init.litValue.toInt, name)
def apply [T <: Data](in: T, depth: Int, init: T): T =
apply (in, depth, init.litValue.toInt, None)
}
File SynchronizerReg.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip.util
import chisel3._
import chisel3.util.{RegEnable, Cat}
/** These wrap behavioral
* shift and next registers into specific modules to allow for
* backend flows to replace or constrain
* them properly when used for CDC synchronization,
* rather than buffering.
*
*
* These are built up of *ResetSynchronizerPrimitiveShiftReg,
* intended to be replaced by the integrator's metastable flops chains or replaced
* at this level if they have a multi-bit wide synchronizer primitive.
* The different types vary in their reset behavior:
* NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin
* AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep
* 1-bit-wide shift registers.
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg
*
* [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference.
*
* ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross
* Clock Domains.
*/
object SynchronizerResetType extends Enumeration {
val NonSync, Inferred, Sync, Async = Value
}
// Note: this should not be used directly.
// Use the companion object to generate this with the correct reset type mixin.
private class SynchronizerPrimitiveShiftReg(
sync: Int,
init: Boolean,
resetType: SynchronizerResetType.Value)
extends AbstractPipelineReg(1) {
val initInt = if (init) 1 else 0
val initPostfix = resetType match {
case SynchronizerResetType.NonSync => ""
case _ => s"_i${initInt}"
}
override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}"
val chain = List.tabulate(sync) { i =>
val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B)
reg.suggestName(s"sync_$i")
}
chain.last := io.d.asBool
(chain.init zip chain.tail).foreach { case (sink, source) =>
sink := source
}
io.q := chain.head.asUInt
}
private object SynchronizerPrimitiveShiftReg {
def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = {
val gen: () => SynchronizerPrimitiveShiftReg = resetType match {
case SynchronizerResetType.NonSync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
case SynchronizerResetType.Async =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset
case SynchronizerResetType.Sync =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset
case SynchronizerResetType.Inferred =>
() => new SynchronizerPrimitiveShiftReg(sync, init, resetType)
}
AbstractPipelineReg(gen(), in)
}
}
// Note: This module may end up with a non-AsyncReset type reset.
// But the Primitives within will always have AsyncReset type.
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int)
extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asAsyncReset){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async)
}
}
io.q := Cat(output.reverse)
}
object AsyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
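// --- Illustrative usage sketch (not part of the original file) ---
// A hypothetical CDC receiver: an asynchronous Bool is passed through a 3-deep,
// async-reset synchronizer chain before being used in this clock domain.
class AsyncBoolSyncExample extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())
    val sync_out = Output(Bool())
  })
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, sync = 3, init = 0)
}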
// Note: This module may end up with a non-Bool type reset.
// But the Primitives within will always have Bool reset type.
@deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2")
class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
withReset(reset.asBool){
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync)
}
}
io.q := Cat(output.reverse)
}
object SyncResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}"
val output = Seq.tabulate(w) { i =>
val initBit = ((init >> i) & 1) > 0
SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred)
}
io.q := Cat(output.reverse)
}
object ResetSynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T =
AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name)
def apply [T <: Data](in: T, sync: Int, name: Option[String]): T =
apply (in, sync, 0, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, 0, None)
def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T =
apply(in, sync, init.litValue.toInt, name)
def apply [T <: Data](in: T, sync: Int, init: T): T =
apply (in, sync, init.litValue.toInt, None)
}
class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) {
require(sync > 1, s"Sync must be greater than 1, not ${sync}.")
override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}"
val output = Seq.tabulate(w) { i =>
SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync)
}
io.q := Cat(output.reverse)
}
object SynchronizerShiftReg {
def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T =
if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name)
def apply [T <: Data](in: T, sync: Int): T =
apply (in, sync, None)
def apply [T <: Data](in: T): T =
apply (in, 3, None)
}
class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module {
override def desiredName = s"ClockCrossingReg_w${w}"
val io = IO(new Bundle{
val d = Input(UInt(w.W))
val q = Output(UInt(w.W))
val en = Input(Bool())
})
val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en)
io.q := cdc_reg
}
object ClockCrossingReg {
def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = {
val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit))
name.foreach{ cdc_reg.suggestName(_) }
cdc_reg.io.d := in.asUInt
cdc_reg.io.en := en
cdc_reg.io.q.asTypeOf(in)
}
}
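// --- Illustrative usage sketch (not part of the original file) ---
// Hypothetical use of ClockCrossingReg: capture an 8-bit bus in the destination
// domain only when `capture_en` is asserted; doInit = true gives a zero reset value.
class ClockCrossingRegExample extends Module {
  val io = IO(new Bundle {
    val bus_in     = Input(UInt(8.W))
    val capture_en = Input(Bool())
    val bus_out    = Output(UInt(8.W))
  })
  io.bus_out := ClockCrossingReg(io.bus_in, en = io.capture_en, doInit = true)
}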
| module AsyncResetSynchronizerShiftReg_w1_d3_i0_210( // @[SynchronizerReg.scala:80:7]
input clock, // @[SynchronizerReg.scala:80:7]
input reset, // @[SynchronizerReg.scala:80:7]
input io_d, // @[ShiftReg.scala:36:14]
output io_q // @[ShiftReg.scala:36:14]
);
wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7]
wire _output_T = reset; // @[SynchronizerReg.scala:86:21]
wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41]
wire output_0; // @[ShiftReg.scala:48:24]
wire io_q_0; // @[SynchronizerReg.scala:80:7]
assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7]
AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_382 output_chain ( // @[ShiftReg.scala:45:23]
.clock (clock),
.reset (_output_T), // @[SynchronizerReg.scala:86:21]
.io_d (_output_T_1), // @[SynchronizerReg.scala:87:41]
.io_q (output_0)
); // @[ShiftReg.scala:45:23]
assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File Tile.scala:
// See README.md for license details.
package gemmini
import chisel3._
import chisel3.util._
import Util._
/**
* A Tile is a purely combinational 2D array of passThrough PEs.
* a, b, d, and the control signals are broadcast across the entire array and are passed through to the Tile's outputs
* @param width The data width of each PE in bits
* @param rows Number of PEs on each row
* @param columns Number of PEs on each column
*/
class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module {
val io = IO(new Bundle {
val in_a = Input(Vec(rows, inputType))
val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it
val in_d = Input(Vec(columns, outputType))
val in_control = Input(Vec(columns, new PEControl(accType)))
val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val in_last = Input(Vec(columns, Bool()))
val out_a = Output(Vec(rows, inputType))
val out_c = Output(Vec(columns, outputType))
val out_b = Output(Vec(columns, outputType))
val out_control = Output(Vec(columns, new PEControl(accType)))
val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W)))
val out_last = Output(Vec(columns, Bool()))
val in_valid = Input(Vec(columns, Bool()))
val out_valid = Output(Vec(columns, Bool()))
val bad_dataflow = Output(Bool())
})
import ev._
val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls)))
val tileT = tile.transpose
// TODO: abstract hori/vert broadcast, all these connections look the same
// Broadcast 'a' horizontally across the Tile
for (r <- 0 until rows) {
tile(r).foldLeft(io.in_a(r)) {
case (in_a, pe) =>
pe.io.in_a := in_a
pe.io.out_a
}
}
// Broadcast 'b' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_b(c)) {
case (in_b, pe) =>
pe.io.in_b := (if (tree_reduction) in_b.zero else in_b)
pe.io.out_b
}
}
// Broadcast 'd' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_d(c)) {
case (in_d, pe) =>
pe.io.in_d := in_d
pe.io.out_c
}
}
// Broadcast 'control' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_control(c)) {
case (in_ctrl, pe) =>
pe.io.in_control := in_ctrl
pe.io.out_control
}
}
// Broadcast 'garbage' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_valid(c)) {
case (v, pe) =>
pe.io.in_valid := v
pe.io.out_valid
}
}
// Broadcast 'id' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_id(c)) {
case (id, pe) =>
pe.io.in_id := id
pe.io.out_id
}
}
// Broadcast 'last' vertically across the Tile
for (c <- 0 until columns) {
tileT(c).foldLeft(io.in_last(c)) {
case (last, pe) =>
pe.io.in_last := last
pe.io.out_last
}
}
// Drive the Tile's bottom IO
for (c <- 0 until columns) {
io.out_c(c) := tile(rows-1)(c).io.out_c
io.out_control(c) := tile(rows-1)(c).io.out_control
io.out_id(c) := tile(rows-1)(c).io.out_id
io.out_last(c) := tile(rows-1)(c).io.out_last
io.out_valid(c) := tile(rows-1)(c).io.out_valid
io.out_b(c) := {
if (tree_reduction) {
val prods = tileT(c).map(_.io.out_b)
accumulateTree(prods :+ io.in_b(c))
} else {
tile(rows - 1)(c).io.out_b
}
}
}
io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_)
// Drive the Tile's right IO
for (r <- 0 until rows) {
io.out_a(r) := tile(r)(columns-1).io.out_a
}
}
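// --- Illustrative instantiation sketch (not part of the original file) ---
// A hypothetical wrapper showing how a Tile is parameterized: 8-bit inputs,
// 20-bit outputs, 32-bit accumulators, weight-stationary dataflow, no tree
// reduction, and a 1x1 array of PEs. An implicit Arithmetic[SInt] instance is
// assumed to be available from gemmini's Arithmetic definitions.
class TileExample(implicit ev: Arithmetic[SInt]) extends Module {
  val tile = Module(new Tile(SInt(8.W), SInt(20.W), SInt(32.W), Dataflow.WS,
    tree_reduction = false, max_simultaneous_matmuls = 8, rows = 1, columns = 1))
  val io = IO(chiselTypeOf(tile.io))
  io <> tile.io
}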
| module Tile_36( // @[Tile.scala:16:7]
input clock, // @[Tile.scala:16:7]
input reset, // @[Tile.scala:16:7]
input [7:0] io_in_a_0, // @[Tile.scala:17:14]
input [19:0] io_in_b_0, // @[Tile.scala:17:14]
input [19:0] io_in_d_0, // @[Tile.scala:17:14]
input io_in_control_0_dataflow, // @[Tile.scala:17:14]
input io_in_control_0_propagate, // @[Tile.scala:17:14]
input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14]
input [2:0] io_in_id_0, // @[Tile.scala:17:14]
input io_in_last_0, // @[Tile.scala:17:14]
output [7:0] io_out_a_0, // @[Tile.scala:17:14]
output [19:0] io_out_c_0, // @[Tile.scala:17:14]
output [19:0] io_out_b_0, // @[Tile.scala:17:14]
output io_out_control_0_dataflow, // @[Tile.scala:17:14]
output io_out_control_0_propagate, // @[Tile.scala:17:14]
output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14]
output [2:0] io_out_id_0, // @[Tile.scala:17:14]
output io_out_last_0, // @[Tile.scala:17:14]
input io_in_valid_0, // @[Tile.scala:17:14]
output io_out_valid_0 // @[Tile.scala:17:14]
);
wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7]
wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7]
wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7]
wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7]
wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7]
wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7]
wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7]
wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7]
wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7]
wire io_bad_dataflow = 1'h0; // @[Tile.scala:16:7, :17:14, :42:44]
wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7]
wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7]
wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
wire io_out_control_0_propagate_0; // @[Tile.scala:16:7]
wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7]
wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7]
wire io_out_last_0_0; // @[Tile.scala:16:7]
wire io_out_valid_0_0; // @[Tile.scala:16:7]
PE_292 tile_0_0 ( // @[Tile.scala:42:44]
.clock (clock),
.reset (reset),
.io_in_a (io_in_a_0_0), // @[Tile.scala:16:7]
.io_in_b (io_in_b_0_0), // @[Tile.scala:16:7]
.io_in_d (io_in_d_0_0), // @[Tile.scala:16:7]
.io_out_a (io_out_a_0_0),
.io_out_b (io_out_b_0_0),
.io_out_c (io_out_c_0_0),
.io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7]
.io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7]
.io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7]
.io_out_control_dataflow (io_out_control_0_dataflow_0),
.io_out_control_propagate (io_out_control_0_propagate_0),
.io_out_control_shift (io_out_control_0_shift_0),
.io_in_id (io_in_id_0_0), // @[Tile.scala:16:7]
.io_out_id (io_out_id_0_0),
.io_in_last (io_in_last_0_0), // @[Tile.scala:16:7]
.io_out_last (io_out_last_0_0),
.io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7]
.io_out_valid (io_out_valid_0_0)
); // @[Tile.scala:42:44]
assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7]
assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7]
assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7]
assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7]
assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7]
assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7]
assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7]
assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7]
assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7]
endmodule |
Generate the Verilog code corresponding to the following Chisel files.
File package.scala:
// See LICENSE.SiFive for license details.
package freechips.rocketchip
import chisel3._
import chisel3.util._
import scala.math.min
import scala.collection.{immutable, mutable}
package object util {
implicit class UnzippableOption[S, T](val x: Option[(S, T)]) {
def unzip = (x.map(_._1), x.map(_._2))
}
implicit class UIntIsOneOf(private val x: UInt) extends AnyVal {
def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR
def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq)
}
implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal {
/** Like Vec.apply(idx), but tolerates indices of mismatched width */
def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0))
}
implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal {
def apply(idx: UInt): T = {
if (x.size <= 1) {
x.head
} else if (!isPow2(x.size)) {
// For non-power-of-2 seqs, reflect elements to simplify decoder
(x ++ x.takeRight(x.size & -x.size)).toSeq(idx)
} else {
// Ignore MSBs of idx
val truncIdx =
if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx
else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0)
x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) }
}
}
def extract(idx: UInt): T = VecInit(x).extract(idx)
def asUInt: UInt = Cat(x.map(_.asUInt).reverse)
def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n)
def rotate(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n)
def rotateRight(n: UInt): Seq[T] = {
if (x.size <= 1) {
x
} else {
require(isPow2(x.size))
val amt = n.padTo(log2Ceil(x.size))
(0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) })
}
}
}
// allow bitwise ops on Seq[Bool] just like UInt
implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal {
def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b }
def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b }
def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b }
def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x
def >> (n: Int): Seq[Bool] = x drop n
def unary_~ : Seq[Bool] = x.map(!_)
def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_)
def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_)
def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_)
private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B)
}
implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal {
def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable))
def getElements: Seq[Element] = x match {
case e: Element => Seq(e)
case a: Aggregate => a.getElements.flatMap(_.getElements)
}
}
/** Any Data subtype that has a Bool member named valid. */
type DataCanBeValid = Data { val valid: Bool }
implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal {
def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable)
}
implicit class StringToAugmentedString(private val x: String) extends AnyVal {
/** converts from camel case to underscores, also removing all spaces */
def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") {
case (acc, c) if c.isUpper => acc + "_" + c.toLower
case (acc, c) if c == ' ' => acc
case (acc, c) => acc + c
}
/** converts spaces or underscores to hyphens, also lowering case */
def kebab: String = x.toLowerCase map {
case ' ' => '-'
case '_' => '-'
case c => c
}
def named(name: Option[String]): String = {
x + name.map("_named_" + _ ).getOrElse("_with_no_name")
}
def named(name: String): String = named(Some(name))
}
implicit def uintToBitPat(x: UInt): BitPat = BitPat(x)
implicit def wcToUInt(c: WideCounter): UInt = c.value
implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal {
def sextTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x)
}
def padTo(n: Int): UInt = {
require(x.getWidth <= n)
if (x.getWidth == n) x
else Cat(0.U((n - x.getWidth).W), x)
}
// shifts left by n if n >= 0, or right by -n if n < 0
def << (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << n(w-1, 0)
Mux(n(w), shifted >> (1 << w), shifted)
}
// shifts right by n if n >= 0, or left by -n if n < 0
def >> (n: SInt): UInt = {
val w = n.getWidth - 1
require(w <= 30)
val shifted = x << (1 << w) >> n(w-1, 0)
Mux(n(w), shifted, shifted >> (1 << w))
}
// Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts
def extract(hi: Int, lo: Int): UInt = {
require(hi >= lo-1)
if (hi == lo-1) 0.U
else x(hi, lo)
}
// Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts
def extractOption(hi: Int, lo: Int): Option[UInt] = {
require(hi >= lo-1)
if (hi == lo-1) None
else Some(x(hi, lo))
}
// like x & ~y, but first truncate or zero-extend y to x's width
def andNot(y: UInt): UInt = x & ~(y | (x & 0.U))
def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n)
def rotateRight(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r))
}
}
def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n))
def rotateLeft(n: UInt): UInt = {
if (x.getWidth <= 1) {
x
} else {
val amt = n.padTo(log2Ceil(x.getWidth))
(0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r))
}
}
// compute (this + y) % n, given (this < n) and (y < n)
def addWrap(y: UInt, n: Int): UInt = {
val z = x +& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0)
}
// compute (this - y) % n, given (this < n) and (y < n)
def subWrap(y: UInt, n: Int): UInt = {
val z = x -& y
if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0)
}
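// Worked example (illustrative, not from the original file): with n = 7,
// 5.U.addWrap(4.U, 7) === 2.U since (5 + 4) % 7 == 2, and
// 1.U.subWrap(4.U, 7) === 4.U since (1 - 4) mod 7 == 4.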
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x)
// Like >=, but prevents x-prop for ('x >= 0)
def >== (y: UInt): Bool = x >= y || y === 0.U
}
implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal {
def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y)
def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y)
}
implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal {
def toInt: Int = if (x) 1 else 0
// this one's snagged from scalaz
def option[T](z: => T): Option[T] = if (x) Some(z) else None
}
implicit class IntToAugmentedInt(private val x: Int) extends AnyVal {
// exact log2
def log2: Int = {
require(isPow2(x))
log2Ceil(x)
}
}
def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x)
def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x))
def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0)
def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1)
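// Worked example (illustrative, not from the original file): UIntToOH1(3.U, 8) is
// "b0000_0111" (a thermometer code with three ones); OH1ToOH of that value is
// "b0000_1000" (the one-hot bit just above the thermometer), so OH1ToUInt recovers 3.U.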
def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None
// Fill 1s from low bits to high bits
def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth)
def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0))
helper(1, x)(width-1, 0)
}
// Fill 1s from high bits to low bits
def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth)
def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = {
val stop = min(width, cap)
def helper(s: Int, x: UInt): UInt =
if (s >= stop) x else helper(s+s, x | (x >> s))
helper(1, x)(width-1, 0)
}
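// Worked example (illustrative, not from the original file): for the 4-bit value
// "b0100".U, leftOR gives "b1100".U (ones fill toward the MSB) and rightOR gives
// "b0111".U (ones fill toward the LSB).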
def OptimizationBarrier[T <: Data](in: T): T = {
val barrier = Module(new Module {
val io = IO(new Bundle {
val x = Input(chiselTypeOf(in))
val y = Output(chiselTypeOf(in))
})
io.y := io.x
override def desiredName = s"OptimizationBarrier_${in.typeName}"
})
barrier.io.x := in
barrier.io.y
}
/** Similar to Seq.groupBy except this returns a Seq instead of a Map
* Useful for deterministic code generation
*/
def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = {
val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]]
for (x <- xs) {
val key = f(x)
val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A])
l += x
}
map.view.map({ case (k, vs) => k -> vs.toList }).toList
}
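// Worked example (illustrative, not from the original file):
//   groupByIntoSeq(Seq(3, 1, 4, 1, 5))(_ % 2) == Seq(1 -> List(3, 1, 1, 5), 0 -> List(4))
// Keys appear in first-encounter order, which Seq.groupBy's Map does not guarantee.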
def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match {
case 1 => List.fill(n)(in.head)
case x if x == n => in
case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
}
// HeterogeneousBag moved to standalone diplomacy
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
@deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
| module OptimizationBarrier_TLBEntryData_297( // @[package.scala:267:30]
input clock, // @[package.scala:267:30]
input reset, // @[package.scala:267:30]
input [19:0] io_x_ppn, // @[package.scala:268:18]
input io_x_u, // @[package.scala:268:18]
input io_x_g, // @[package.scala:268:18]
input io_x_ae_ptw, // @[package.scala:268:18]
input io_x_ae_final, // @[package.scala:268:18]
input io_x_ae_stage2, // @[package.scala:268:18]
input io_x_pf, // @[package.scala:268:18]
input io_x_gf, // @[package.scala:268:18]
input io_x_sw, // @[package.scala:268:18]
input io_x_sx, // @[package.scala:268:18]
input io_x_sr, // @[package.scala:268:18]
input io_x_hw, // @[package.scala:268:18]
input io_x_hx, // @[package.scala:268:18]
input io_x_hr, // @[package.scala:268:18]
input io_x_pw, // @[package.scala:268:18]
input io_x_px, // @[package.scala:268:18]
input io_x_pr, // @[package.scala:268:18]
input io_x_ppp, // @[package.scala:268:18]
input io_x_pal, // @[package.scala:268:18]
input io_x_paa, // @[package.scala:268:18]
input io_x_eff, // @[package.scala:268:18]
input io_x_c, // @[package.scala:268:18]
input io_x_fragmented_superpage, // @[package.scala:268:18]
output [19:0] io_y_ppn, // @[package.scala:268:18]
output io_y_u, // @[package.scala:268:18]
output io_y_ae_ptw, // @[package.scala:268:18]
output io_y_ae_final, // @[package.scala:268:18]
output io_y_ae_stage2, // @[package.scala:268:18]
output io_y_pf, // @[package.scala:268:18]
output io_y_gf, // @[package.scala:268:18]
output io_y_sw, // @[package.scala:268:18]
output io_y_sx, // @[package.scala:268:18]
output io_y_sr, // @[package.scala:268:18]
output io_y_hw, // @[package.scala:268:18]
output io_y_hx, // @[package.scala:268:18]
output io_y_hr, // @[package.scala:268:18]
output io_y_pw, // @[package.scala:268:18]
output io_y_px, // @[package.scala:268:18]
output io_y_pr, // @[package.scala:268:18]
output io_y_ppp, // @[package.scala:268:18]
output io_y_pal, // @[package.scala:268:18]
output io_y_paa, // @[package.scala:268:18]
output io_y_eff, // @[package.scala:268:18]
output io_y_c // @[package.scala:268:18]
);
wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30]
wire io_x_u_0 = io_x_u; // @[package.scala:267:30]
wire io_x_g_0 = io_x_g; // @[package.scala:267:30]
wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30]
wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30]
wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30]
wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30]
wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30]
wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30]
wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30]
wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30]
wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30]
wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30]
wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30]
wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30]
wire io_x_px_0 = io_x_px; // @[package.scala:267:30]
wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30]
wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30]
wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30]
wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30]
wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30]
wire io_x_c_0 = io_x_c; // @[package.scala:267:30]
wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30]
wire [19:0] io_y_ppn_0 = io_x_ppn_0; // @[package.scala:267:30]
wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30]
wire io_y_g = io_x_g_0; // @[package.scala:267:30]
wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30]
wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30]
wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30]
wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30]
wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30]
wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30]
wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30]
wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30]
wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30]
wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30]
wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30]
wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30]
wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30]
wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30]
wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30]
wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30]
wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30]
wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30]
wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30]
wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30]
assign io_y_ppn = io_y_ppn_0; // @[package.scala:267:30]
assign io_y_u = io_y_u_0; // @[package.scala:267:30]
assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30]
assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30]
assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30]
assign io_y_pf = io_y_pf_0; // @[package.scala:267:30]
assign io_y_gf = io_y_gf_0; // @[package.scala:267:30]
assign io_y_sw = io_y_sw_0; // @[package.scala:267:30]
assign io_y_sx = io_y_sx_0; // @[package.scala:267:30]
assign io_y_sr = io_y_sr_0; // @[package.scala:267:30]
assign io_y_hw = io_y_hw_0; // @[package.scala:267:30]
assign io_y_hx = io_y_hx_0; // @[package.scala:267:30]
assign io_y_hr = io_y_hr_0; // @[package.scala:267:30]
assign io_y_pw = io_y_pw_0; // @[package.scala:267:30]
assign io_y_px = io_y_px_0; // @[package.scala:267:30]
assign io_y_pr = io_y_pr_0; // @[package.scala:267:30]
assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30]
assign io_y_pal = io_y_pal_0; // @[package.scala:267:30]
assign io_y_paa = io_y_paa_0; // @[package.scala:267:30]
assign io_y_eff = io_y_eff_0; // @[package.scala:267:30]
assign io_y_c = io_y_c_0; // @[package.scala:267:30]
endmodule |