module mig_7series_v2_3_poc_edge_store #
(parameter TCQ = 100,
parameter TAPCNTRWIDTH = 7,
parameter TAPSPERKCLK = 112)
(/*AUTOARG*/
// Outputs
fall_lead, fall_trail, rise_lead, rise_trail,
// Inputs
clk, run_polarity, run_end, select0, select1, tap, run
);
input clk;
input run_polarity;
input run_end;
input select0;
input select1;
input [TAPCNTRWIDTH-1:0] tap;
input [TAPCNTRWIDTH-1:0] run;
wire [TAPCNTRWIDTH:0] trailing_edge = run > tap ? tap + TAPSPERKCLK[TAPCNTRWIDTH-1:0] - run
: tap - run;
wire run_end_this = run_end && select0 && select1;
reg [TAPCNTRWIDTH-1:0] fall_lead_r, fall_trail_r, rise_lead_r, rise_trail_r;
output [TAPCNTRWIDTH-1:0] fall_lead, fall_trail, rise_lead, rise_trail;
assign fall_lead = fall_lead_r;
assign fall_trail = fall_trail_r;
assign rise_lead = rise_lead_r;
assign rise_trail = rise_trail_r;
wire [TAPCNTRWIDTH-1:0] fall_lead_ns = run_end_this & run_polarity ? tap : fall_lead_r;
wire [TAPCNTRWIDTH-1:0] rise_trail_ns = run_end_this & run_polarity ? trailing_edge[TAPCNTRWIDTH-1:0]
: rise_trail_r;
wire [TAPCNTRWIDTH-1:0] rise_lead_ns = run_end_this & ~run_polarity ? tap : rise_lead_r;
wire [TAPCNTRWIDTH-1:0] fall_trail_ns = run_end_this & ~run_polarity ? trailing_edge[TAPCNTRWIDTH-1:0]
: fall_trail_r;
always @(posedge clk) fall_lead_r <= #TCQ fall_lead_ns;
always @(posedge clk) fall_trail_r <= #TCQ fall_trail_ns;
always @(posedge clk) rise_lead_r <= #TCQ rise_lead_ns;
always @(posedge clk) rise_trail_r <= #TCQ rise_trail_ns;
endmodule
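// ---------------------------------------------------------------------------
// Minimal standalone simulation sketch for poc_edge_store (illustration only).
// The testbench name, clock period, and stimulus values below are assumptions,
// not part of the MIG core. It pulses run_end with both selects asserted while
// run_polarity = 0, so the module captures rise_lead = tap and
// fall_trail = tap + TAPSPERKCLK - run (the trailing edge wrapped past zero).
// ---------------------------------------------------------------------------
`timescale 1ps / 1ps
module tb_poc_edge_store;
  reg        clk = 1'b0;
  reg        run_polarity = 1'b0;   // 0 => the run that just ended was a run of lows
  reg        run_end = 1'b0;
  reg        select0 = 1'b1, select1 = 1'b1;
  reg  [6:0] tap = 7'd20;           // tap at which the run ended
  reg  [6:0] run = 7'd30;           // length of the run, in taps
  wire [6:0] fall_lead, fall_trail, rise_lead, rise_trail;

  mig_7series_v2_3_poc_edge_store #(.TCQ(100), .TAPCNTRWIDTH(7), .TAPSPERKCLK(112)) dut
    (.clk(clk), .run_polarity(run_polarity), .run_end(run_end),
     .select0(select0), .select1(select1), .tap(tap), .run(run),
     .fall_lead(fall_lead), .fall_trail(fall_trail),
     .rise_lead(rise_lead), .rise_trail(rise_trail));

  always #1250 clk = ~clk;          // 2500 ps period, matching tCK = 2500

  initial begin
    @(negedge clk) run_end = 1'b1;  // end of a low run: captures rise_lead and fall_trail
    @(negedge clk) run_end = 1'b0;
    @(negedge clk);
    // Expected: rise_lead = 20 (tap), fall_trail = 20 + 112 - 30 = 102 (wrapped)
    $display("rise_lead=%0d fall_trail=%0d", rise_lead, fall_trail);
    $finish;
  end
endmodule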
module mig_7series_v2_3_ddr_phy_ocd_cntlr #
(parameter TCQ = 100,
parameter DQS_CNT_WIDTH = 3,
parameter DQS_WIDTH = 8)
(/*AUTOARG*/
// Outputs
wrlvl_final, complex_wrlvl_final, oclk_init_delay_done,
ocd_prech_req, lim_start, complex_oclkdelay_calib_done,
oclkdelay_calib_done, phy_rddata_en_1, phy_rddata_en_2,
phy_rddata_en_3, ocd_cntlr2stg2_dec, oclkdelay_calib_cnt,
reset_scan,
// Inputs
clk, rst, prech_done, oclkdelay_calib_start,
complex_oclkdelay_calib_start, lim_done, phy_rddata_en,
po_counter_read_val, po_rdy, scan_done
);
localparam ONE = 1;
input clk;
input rst;
output wrlvl_final, complex_wrlvl_final;
reg wrlvl_final_ns, wrlvl_final_r, complex_wrlvl_final_ns, complex_wrlvl_final_r;
always @(posedge clk) wrlvl_final_r <= #TCQ wrlvl_final_ns;
always @(posedge clk) complex_wrlvl_final_r <= #TCQ complex_wrlvl_final_ns;
assign wrlvl_final = wrlvl_final_r;
assign complex_wrlvl_final = complex_wrlvl_final_r;
// Completed initial delay increment
output oclk_init_delay_done; // may not need this... maybe for fast cal mode.
assign oclk_init_delay_done = 1'b1;
// Precharge done status from ddr_phy_init
input prech_done;
reg ocd_prech_req_ns, ocd_prech_req_r;
always @(posedge clk) ocd_prech_req_r <= #TCQ ocd_prech_req_ns;
output ocd_prech_req;
assign ocd_prech_req = ocd_prech_req_r;
input oclkdelay_calib_start, complex_oclkdelay_calib_start;
input lim_done;
reg lim_start_ns, lim_start_r;
always @(posedge clk) lim_start_r <= #TCQ lim_start_ns;
output lim_start;
assign lim_start = lim_start_r;
reg complex_oclkdelay_calib_done_ns, complex_oclkdelay_calib_done_r;
always @(posedge clk) complex_oclkdelay_calib_done_r <= #TCQ complex_oclkdelay_calib_done_ns;
output complex_oclkdelay_calib_done;
assign complex_oclkdelay_calib_done = complex_oclkdelay_calib_done_r;
reg oclkdelay_calib_done_ns, oclkdelay_calib_done_r;
always @(posedge clk) oclkdelay_calib_done_r <= #TCQ oclkdelay_calib_done_ns;
output oclkdelay_calib_done;
assign oclkdelay_calib_done = oclkdelay_calib_done_r;
input phy_rddata_en;
reg prde_r1, prde_r2;
always @(posedge clk) prde_r1 <= #TCQ phy_rddata_en;
always @(posedge clk) prde_r2 <= #TCQ prde_r1;
wire prde = complex_oclkdelay_calib_start ? prde_r2 : phy_rddata_en;
reg phy_rddata_en_r1, phy_rddata_en_r2, phy_rddata_en_r3;
always @(posedge clk) phy_rddata_en_r1 <= #TCQ prde;
always @(posedge clk) phy_rddata_en_r2 <= #TCQ phy_rddata_en_r1;
always @(posedge clk) phy_rddata_en_r3 <= #TCQ phy_rddata_en_r2;
output phy_rddata_en_1, phy_rddata_en_2, phy_rddata_en_3;
assign phy_rddata_en_1 = phy_rddata_en_r1;
assign phy_rddata_en_2 = phy_rddata_en_r2;
assign phy_rddata_en_3 = phy_rddata_en_r3;
input [8:0] po_counter_read_val;
reg ocd_cntlr2stg2_dec_r;
output ocd_cntlr2stg2_dec;
assign ocd_cntlr2stg2_dec = ocd_cntlr2stg2_dec_r;
input po_rdy;
reg [3:0] po_rd_wait_ns, po_rd_wait_r;
always @(posedge clk) po_rd_wait_r <= #TCQ po_rd_wait_ns;
reg [DQS_CNT_WIDTH-1:0] byte_ns, byte_r;
always @(posedge clk) byte_r <= #TCQ byte_ns;
output [DQS_CNT_WIDTH:0] oclkdelay_calib_cnt;
assign oclkdelay_calib_cnt = {1'b0, byte_r};
reg reset_scan_ns, reset_scan_r;
always @(posedge clk) reset_scan_r <= #TCQ reset_scan_ns;
output reset_scan;
assign reset_scan = reset_scan_r;
input scan_done;
reg [2:0] sm_ns, sm_r;
always @(posedge clk) sm_r <= #TCQ sm_ns;
// Primary state machine.
always @(*) begin
// Default next state assignments.
byte_ns = byte_r;
complex_wrlvl_final_ns = complex_wrlvl_final_r;
lim_start_ns = lim_start_r;
oclkdelay_calib_done_ns = oclkdelay_calib_done_r;
complex_oclkdelay_calib_done_ns = complex_oclkdelay_calib_done_r;
ocd_cntlr2stg2_dec_r = 1'b0;
po_rd_wait_ns = po_rd_wait_r;
if (|po_rd_wait_r) po_rd_wait_ns = po_rd_wait_r - 4'b1;
reset_scan_ns = reset_scan_r;
wrlvl_final_ns = wrlvl_final_r;
sm_ns = sm_r;
ocd_prech_req_ns= 1'b0;
if (rst == 1'b1) begin
// RESET next states
complex_oclkdelay_calib_done_ns = 1'b0;
complex_wrlvl_final_ns = 1'b0;
sm_ns = /*AK("READY")*/3'd0;
lim_start_ns = 1'b0;
oclkdelay_calib_done_ns = 1'b0;
reset_scan_ns = 1'b1;
wrlvl_final_ns = 1'b0;
end else
// State based actions and next states.
case (sm_r)
/*AL("READY")*/3'd0: begin
byte_ns = {DQS_CNT_WIDTH{1'b0}};
if (oclkdelay_calib_start && ~oclkdelay_calib_done_r ||
complex_oclkdelay_calib_start && ~complex_oclkdelay_calib_done_r)
begin
sm_ns = /*AK("LIMIT_START")*/3'd1;
lim_start_ns = 1'b1;
end
end
/*AL("LIMIT_START")*/3'd1:
sm_ns = /*AK("LIMIT_WAIT")*/3'd2;
/*AL("LIMIT_WAIT")*/3'd2:begin
if (lim_done) begin
lim_start_ns = 1'b0;
sm_ns = /*AK("SCAN")*/3'd3;
reset_scan_ns = 1'b0;
end
end
/*AL("SCAN")*/3'd3:begin
if (scan_done) begin
reset_scan_ns = 1'b1;
sm_ns = /*AK("COMPUTE")*/3'd4;
end
end
/*AL("COMPUTE")*/3'd4:begin
sm_ns = /*AK("PRECHARGE")*/3'd5;
ocd_prech_req_ns = 1'b1;
end
/*AL("PRECHARGE")*/3'd5:begin
if (prech_done) sm_ns = /*AK("DONE")*/3'd6;
end
/*AL("DONE")*/3'd6:begin
byte_ns = byte_r + ONE[DQS_CNT_WIDTH-1:0];
if ({1'b0, byte_r} == DQS_WIDTH[DQS_CNT_WIDTH:0] - ONE[DQS_WIDTH:0]) begin
byte_ns = {DQS_CNT_WIDTH{1'b0}};
po_rd_wait_ns = 4'd8;
sm_ns = /*AK("STG2_2_ZERO")*/3'd7;
end else begin
sm_ns = /*AK("LIMIT_START")*/3'd1;
lim_start_ns = 1'b1;
end
end
/*AL("STG2_2_ZERO")*/3'd7:
if (~|po_rd_wait_r && po_rdy)
if (|po_counter_read_val[5:0]) ocd_cntlr2stg2_dec_r = 1'b1;
else begin
if ({1'b0, byte_r} == DQS_WIDTH[DQS_CNT_WIDTH:0] - ONE[DQS_WIDTH:0]) begin
sm_ns = /*AK("READY")*/3'd0;
oclkdelay_calib_done_ns= 1'b1;
wrlvl_final_ns = 1'b1;
if (complex_oclkdelay_calib_start) begin
complex_oclkdelay_calib_done_ns = 1'b1;
complex_wrlvl_final_ns = 1'b1;
end
end else begin
byte_ns = byte_r + ONE[DQS_CNT_WIDTH-1:0];
po_rd_wait_ns = 4'd8;
end
end // else: !if(|po_counter_read_val[5:0])
endcase // case (sm_r)
end // always @ begin
endmodule
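// ---------------------------------------------------------------------------
// State encoding reference for the controller above, derived from the
// AL()/AK() tags in its case statement (a readability note, not additional RTL):
//   3'd0 READY        - wait for a calibration start request, clear byte counter
//   3'd1 LIMIT_START  - lim_start asserted for one cycle
//   3'd2 LIMIT_WAIT   - wait for lim_done, then release reset_scan
//   3'd3 SCAN         - wait for scan_done
//   3'd4 COMPUTE      - request a precharge
//   3'd5 PRECHARGE    - wait for prech_done
//   3'd6 DONE         - advance the byte counter, or move on after the last byte
//   3'd7 STG2_2_ZERO  - decrement Phaser_Out stage-2 taps to zero, then finish
// ---------------------------------------------------------------------------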
// Beginning of automatic wires (for undeclared instantiated-module outputs)
wire cs_en0; // From arb_row_col0 of arb_row_col.v
wire cs_en1; // From arb_row_col0 of arb_row_col.v
wire [nBANK_MACHS-1:0] grant_col_r; // From arb_row_col0 of arb_row_col.v
wire [nBANK_MACHS-1:0] grant_col_wr; // From arb_row_col0 of arb_row_col.v
wire [nBANK_MACHS-1:0] grant_config_r; // From arb_row_col0 of arb_row_col.v
wire [nBANK_MACHS-1:0] grant_row_r; // From arb_row_col0 of arb_row_col.v
wire [nBANK_MACHS-1:0] grant_pre_r; // From arb_row_col0 of arb_row_col.v
wire send_cmd0_row; // From arb_row_col0 of arb_row_col.v
wire send_cmd0_col; // From arb_row_col0 of arb_row_col.v
wire send_cmd1_row; // From arb_row_col0 of arb_row_col.v
wire send_cmd1_col;
wire send_cmd2_row;
wire send_cmd2_col;
wire send_cmd2_pre;
wire send_cmd3_col;
wire [5:0] col_channel_offset;
// End of automatics
wire sent_col_i;
wire cs_en2;
wire cs_en3;
assign sent_col = sent_col_i;
mig_7series_v2_3_arb_row_col #
(/*AUTOINSTPARAM*/
// Parameters
.TCQ (TCQ),
.ADDR_CMD_MODE (ADDR_CMD_MODE),
.CWL (CWL),
.EARLY_WR_DATA_ADDR (EARLY_WR_DATA_ADDR),
.nBANK_MACHS (nBANK_MACHS),
.nCK_PER_CLK (nCK_PER_CLK),
.nRAS (nRAS),
.nRCD (nRCD),
.nWR (nWR))
arb_row_col0
(/*AUTOINST*/
// Outputs
.grant_row_r (grant_row_r[nBANK_MACHS-1:0]),
.grant_pre_r (grant_pre_r[nBANK_MACHS-1:0]),
.sent_row (sent_row),
.sending_row (sending_row[nBANK_MACHS-1:0]),
.sending_pre (sending_pre[nBANK_MACHS-1:0]),
.grant_config_r (grant_config_r[nBANK_MACHS-1:0]),
.rnk_config_strobe (rnk_config_strobe),
.rnk_config_kill_rts_col (rnk_config_kill_rts_col),
.rnk_config_valid_r (rnk_config_valid_r),
.grant_col_r (grant_col_r[nBANK_MACHS-1:0]),
.sending_col (sending_col[nBANK_MACHS-1:0]),
.sent_col (sent_col_i),
.sent_col_r (sent_col_r),
.grant_col_wr (grant_col_wr[nBANK_MACHS-1:0]),
.send_cmd0_row (send_cmd0_row),
.send_cmd0_col (send_cmd0_col),
.send_cmd1_row (send_cmd1_row),
.send_cmd1_col (send_cmd1_col),
.send_cmd2_row (send_cmd2_row),
.send_cmd2_col (send_cmd2_col),
.send_cmd2_pre (send_cmd2_pre),
.send_cmd3_col (send_cmd3_col),
.col_channel_offset (col_channel_offset),
.cs_en0 (cs_en0),
.cs_en1 (cs_en1),
.cs_en2 (cs_en2),
.cs_en3 (cs_en3),
.insert_maint_r1 (insert_maint_r1),
// Inputs
.clk (clk),
.rst (rst),
.rts_row (rts_row[nBANK_MACHS-1:0]),
.rts_pre (rts_pre[nBANK_MACHS-1:0]),
.insert_maint_r (insert_maint_r),
.rts_col (rts_col[nBANK_MACHS-1:0]),
.rtc (rtc[nBANK_MACHS-1:0]),
.col_rdy_wr (col_rdy_wr[nBANK_MACHS-1:0]));
mig_7series_v2_3_arb_select #
(/*AUTOINSTPARAM*/
// Parameters
.TCQ (TCQ),
.EVEN_CWL_2T_MODE (EVEN_CWL_2T_MODE),
.ADDR_CMD_MODE (ADDR_CMD_MODE),
.BANK_VECT_INDX (BANK_VECT_INDX),
.BANK_WIDTH (BANK_WIDTH),
.BURST_MODE (BURST_MODE),
.CS_WIDTH (CS_WIDTH),
.CL (CL),
.CWL (CWL),
.DATA_BUF_ADDR_VECT_INDX (DATA_BUF_ADDR_VECT_INDX),
.DATA_BUF_ADDR_WIDTH (DATA_BUF_ADDR_WIDTH),
.DRAM_TYPE (DRAM_TYPE),
.EARLY_WR_DATA_ADDR (EARLY_WR_DATA_ADDR),
.ECC (ECC),
.CKE_ODT_AUX (CKE_ODT_AUX),
.nBANK_MACHS (nBANK_MACHS),
.nCK_PER_CLK (nCK_PER_CLK),
.nCS_PER_RANK (nCS_PER_RANK),
.nSLOTS (nSLOTS),
.RANKS (RANKS),
.RANK_VECT_INDX (RANK_VECT_INDX),
.RANK_WIDTH (RANK_WIDTH),
.ROW_VECT_INDX (ROW_VECT_INDX),
.ROW_WIDTH (ROW_WIDTH),
.RTT_NOM (RTT_NOM),
.RTT_WR (RTT_WR),
.SLOT_0_CONFIG (SLOT_0_CONFIG),
.SLOT_1_CONFIG (SLOT_1_CONFIG))
arb_select0
(/*AUTOINST*/
// Outputs
.col_periodic_rd (col_periodic_rd),
.col_ra (col_ra[RANK_WIDTH-1:0]),
.col_ba (col_ba[BANK_WIDTH-1:0]),
.col_a (col_a[ROW_WIDTH-1:0]),
.col_rmw (col_rmw),
.col_rd_wr (col_rd_wr),
.col_size (col_size),
.col_row (col_row[ROW_WIDTH-1:0]),
.col_data_buf_addr (col_data_buf_addr[DATA_BUF_ADDR_WIDTH-1:0]),
.col_wr_data_buf_addr (col_wr_data_buf_addr[DATA_BUF_ADDR_WIDTH-1:0]),
.mc_bank (mc_bank),
.mc_address (mc_address),
.mc_ras_n (mc_ras_n),
.mc_cas_n (mc_cas_n),
.mc_we_n (mc_we_n),
.mc_cs_n (mc_cs_n),
.mc_odt (mc_odt),
.mc_cke (mc_cke),
.mc_aux_out0 (mc_aux_out0),
.mc_aux_out1 (mc_aux_out1),
.mc_cmd (mc_cmd),
.mc_data_offset (mc_data_offset),
.mc_data_offset_1 (mc_data_offset_1),
.mc_data_offset_2 (mc_data_offset_2),
.mc_cas_slot (mc_cas_slot),
.col_channel_offset (col_channel_offset),
.rnk_config (rnk_config),
// Inputs
.clk (clk),
.rst (rst),
.init_calib_complete (init_calib_complete),
.calib_rddata_offset (calib_rddata_offset),
.calib_rddata_offset_1 (calib_rddata_offset_1),
.calib_rddata_offset_2 (calib_rddata_offset_2),
.req_rank_r (req_rank_r[RANK_VECT_INDX:0]),
.req_bank_r (req_bank_r[BANK_VECT_INDX:0]),
.req_ras (req_ras[nBANK_MACHS-1:0]),
.req_cas (req_cas[nBANK_MACHS-1:0]),
.req_wr_r (req_wr_r[nBANK_MACHS-1:0]),
.grant_row_r (grant_row_r[nBANK_MACHS-1:0]),
.grant_pre_r (grant_pre_r[nBANK_MACHS-1:0]),
.row_addr (row_addr[ROW_VECT_INDX:0]),
.row_cmd_wr (row_cmd_wr[nBANK_MACHS-1:0]),
.insert_maint_r1 (insert_maint_r1),
.maint_zq_r (maint_zq_r),
.maint_sre_r (maint_sre_r),
.maint_srx_r (maint_srx_r),
.maint_rank_r (maint_rank_r[RANK_WIDTH-1:0]),
.req_periodic_rd_r (req_periodic_rd_r[nBANK_MACHS-1:0]),
.req_size_r (req_size_r[nBANK_MACHS-1:0]),
.rd_wr_r (rd_wr_r[nBANK_MACHS-1:0]),
.req_row_r (req_row_r[ROW_VECT_INDX:0]),
.col_addr (col_addr[ROW_VECT_INDX:0]),
.req_data_buf_addr_r (req_data_buf_addr_r[DATA_BUF_ADDR_VECT_INDX:0]),
.grant_col_r (grant_col_r[nBANK_MACHS-1:0]),
.grant_col_wr (grant_col_wr[nBANK_MACHS-1:0]),
.send_cmd0_row (send_cmd0_row),
.send_cmd0_col (send_cmd0_col),
.send_cmd1_row (send_cmd1_row),
.send_cmd1_col (send_cmd1_col),
.send_cmd2_row (send_cmd2_row),
.send_cmd2_col (send_cmd2_col),
.send_cmd2_pre (send_cmd2_pre),
.send_cmd3_col (send_cmd3_col),
.sent_col (EVEN_CWL_2T_MODE == "ON" ? sent_col_r : sent_col),
.cs_en0 (cs_en0),
.cs_en1 (cs_en1),
.cs_en2 (cs_en2),
.cs_en3 (cs_en3),
.grant_config_r (grant_config_r[nBANK_MACHS-1:0]),
.rnk_config_strobe (rnk_config_strobe),
.slot_0_present (slot_0_present[7:0]),
.slot_1_present (slot_1_present[7:0]));
endmodule
module mig_7series_v2_3_clk_ibuf #
(
parameter SYSCLK_TYPE = "DIFFERENTIAL",
// input clock type
parameter DIFF_TERM_SYSCLK = "TRUE"
// Differential Termination
)
(
// Clock inputs
input sys_clk_p, // System clock diff input
input sys_clk_n,
input sys_clk_i,
output mmcm_clk
);
(* KEEP = "TRUE" *) wire sys_clk_ibufg /* synthesis syn_keep = 1 */;
generate
if (SYSCLK_TYPE == "DIFFERENTIAL") begin: diff_input_clk
//***********************************************************************
// Differential input clock input buffers
//***********************************************************************
IBUFGDS #
(
.DIFF_TERM (DIFF_TERM_SYSCLK),
.IBUF_LOW_PWR ("FALSE")
)
u_ibufg_sys_clk
(
.I (sys_clk_p),
.IB (sys_clk_n),
.O (sys_clk_ibufg)
);
end else if (SYSCLK_TYPE == "SINGLE_ENDED") begin: se_input_clk
//***********************************************************************
// SINGLE_ENDED input clock input buffers
//***********************************************************************
IBUFG #
(
.IBUF_LOW_PWR ("FALSE")
)
u_ibufg_sys_clk
(
.I (sys_clk_i),
.O (sys_clk_ibufg)
);
end else if (SYSCLK_TYPE == "NO_BUFFER") begin: internal_clk
//***********************************************************************
// System clock is driven from FPGA internal clock (clock from fabric)
//***********************************************************************
assign sys_clk_ibufg = sys_clk_i;
end
endgenerate
assign mmcm_clk = sys_clk_ibufg;
endmodule
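// ---------------------------------------------------------------------------
// Example wrapper showing one way to instantiate clk_ibuf with a differential
// system clock. This is an illustrative sketch: the wrapper module name and its
// ports are assumptions, not part of the MIG output.
// ---------------------------------------------------------------------------
module example_clk_ibuf_wrapper
  (input  sys_clk_p,    // differential system clock, P side
   input  sys_clk_n,    // differential system clock, N side
   output mmcm_clk);    // buffered clock to drive the MMCM

  mig_7series_v2_3_clk_ibuf #
    (.SYSCLK_TYPE      ("DIFFERENTIAL"),
     .DIFF_TERM_SYSCLK ("TRUE"))
    u_clk_ibuf
    (.sys_clk_p (sys_clk_p),
     .sys_clk_n (sys_clk_n),
     .sys_clk_i (1'b0),        // single-ended input is unused in DIFFERENTIAL mode
     .mmcm_clk  (mmcm_clk));
endmodule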
module mig_7series_v2_3_mem_intfc #
(
parameter TCQ = 100,
parameter DDR3_VDD_OP_VOLT = "135", // Voltage mode used for DDR3
parameter PAYLOAD_WIDTH = 64,
parameter ADDR_CMD_MODE = "1T",
parameter AL = "0", // Additive Latency option
parameter BANK_WIDTH = 3, // # of bank bits
parameter BM_CNT_WIDTH = 2, // Bank machine counter width
parameter BURST_MODE = "8", // Burst length
parameter BURST_TYPE = "SEQ", // Burst type
parameter CA_MIRROR = "OFF", // C/A mirror opt for DDR3 dual rank
parameter CK_WIDTH = 1, // # of CK/CK# outputs to memory
// five fields, one per possible I/O bank, 4 bits in each field, 1 per lane
// data=1/ctl=0
parameter DATA_CTL_B0 = 4'hc,
parameter DATA_CTL_B1 = 4'hf,
parameter DATA_CTL_B2 = 4'hf,
parameter DATA_CTL_B3 = 4'hf,
parameter DATA_CTL_B4 = 4'hf,
// defines the byte lanes in I/O banks being used in the interface
// 1- Used, 0- Unused
parameter BYTE_LANES_B0 = 4'b1111,
parameter BYTE_LANES_B1 = 4'b0000,
parameter BYTE_LANES_B2 = 4'b0000,
parameter BYTE_LANES_B3 = 4'b0000,
parameter BYTE_LANES_B4 = 4'b0000,
// defines the bit lanes in I/O banks being used in the interface. Each
// parameter = 1 I/O bank = 4 byte lanes = 48 bit lanes. 1-Used, 0-Unused
parameter PHY_0_BITLANES = 48'h0000_0000_0000,
parameter PHY_1_BITLANES = 48'h0000_0000_0000,
parameter PHY_2_BITLANES = 48'h0000_0000_0000,
// control/address/data pin mapping parameters
parameter CK_BYTE_MAP
= 144'h00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00,
parameter ADDR_MAP
= 192'h000_000_000_000_000_000_000_000_000_000_000_000_000_000_000_000,
parameter BANK_MAP = 36'h000_000_000,
parameter CAS_MAP = 12'h000,
parameter CKE_ODT_BYTE_MAP = 8'h00,
parameter CKE_MAP = 96'h000_000_000_000_000_000_000_000,
parameter ODT_MAP = 96'h000_000_000_000_000_000_000_000,
parameter CKE_ODT_AUX = "FALSE",
parameter CS_MAP = 120'h000_000_000_000_000_000_000_000_000_000,
parameter PARITY_MAP = 12'h000,
parameter RAS_MAP = 12'h000,
parameter WE_MAP = 12'h000,
parameter DQS_BYTE_MAP
= 144'h00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00,
parameter DATA0_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA1_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA2_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA3_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA4_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA5_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA6_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA7_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA8_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA9_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA10_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA11_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA12_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA13_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA14_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA15_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA16_MAP = 96'h000_000_000_000_000_000_000_000,
parameter DATA17_MAP = 96'h000_000_000_000_000_000_000_000,
parameter MASK0_MAP = 108'h000_000_000_000_000_000_000_000_000,
parameter MASK1_MAP = 108'h000_000_000_000_000_000_000_000_000,
// calibration Address. The address given below will be used for calibration
// read and write operations.
parameter CALIB_ROW_ADD = 16'h0000,// Calibration row address
parameter CALIB_COL_ADD = 12'h000, // Calibration column address
parameter CALIB_BA_ADD = 3'h0, // Calibration bank address
parameter CL = 5,
parameter COL_WIDTH = 12, // column address width
parameter CMD_PIPE_PLUS1 = "ON", // add pipeline stage between MC and PHY
parameter CS_WIDTH = 1, // # of unique CS outputs
parameter CKE_WIDTH = 1, // # of cke outputs
parameter CWL = 5,
parameter DATA_WIDTH = 64,
parameter DATA_BUF_ADDR_WIDTH = 8,
parameter DATA_BUF_OFFSET_WIDTH = 1,
parameter DDR2_DQSN_ENABLE = "YES", // Enable differential DQS for DDR2
parameter DM_WIDTH = 8, // # of DM (data mask)
parameter DQ_CNT_WIDTH = 6, // = ceil(log2(DQ_WIDTH))
parameter DQ_WIDTH = 64, // # of DQ (data)
parameter DQS_CNT_WIDTH = 3, // = ceil(log2(DQS_WIDTH))
parameter DQS_WIDTH = 8, // # of DQS (strobe)
parameter DRAM_TYPE = "DDR3",
parameter DRAM_WIDTH = 8, // # of DQ per DQS
parameter ECC = "OFF",
parameter ECC_WIDTH = 8,
parameter MC_ERR_ADDR_WIDTH = 31,
parameter nAL = 0, // Additive latency (in clk cyc)
parameter nBANK_MACHS = 4,
parameter PRE_REV3ES = "OFF", // Delay O/Ps using Phaser_Out fine dly
parameter nCK_PER_CLK = 4, // # of memory CKs per fabric CLK
parameter nCS_PER_RANK = 1, // # of unique CS outputs per rank
// Hard PHY parameters
parameter PHYCTL_CMD_FIFO = "FALSE",
parameter ORDERING = "NORM",
parameter PHASE_DETECT = "OFF" , // to phy_top
parameter IBUF_LPWR_MODE = "OFF", // to phy_top
parameter BANK_TYPE = "HP_IO", // # = "HP_IO", "HPL_IO", "HR_IO", "HRL_IO"
parameter DATA_IO_PRIM_TYPE = "DEFAULT", // # = "HP_LP", "HR_LP", "DEFAULT"
parameter DATA_IO_IDLE_PWRDWN = "ON", // "ON" or "OFF"
parameter IODELAY_GRP = "IODELAY_MIG", //to phy_top
parameter FPGA_SPEED_GRADE = 1,
parameter OUTPUT_DRV = "HIGH" , // to phy_top
parameter REG_CTRL = "OFF" , // to phy_top
parameter RTT_NOM = "60" , // to phy_top
parameter RTT_WR = "120" , // to phy_top
parameter STARVE_LIMIT = 2,
parameter tCK = 2500, // pS
parameter tCKE = 10000, // pS
parameter tFAW = 40000, // pS
parameter tPRDI = 1_000_000, // pS
parameter tRAS = 37500, // pS
parameter tRCD = 12500, // pS
parameter tREFI = 7800000, // pS
parameter tRFC = 110000, // pS
parameter tRP = 12500, // pS
parameter tRRD = 10000, // pS
parameter tRTP = 7500, // pS
parameter tWTR = 7500, // pS
parameter tZQI = 128_000_000, // nS
parameter tZQCS = 64, // CKs
parameter WRLVL = "OFF" , // to phy_top
parameter DEBUG_PORT = "OFF" , // to phy_top
parameter CAL_WIDTH = "HALF" , // to phy_top
parameter RANK_WIDTH = 1,
parameter RANKS = 4,
parameter ODT_WIDTH = 1,
parameter ROW_WIDTH = 16, // DRAM address bus width
parameter [7:0] SLOT_0_CONFIG = 8'b0000_0001,
parameter [7:0] SLOT_1_CONFIG = 8'b0000_0000,
parameter SIM_BYPASS_INIT_CAL = "OFF",
parameter REFCLK_FREQ = 300.0,
parameter nDQS_COL0 = DQS_WIDTH,
parameter nDQS_COL1 = 0,
parameter nDQS_COL2 = 0,
parameter nDQS_COL3 = 0,
parameter DQS_LOC_COL0 = 144'h11100F0E0D0C0B0A09080706050403020100,
parameter DQS_LOC_COL1 = 0,
parameter DQS_LOC_COL2 = 0,
parameter DQS_LOC_COL3 = 0,
parameter USE_CS_PORT = 1, // Support chip select output
parameter USE_DM_PORT = 1, // Support data mask output
parameter USE_ODT_PORT = 1, // Support ODT output
parameter MASTER_PHY_CTL = 0, // The bank number where master PHY_CONTROL resides
parameter USER_REFRESH = "OFF", // Choose whether MC or User manages REF
parameter TEMP_MON_EN = "ON", // Enable/disable temperature monitoring
parameter IDELAY_ADJ = "ON", // Adjust IDELAY value (-1)
parameter FINE_PER_BIT = "ON", // Use finedelay per-bit de-skew
parameter CENTER_COMP_MODE = "ON", // Use Center compensation table for PI
parameter PI_VAL_ADJ = "ON", // Adjust PI final value (-1)
parameter TAPSPERKCLK = 56
)
(
input clk_ref,
input freq_refclk,
input mem_refclk,
input pll_lock,
input sync_pulse,
input mmcm_ps_clk,
input poc_sample_pd,
input error,
input reset,
output rst_tg_mc,
input [BANK_WIDTH-1:0] bank, // To mc0 of mc.v
input clk ,
input [2:0] cmd, // To mc0 of mc.v
input [COL_WIDTH-1:0] col, // To mc0 of mc.v
input correct_en,
input [DATA_BUF_ADDR_WIDTH-1:0] data_buf_addr, // To mc0 of mc.v
input dbg_idel_down_all,
input dbg_idel_down_cpt,
input dbg_idel_up_all,
input dbg_idel_up_cpt,
input dbg_sel_all_idel_cpt,
input [DQS_CNT_WIDTH-1:0] dbg_sel_idel_cpt,
input hi_priority, // To mc0 of mc.v
input [RANK_WIDTH-1:0] rank, // To mc0 of mc.v
input [2*nCK_PER_CLK-1:0] raw_not_ecc,
input [ROW_WIDTH-1:0] row, // To mc0 of mc.v
input rst, // To mc0 of mc.v, ...
input size, // To mc0 of mc.v
input [7:0] slot_0_present, // To mc0 of mc.v
input [7:0] slot_1_present, // To mc0 of mc.v
input use_addr, // To mc0 of mc.v
input [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] wr_data,
input [2*nCK_PER_CLK*DATA_WIDTH/8-1:0] wr_data_mask,
output accept, // From mc0 of mc.v
output accept_ns, // From mc0 of mc.v
output [BM_CNT_WIDTH-1:0] bank_mach_next, // From mc0 of mc.v
input app_sr_req,
output app_sr_active,
input app_ref_req,
output app_ref_ack,
input app_zq_req,
output app_zq_ack,
output [255:0] dbg_calib_top,
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_first_edge_cnt,
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_second_edge_cnt,
output [255:0] dbg_phy_rdlvl,
output [99:0] dbg_phy_wrcal,
output [6*DQS_WIDTH-1:0] dbg_final_po_fine_tap_cnt,
output [3*DQS_WIDTH-1:0] dbg_final_po_coarse_tap_cnt,
output [DQS_WIDTH-1:0] dbg_rd_data_edge_detect,
output [2*nCK_PER_CLK*DQ_WIDTH-1:0] dbg_rddata,
output [1:0] dbg_rdlvl_done,
output [1:0] dbg_rdlvl_err,
output [1:0] dbg_rdlvl_start,
output [5:0] dbg_tap_cnt_during_wrlvl,
output dbg_wl_edge_detect_valid,
output dbg_wrlvl_done,
output dbg_wrlvl_err,
output dbg_wrlvl_start,
output [ROW_WIDTH-1:0] ddr_addr, // From phy_top0 of phy_top.v
output [BANK_WIDTH-1:0] ddr_ba, // From phy_top0 of phy_top.v
output ddr_cas_n, // From phy_top0 of phy_top.v
output [CK_WIDTH-1:0] ddr_ck_n, // From phy_top0 of phy_top.v
output [CK_WIDTH-1:0] ddr_ck , // From phy_top0 of phy_top.v
output [CKE_WIDTH-1:0] ddr_cke, // From phy_top0 of phy_top.v
output [CS_WIDTH*nCS_PER_RANK-1:0] ddr_cs_n, // From phy_top0 of phy_top.v
output [DM_WIDTH-1:0] ddr_dm, // From phy_top0 of phy_top.v
output [ODT_WIDTH-1:0] ddr_odt, // From phy_top0 of phy_top.v
output ddr_ras_n, // From phy_top0 of phy_top.v
output ddr_reset_n, // From phy_top0 of phy_top.v
output ddr_parity,
output ddr_we_n, // From phy_top0 of phy_top.v
output init_calib_complete,
output init_wrcal_complete,
output [MC_ERR_ADDR_WIDTH-1:0] ecc_err_addr,
output [2*nCK_PER_CLK-1:0] ecc_multiple,
output [2*nCK_PER_CLK-1:0] ecc_single,
output wire [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] rd_data,
output [DATA_BUF_ADDR_WIDTH-1:0] rd_data_addr,
// From mc0 of mc.v
output rd_data_en, // From mc0 of mc.v
output rd_data_end, // From mc0 of mc.v
output [DATA_BUF_OFFSET_WIDTH-1:0] rd_data_offset, // From mc0 of mc.v
output [DATA_BUF_ADDR_WIDTH-1:0] wr_data_addr, // From mc0 of mc.v
output wr_data_en, // From mc0 of mc.v
output [DATA_BUF_OFFSET_WIDTH-1:0] wr_data_offset, // From mc0 of mc.v
inout [DQ_WIDTH-1:0] ddr_dq, // To/From phy_top0 of phy_top.v
inout [DQS_WIDTH-1:0] ddr_dqs_n, // To/From phy_top0 of phy_top.v
inout [DQS_WIDTH-1:0] ddr_dqs // To/From phy_top0 of phy_top.v
,input [11:0] device_temp
//phase shift clock control
,output psen
,output psincdec
,input psdone
,input [DQ_WIDTH/8-1:0] fi_xor_we
,input [DQ_WIDTH-1:0] fi_xor_wrdata
,input dbg_sel_pi_incdec
,input dbg_sel_po_incdec
,input [DQS_CNT_WIDTH:0] dbg_byte_sel
,input dbg_pi_f_inc
,input dbg_pi_f_dec
,input dbg_po_f_inc
,input dbg_po_f_stg23_sel
,input dbg_po_f_dec
,output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_tap_cnt
,output [5*DQS_WIDTH*RANKS-1:0] dbg_dq_idelay_tap_cnt
,output dbg_rddata_valid
,output [6*DQS_WIDTH-1:0] dbg_wrlvl_fine_tap_cnt
,output [3*DQS_WIDTH-1:0] dbg_wrlvl_coarse_tap_cnt
,output [255:0] dbg_phy_wrlvl
,output [5:0] dbg_pi_counter_read_val
,output [8:0] dbg_po_counter_read_val
,output ref_dll_lock
,input rst_phaser_ref
,input iddr_rst
,output [6*RANKS-1:0] dbg_rd_data_offset
,output [255:0] dbg_phy_init
,output [255:0] dbg_prbs_rdlvl
,output [255:0] dbg_dqs_found_cal
,output dbg_pi_phaselock_start
,output dbg_pi_phaselocked_done
,output dbg_pi_phaselock_err
,output dbg_pi_dqsfound_start
,output dbg_pi_dqsfound_done
,output dbg_pi_dqsfound_err
,output dbg_wrcal_start
,output dbg_wrcal_done
,output dbg_wrcal_err
,output [11:0] dbg_pi_dqs_found_lanes_phy4lanes
,output [11:0] dbg_pi_phase_locked_phy4lanes
,output [6*RANKS-1:0] dbg_calib_rd_data_offset_1
,output [6*RANKS-1:0] dbg_calib_rd_data_offset_2
,output [5:0] dbg_data_offset
,output [5:0] dbg_data_offset_1
,output [5:0] dbg_data_offset_2
,output dbg_oclkdelay_calib_start
,output dbg_oclkdelay_calib_done
,output [255:0] dbg_phy_oclkdelay_cal
,output [DRAM_WIDTH*16 -1:0]dbg_oclkdelay_rd_data
,output [6*DQS_WIDTH*RANKS-1:0] prbs_final_dqs_tap_cnt_r
,output [6*DQS_WIDTH*RANKS-1:0] dbg_prbs_first_edge_taps
,output [6*DQS_WIDTH*RANKS-1:0] dbg_prbs_second_edge_taps
);
localparam nSLOTS = 1 + (|SLOT_1_CONFIG ? 1 : 0);
localparam SLOT_0_CONFIG_MC = (nSLOTS == 2)? 8'b0000_0101 : 8'b0000_1111;
localparam SLOT_1_CONFIG_MC = (nSLOTS == 2)? 8'b0000_1010 : 8'b0000_0000;
// 8*tREFI in ps is divided by the fabric clock period, also in ps. 270 is the number
// of fabric clock cycles that accounts for the writes, reads, and precharge time.
localparam REFRESH_TIMER = (8*tREFI/(tCK*nCK_PER_CLK)) - 270;
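// Worked example with the default parameter values (illustration only):
// 8 * 7800000 / (2500 * 4) = 6240 fabric clocks, so REFRESH_TIMER = 6240 - 270 = 5970.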
reg [7:0] slot_0_present_mc;
reg [7:0] slot_1_present_mc;
reg user_periodic_rd_req = 1'b0;
reg user_ref_req = 1'b0;
reg user_zq_req = 1'b0;
// MC/PHY interface
wire [nCK_PER_CLK-1:0] mc_ras_n;
wire [nCK_PER_CLK-1:0] mc_cas_n;
wire [nCK_PER_CLK-1:0] mc_we_n;
wire [nCK_PER_CLK*ROW_WIDTH-1:0] mc_address;
wire [nCK_PER_CLK*BANK_WIDTH-1:0] mc_bank;
wire [nCK_PER_CLK-1 :0] mc_cke ;
wire [1:0] mc_odt ;
wire [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] mc_cs_n;
wire mc_reset_n;
wire [2*nCK_PER_CLK*DQ_WIDTH-1:0] mc_wrdata;
wire [2*nCK_PER_CLK*DQ_WIDTH/8-1:0] mc_wrdata_mask;
wire mc_wrdata_en;
wire mc_ref_zq_wip;
wire tempmon_sample_en;
wire idle;
wire mc_cmd_wren;
wire mc_ctl_wren;
wire [2:0] mc_cmd;
wire [1:0] mc_cas_slot;
wire [5:0] mc_data_offset;
wire [5:0] mc_data_offset_1;
wire [5:0] mc_data_offset_2;
wire [3:0] mc_aux_out0;
wire [3:0] mc_aux_out1;
wire [1:0] mc_rank_cnt;
wire phy_mc_ctl_full;
wire phy_mc_cmd_full;
wire phy_mc_data_full;
wire [2*nCK_PER_CLK*DQ_WIDTH-1:0] phy_rd_data;
wire phy_rddata_valid;
wire [6*RANKS-1:0] calib_rd_data_offset_0;
wire [6*RANKS-1:0] calib_rd_data_offset_1;
wire [6*RANKS-1:0] calib_rd_data_offset_2;
wire init_calib_complete_w;
wire init_wrcal_complete_w;
wire mux_rst;
wire mux_calib_complete;
// For DDR2, CWL is assigned as CL - 1, since DDR2 users will not specify CWL.
// There is also an nCWL parameter; this needs to be cleaned up.
localparam CWL_T = (DRAM_TYPE == "DDR3") ? CWL : CL-1;
assign init_calib_complete = init_calib_complete_w;
assign init_wrcal_complete = init_wrcal_complete_w;
assign mux_calib_complete = (PRE_REV3ES == "OFF") ? init_calib_complete_w :
(init_calib_complete_w | init_wrcal_complete_w);
assign mux_rst = (PRE_REV3ES == "OFF") ? rst : reset;
assign dbg_calib_rd_data_offset_1 = calib_rd_data_offset_1;
assign dbg_calib_rd_data_offset_2 = calib_rd_data_offset_2;
assign dbg_data_offset = mc_data_offset;
assign dbg_data_offset_1 = mc_data_offset_1;
assign dbg_data_offset_2 = mc_data_offset_2;
// Enable / disable temperature monitoring
assign tempmon_sample_en = TEMP_MON_EN == "OFF" ? 1'b0 : mc_ref_zq_wip;
generate
if (nSLOTS == 1) begin: gen_single_slot_odt
always @ (slot_0_present or slot_1_present) begin
slot_0_present_mc = slot_0_present;
slot_1_present_mc = slot_1_present;
end
end else if (nSLOTS == 2) begin: gen_dual_slot_odt
always @ (slot_0_present[0] or slot_0_present[1]
or slot_1_present[0] or slot_1_present[1]) begin
case ({slot_0_present[0],slot_0_present[1],
slot_1_present[0],slot_1_present[1]})
//Two slot configuration, one slot present, single rank
4'b1000: begin
slot_0_present_mc = 8'b0000_0001;
slot_1_present_mc = 8'b0000_0000;
end
4'b0010: begin
slot_0_present_mc = 8'b0000_0000;
slot_1_present_mc = 8'b0000_0010;
end
// Two slot configuration, one slot present, dual rank
4'b1100: begin
slot_0_present_mc = 8'b0000_0101;
slot_1_present_mc = 8'b0000_0000;
end
4'b0011: begin
slot_0_present_mc = 8'b0000_0000;
slot_1_present_mc = 8'b0000_1010;
end
// Two slot configuration, one rank per slot
4'b1010: begin
slot_0_present_mc = 8'b0000_0001;
slot_1_present_mc = 8'b0000_0010;
end
// Two Slots - One slot with dual rank and the other with single rank
4'b1011: begin
slot_0_present_mc = 8'b0000_0001;
slot_1_present_mc = 8'b0000_1010;
end
4'b1110: begin
slot_0_present_mc = 8'b0000_0101;
slot_1_present_mc = 8'b0000_0010;
end
// Two Slots - two ranks per slot
4'b1111: begin
slot_0_present_mc = 8'b0000_0101;
slot_1_present_mc = 8'b0000_1010;
end
endcase
end
end
endgenerate
mig_7series_v2_3_mc #
(
.TCQ (TCQ),
.PAYLOAD_WIDTH (PAYLOAD_WIDTH),
.MC_ERR_ADDR_WIDTH (MC_ERR_ADDR_WIDTH),
.ADDR_CMD_MODE (ADDR_CMD_MODE),
.BANK_WIDTH (BANK_WIDTH),
.BM_CNT_WIDTH (BM_CNT_WIDTH),
.BURST_MODE (BURST_MODE),
.COL_WIDTH (COL_WIDTH),
.CMD_PIPE_PLUS1 (CMD_PIPE_PLUS1),
.CS_WIDTH (CS_WIDTH),
.DATA_WIDTH (DATA_WIDTH),
.DATA_BUF_ADDR_WIDTH (DATA_BUF_ADDR_WIDTH),
.DATA_BUF_OFFSET_WIDTH (DATA_BUF_OFFSET_WIDTH),
.DRAM_TYPE (DRAM_TYPE),
.CKE_ODT_AUX (CKE_ODT_AUX),
.DQS_WIDTH (DQS_WIDTH),
.DQ_WIDTH (DQ_WIDTH),
.ECC (ECC),
.ECC_WIDTH (ECC_WIDTH),
.nBANK_MACHS (nBANK_MACHS),
.nCK_PER_CLK (nCK_PER_CLK),
.nSLOTS (nSLOTS),
.CL (CL),
.nCS_PER_RANK (nCS_PER_RANK),
.CWL (CWL_T),
.ORDERING (ORDERING),
.RANK_WIDTH (RANK_WIDTH),
.RANKS (RANKS),
.REG_CTRL (REG_CTRL),
.ROW_WIDTH (ROW_WIDTH),
.RTT_NOM (RTT_NOM),
.RTT_WR (RTT_WR),
.STARVE_LIMIT (STARVE_LIMIT),
.SLOT_0_CONFIG (SLOT_0_CONFIG_MC),
.SLOT_1_CONFIG (SLOT_1_CONFIG_MC),
.tCK (tCK),
.tCKE (tCKE),
.tFAW (tFAW),
.tRAS (tRAS),
.tRCD (tRCD),
.tREFI (tREFI),
.tRFC (tRFC),
.tRP (tRP),
.tRRD (tRRD),
.tRTP (tRTP),
.tWTR (tWTR),
.tZQI (tZQI),
.tZQCS (tZQCS),
.tPRDI (tPRDI),
.USER_REFRESH (USER_REFRESH))
mc0
(.app_periodic_rd_req (1'b0),
.app_sr_req (app_sr_req),
.app_sr_active (app_sr_active),
.app_ref_req (app_ref_req),
.app_ref_ack (app_ref_ack),
.app_zq_req (app_zq_req),
.app_zq_ack (app_zq_ack),
.ecc_single (ecc_single),
.ecc_multiple (ecc_multiple),
.ecc_err_addr (ecc_err_addr),
.mc_address (mc_address),
.mc_aux_out0 (mc_aux_out0),
.mc_aux_out1 (mc_aux_out1),
.mc_bank (mc_bank),
.mc_cke (mc_cke),
.mc_odt (mc_odt),
.mc_cas_n (mc_cas_n),
.mc_cmd (mc_cmd),
.mc_cmd_wren (mc_cmd_wren),
.mc_cs_n (mc_cs_n),
.mc_ctl_wren (mc_ctl_wren),
.mc_data_offset (mc_data_offset),
.mc_data_offset_1 (mc_data_offset_1),
.mc_data_offset_2 (mc_data_offset_2),
.mc_cas_slot (mc_cas_slot),
.mc_rank_cnt (mc_rank_cnt),
.mc_ras_n (mc_ras_n),
.mc_reset_n (mc_reset_n),
.mc_we_n (mc_we_n),
.mc_wrdata (mc_wrdata),
.mc_wrdata_en (mc_wrdata_en),
.mc_wrdata_mask (mc_wrdata_mask),
// Outputs
.accept (accept),
.accept_ns (accept_ns),
.bank_mach_next (bank_mach_next[BM_CNT_WIDTH-1:0]),
.rd_data_addr (rd_data_addr[DATA_BUF_ADDR_WIDTH-1:0]),
.rd_data_en (rd_data_en),
.rd_data_end (rd_data_end),
.rd_data_offset (rd_data_offset),
.wr_data_addr (wr_data_addr[DATA_BUF_ADDR_WIDTH-1:0]),
.wr_data_en (wr_data_en),
.wr_data_offset (wr_data_offset),
.rd_data (rd_data),
.wr_data (wr_data),
.wr_data_mask (wr_data_mask),
.mc_read_idle (idle),
.mc_ref_zq_wip (mc_ref_zq_wip),
// Inputs
.init_calib_complete (mux_calib_complete),
.calib_rd_data_offset (calib_rd_data_offset_0),
.calib_rd_data_offset_1 (calib_rd_data_offset_1),
.calib_rd_data_offset_2 (calib_rd_data_offset_2),
.phy_mc_ctl_full (phy_mc_ctl_full),
.phy_mc_cmd_full (phy_mc_cmd_full),
.phy_mc_data_full (phy_mc_data_full),
.phy_rd_data (phy_rd_data),
.phy_rddata_valid (phy_rddata_valid),
.correct_en (correct_en),
.bank (bank[BANK_WIDTH-1:0]),
.clk (clk),
.cmd (cmd[2:0]),
.col (col[COL_WIDTH-1:0]),
.data_buf_addr (data_buf_addr[DATA_BUF_ADDR_WIDTH-1:0]),
.hi_priority (hi_priority),
.rank (rank[RANK_WIDTH-1:0]),
.raw_not_ecc (raw_not_ecc[2*nCK_PER_CLK-1 :0]),
.row (row[ROW_WIDTH-1:0]),
.rst (mux_rst),
.size (size),
.slot_0_present (slot_0_present_mc[7:0]),
.slot_1_present (slot_1_present_mc[7:0]),
.fi_xor_we (fi_xor_we),
.fi_xor_wrdata (fi_xor_wrdata),
.use_addr (use_addr));
// following calculations should be moved inside PHY
// odt bus should be added to PHY.
localparam CLK_PERIOD = tCK * nCK_PER_CLK;
localparam nCL = CL;
localparam nCWL = CWL_T;
`ifdef MC_SVA
ddr2_improper_CL: assert property
(@(posedge clk) (~((DRAM_TYPE == "DDR2") && ((CL > 6) || (CL < 3)))));
// Not needed after the CWL fix for DDR2
// ddr2_improper_CWL: assert property
// (@(posedge clk) (~((DRAM_TYPE == "DDR2") && ((CL - CWL) != 1))));
`endif
mig_7series_v2_3_ddr_phy_top #
(
.TCQ (TCQ),
.DDR3_VDD_OP_VOLT (DDR3_VDD_OP_VOLT),
.REFCLK_FREQ (REFCLK_FREQ),
.BYTE_LANES_B0 (BYTE_LANES_B0),
.BYTE_LANES_B1 (BYTE_LANES_B1),
.BYTE_LANES_B2 (BYTE_LANES_B2),
.BYTE_LANES_B3 (BYTE_LANES_B3),
.BYTE_LANES_B4 (BYTE_LANES_B4),
.PHY_0_BITLANES (PHY_0_BITLANES),
.PHY_1_BITLANES (PHY_1_BITLANES),
.PHY_2_BITLANES (PHY_2_BITLANES),
.CA_MIRROR (CA_MIRROR),
.CK_BYTE_MAP (CK_BYTE_MAP),
.ADDR_MAP (ADDR_MAP),
.BANK_MAP (BANK_MAP),
.CAS_MAP (CAS_MAP),
.CKE_ODT_BYTE_MAP (CKE_ODT_BYTE_MAP),
.CKE_MAP (CKE_MAP),
.ODT_MAP (ODT_MAP),
.CKE_ODT_AUX (CKE_ODT_AUX),
.CS_MAP (CS_MAP),
.PARITY_MAP (PARITY_MAP),
.RAS_MAP (RAS_MAP),
.WE_MAP (WE_MAP),
.DQS_BYTE_MAP (DQS_BYTE_MAP),
.DATA0_MAP (DATA0_MAP),
.DATA1_MAP (DATA1_MAP),
.DATA2_MAP (DATA2_MAP),
.DATA3_MAP (DATA3_MAP),
.DATA4_MAP (DATA4_MAP),
.DATA5_MAP (DATA5_MAP),
.DATA6_MAP (DATA6_MAP),
.DATA7_MAP (DATA7_MAP),
.DATA8_MAP (DATA8_MAP),
.DATA9_MAP (DATA9_MAP),
.DATA10_MAP (DATA10_MAP),
.DATA11_MAP (DATA11_MAP),
.DATA12_MAP (DATA12_MAP),
.DATA13_MAP (DATA13_MAP),
.DATA14_MAP (DATA14_MAP),
.DATA15_MAP (DATA15_MAP),
.DATA16_MAP (DATA16_MAP),
.DATA17_MAP (DATA17_MAP),
.MASK0_MAP (MASK0_MAP),
.MASK1_MAP (MASK1_MAP),
.CALIB_ROW_ADD (CALIB_ROW_ADD),
.CALIB_COL_ADD (CALIB_COL_ADD),
.CALIB_BA_ADD (CALIB_BA_ADD),
.nCS_PER_RANK (nCS_PER_RANK),
.CS_WIDTH (CS_WIDTH),
.nCK_PER_CLK (nCK_PER_CLK),
.PRE_REV3ES (PRE_REV3ES),
.CKE_WIDTH (CKE_WIDTH),
.DATA_CTL_B0 (DATA_CTL_B0),
.DATA_CTL_B1 (DATA_CTL_B1),
.DATA_CTL_B2 (DATA_CTL_B2),
.DATA_CTL_B3 (DATA_CTL_B3),
.DATA_CTL_B4 (DATA_CTL_B4),
.DDR2_DQSN_ENABLE (DDR2_DQSN_ENABLE),
.DRAM_TYPE (DRAM_TYPE),
.BANK_WIDTH (BANK_WIDTH),
.CK_WIDTH (CK_WIDTH),
.COL_WIDTH (COL_WIDTH),
.DM_WIDTH (DM_WIDTH),
.DQ_WIDTH (DQ_WIDTH),
.DQS_CNT_WIDTH (DQS_CNT_WIDTH),
.DQS_WIDTH (DQS_WIDTH),
.DRAM_WIDTH (DRAM_WIDTH),
.PHYCTL_CMD_FIFO (PHYCTL_CMD_FIFO),
.ROW_WIDTH (ROW_WIDTH),
.AL (AL),
.ADDR_CMD_MODE (ADDR_CMD_MODE),
.BURST_MODE (BURST_MODE),
.BURST_TYPE (BURST_TYPE),
.CL (nCL),
.CWL (nCWL),
.tRFC (tRFC),
.tREFI (tREFI),
.tCK (tCK),
.OUTPUT_DRV (OUTPUT_DRV),
.RANKS (RANKS),
.ODT_WIDTH (ODT_WIDTH),
.REG_CTRL (REG_CTRL),
.RTT_NOM (RTT_NOM),
.RTT_WR (RTT_WR),
.SLOT_1_CONFIG (SLOT_1_CONFIG),
.WRLVL (WRLVL),
.BANK_TYPE (BANK_TYPE),
.DATA_IO_PRIM_TYPE (DATA_IO_PRIM_TYPE),
.DATA_IO_IDLE_PWRDWN(DATA_IO_IDLE_PWRDWN),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
// Prevent the following simulation-related parameters from
// being overridden for synthesis - for synthesis only the
// default values of these parameters should be used
// synthesis translate_off
.SIM_BYPASS_INIT_CAL (SIM_BYPASS_INIT_CAL),
// synthesis translate_on
.USE_CS_PORT (USE_CS_PORT),
.USE_DM_PORT (USE_DM_PORT),
.USE_ODT_PORT (USE_ODT_PORT),
.MASTER_PHY_CTL (MASTER_PHY_CTL),
.DEBUG_PORT (DEBUG_PORT),
.IDELAY_ADJ (IDELAY_ADJ),
.FINE_PER_BIT (FINE_PER_BIT),
.CENTER_COMP_MODE (CENTER_COMP_MODE),
.PI_VAL_ADJ (PI_VAL_ADJ),
.TAPSPERKCLK (TAPSPERKCLK)
)
ddr_phy_top0
(
// Outputs
.calib_rd_data_offset_0 (calib_rd_data_offset_0),
.calib_rd_data_offset_1 (calib_rd_data_offset_1),
.calib_rd_data_offset_2 (calib_rd_data_offset_2),
.ddr_ck (ddr_ck),
.ddr_ck_n (ddr_ck_n),
.ddr_addr (ddr_addr),
.ddr_ba (ddr_ba),
.ddr_ras_n (ddr_ras_n),
.ddr_cas_n (ddr_cas_n),
.ddr_we_n (ddr_we_n),
.ddr_cs_n (ddr_cs_n),
.ddr_cke (ddr_cke),
.ddr_odt (ddr_odt),
.ddr_reset_n (ddr_reset_n),
.ddr_parity (ddr_parity),
.ddr_dm (ddr_dm),
.dbg_calib_top (dbg_calib_top),
.dbg_cpt_first_edge_cnt (dbg_cpt_first_edge_cnt),
.dbg_cpt_second_edge_cnt (dbg_cpt_second_edge_cnt),
.dbg_phy_rdlvl (dbg_phy_rdlvl),
.dbg_phy_wrcal (dbg_phy_wrcal),
.dbg_final_po_fine_tap_cnt (dbg_final_po_fine_tap_cnt),
.dbg_final_po_coarse_tap_cnt (dbg_final_po_coarse_tap_cnt),
.dbg_rd_data_edge_detect (dbg_rd_data_edge_detect),
.dbg_rddata (dbg_rddata),
.dbg_rdlvl_done (dbg_rdlvl_done),
.dbg_rdlvl_err (dbg_rdlvl_err),
.dbg_rdlvl_start (dbg_rdlvl_start),
.dbg_tap_cnt_during_wrlvl (dbg_tap_cnt_during_wrlvl),
.dbg_wl_edge_detect_valid (dbg_wl_edge_detect_valid),
.dbg_wrlvl_done (dbg_wrlvl_done),
.dbg_wrlvl_err (dbg_wrlvl_err),
.dbg_wrlvl_start (dbg_wrlvl_start),
.dbg_pi_phase_locked_phy4lanes (dbg_pi_phase_locked_phy4lanes),
.dbg_pi_dqs_found_lanes_phy4lanes (dbg_pi_dqs_found_lanes_phy4lanes),
.init_calib_complete (init_calib_complete_w),
.init_wrcal_complete (init_wrcal_complete_w),
.mc_address (mc_address),
.mc_aux_out0 (mc_aux_out0),
.mc_aux_out1 (mc_aux_out1),
.mc_bank (mc_bank),
.mc_cke (mc_cke),
.mc_odt (mc_odt),
.mc_cas_n (mc_cas_n),
.mc_cmd (mc_cmd),
.mc_cmd_wren (mc_cmd_wren),
.mc_cas_slot (mc_cas_slot),
.mc_cs_n (mc_cs_n),
.mc_ctl_wren (mc_ctl_wren),
.mc_data_offset (mc_data_offset),
.mc_data_offset_1 (mc_data_offset_1),
.mc_data_offset_2 (mc_data_offset_2),
.mc_rank_cnt (mc_rank_cnt),
.mc_ras_n (mc_ras_n),
.mc_reset_n (mc_reset_n),
.mc_we_n (mc_we_n),
.mc_wrdata (mc_wrdata),
.mc_wrdata_en (mc_wrdata_en),
.mc_wrdata_mask (mc_wrdata_mask),
.idle (idle),
.mem_refclk (mem_refclk),
.phy_mc_ctl_full (phy_mc_ctl_full),
.phy_mc_cmd_full (phy_mc_cmd_full),
.phy_mc_data_full (phy_mc_data_full),
.phy_rd_data (phy_rd_data),
.phy_rddata_valid (phy_rddata_valid),
.pll_lock (pll_lock),
.sync_pulse (sync_pulse),
// Inouts
.ddr_dqs (ddr_dqs),
.ddr_dqs_n (ddr_dqs_n),
.ddr_dq (ddr_dq),
// Inputs
.clk_ref (clk_ref),
.freq_refclk (freq_refclk),
.clk (clk),
.mmcm_ps_clk (mmcm_ps_clk),
.poc_sample_pd (poc_sample_pd),
.rst (rst),
.error (error),
.rst_tg_mc (rst_tg_mc),
.slot_0_present (slot_0_present),
.slot_1_present (slot_1_present),
.dbg_idel_up_all (dbg_idel_up_all),
.dbg_idel_down_all (dbg_idel_down_all),
.dbg_idel_up_cpt (dbg_idel_up_cpt),
.dbg_idel_down_cpt (dbg_idel_down_cpt),
.dbg_sel_idel_cpt (dbg_sel_idel_cpt),
.dbg_sel_all_idel_cpt (dbg_sel_all_idel_cpt)
,.device_temp (device_temp)
,.tempmon_sample_en (tempmon_sample_en)
,.psen (psen)
,.psincdec (psincdec)
,.psdone (psdone)
,.dbg_sel_pi_incdec (dbg_sel_pi_incdec)
,.dbg_sel_po_incdec (dbg_sel_po_incdec)
,.dbg_byte_sel (dbg_byte_sel)
,.dbg_pi_f_inc (dbg_pi_f_inc)
,.dbg_po_f_inc (dbg_po_f_inc)
,.dbg_po_f_stg23_sel (dbg_po_f_stg23_sel)
,.dbg_pi_f_dec (dbg_pi_f_dec)
,.dbg_po_f_dec (dbg_po_f_dec)
,.dbg_cpt_tap_cnt (dbg_cpt_tap_cnt)
,.dbg_dq_idelay_tap_cnt (dbg_dq_idelay_tap_cnt)
,.dbg_rddata_valid (dbg_rddata_valid)
,.dbg_wrlvl_fine_tap_cnt (dbg_wrlvl_fine_tap_cnt)
,.dbg_wrlvl_coarse_tap_cnt (dbg_wrlvl_coarse_tap_cnt)
,.dbg_phy_wrlvl (dbg_phy_wrlvl)
,.ref_dll_lock (ref_dll_lock)
,.rst_phaser_ref (rst_phaser_ref)
,.iddr_rst (iddr_rst)
,.dbg_rd_data_offset (dbg_rd_data_offset)
,.dbg_phy_init (dbg_phy_init)
,.dbg_prbs_rdlvl (dbg_prbs_rdlvl)
,.dbg_dqs_found_cal (dbg_dqs_found_cal)
,.dbg_po_counter_read_val (dbg_po_counter_read_val)
,.dbg_pi_counter_read_val (dbg_pi_counter_read_val)
,.dbg_pi_phaselock_start (dbg_pi_phaselock_start)
,.dbg_pi_phaselocked_done (dbg_pi_phaselocked_done)
,.dbg_pi_phaselock_err (dbg_pi_phaselock_err)
,.dbg_pi_dqsfound_start (dbg_pi_dqsfound_start)
,.dbg_pi_dqsfound_done (dbg_pi_dqsfound_done)
,.dbg_pi_dqsfound_err (dbg_pi_dqsfound_err)
,.dbg_wrcal_start (dbg_wrcal_start)
,.dbg_wrcal_done (dbg_wrcal_done)
,.dbg_wrcal_err (dbg_wrcal_err)
,.dbg_phy_oclkdelay_cal (dbg_phy_oclkdelay_cal)
,.dbg_oclkdelay_rd_data (dbg_oclkdelay_rd_data)
,.dbg_oclkdelay_calib_start (dbg_oclkdelay_calib_start)
,.dbg_oclkdelay_calib_done (dbg_oclkdelay_calib_done)
,.prbs_final_dqs_tap_cnt_r (prbs_final_dqs_tap_cnt_r)
,.dbg_prbs_first_edge_taps (dbg_prbs_first_edge_taps)
,.dbg_prbs_second_edge_taps (dbg_prbs_second_edge_taps)
);
endmodule
,input rst_phaser_ref
,input iddr_rst
,output [6*RANKS-1:0] dbg_rd_data_offset
,output [255:0] dbg_phy_init
,output [255:0] dbg_prbs_rdlvl
,output [255:0] dbg_dqs_found_cal
,output dbg_pi_phaselock_start
,output dbg_pi_phaselocked_done
,output dbg_pi_phaselock_err
,output dbg_pi_dqsfound_start
,output dbg_pi_dqsfound_done
,output dbg_pi_dqsfound_err
,output dbg_wrcal_start
,output dbg_wrcal_done
,output dbg_wrcal_err
,output [11:0] dbg_pi_dqs_found_lanes_phy4lanes
,output [11:0] dbg_pi_phase_locked_phy4lanes
,output [6*RANKS-1:0] dbg_calib_rd_data_offset_1
,output [6*RANKS-1:0] dbg_calib_rd_data_offset_2
,output [5:0] dbg_data_offset
,output [5:0] dbg_data_offset_1
,output [5:0] dbg_data_offset_2
,output dbg_oclkdelay_calib_start
,output dbg_oclkdelay_calib_done
,output [255:0] dbg_phy_oclkdelay_cal
,output [DRAM_WIDTH*16 -1:0]dbg_oclkdelay_rd_data
,output [6*DQS_WIDTH*RANKS-1:0] prbs_final_dqs_tap_cnt_r
,output [6*DQS_WIDTH*RANKS-1:0] dbg_prbs_first_edge_taps
,output [6*DQS_WIDTH*RANKS-1:0] dbg_prbs_second_edge_taps
);
localparam nSLOTS = 1 + (|SLOT_1_CONFIG ? 1 : 0);
localparam SLOT_0_CONFIG_MC = (nSLOTS == 2)? 8'b0000_0101 : 8'b0000_1111;
localparam SLOT_1_CONFIG_MC = (nSLOTS == 2)? 8'b0000_1010 : 8'b0000_0000;
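// Note: |SLOT_1_CONFIG is a reduction-OR, so nSLOTS is 2 whenever any rank
// position in slot 1 is populated. The *_CONFIG_MC constants appear to
// re-encode the rank-to-slot mapping handed to the controller: ranks 0/2 in
// slot 0 and ranks 1/3 in slot 1 for dual-slot systems, all rank positions in
// slot 0 otherwise.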
// 8*tREFI (in ps) is divided by the fabric clock period (also in ps). 270 is
// the number of fabric clock cycles that accounts for the write, read, and
// precharge time.
localparam REFRESH_TIMER = (8*tREFI/(tCK*nCK_PER_CLK)) - 270;
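// For example, with the default parameter values above (tREFI = 7_800_000 ps,
// tCK = 2500 ps, nCK_PER_CLK = 4):
//   8*7_800_000 / (2500*4) - 270 = 6240 - 270 = 5970 fabric clock cycles.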
reg [7:0] slot_0_present_mc;
reg [7:0] slot_1_present_mc;
reg user_periodic_rd_req = 1'b0;
reg user_ref_req = 1'b0;
reg user_zq_req = 1'b0;
// MC/PHY interface
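// The mc_* buses carry nCK_PER_CLK command slots per fabric clock cycle (one
// DRAM command per memory CK), which is why mc_ras_n/mc_cas_n/mc_we_n are
// nCK_PER_CLK bits wide and mc_address/mc_bank hold nCK_PER_CLK copies of the
// address and bank fields.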
wire [nCK_PER_CLK-1:0] mc_ras_n;
wire [nCK_PER_CLK-1:0] mc_cas_n;
wire [nCK_PER_CLK-1:0] mc_we_n;
wire [nCK_PER_CLK*ROW_WIDTH-1:0] mc_address;
wire [nCK_PER_CLK*BANK_WIDTH-1:0] mc_bank;
wire [nCK_PER_CLK-1 :0] mc_cke ;
wire [1:0] mc_odt ;
wire [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] mc_cs_n;
wire mc_reset_n;
wire [2*nCK_PER_CLK*DQ_WIDTH-1:0] mc_wrdata;
wire [2*nCK_PER_CLK*DQ_WIDTH/8-1:0] mc_wrdata_mask;
wire mc_wrdata_en;
wire mc_ref_zq_wip;
wire tempmon_sample_en;
wire idle;
wire mc_cmd_wren;
wire mc_ctl_wren;
wire [2:0] mc_cmd;
wire [1:0] mc_cas_slot;
wire [5:0] mc_data_offset;
wire [5:0] mc_data_offset_1;
wire [5:0] mc_data_offset_2;
wire [3:0] mc_aux_out0;
wire [3:0] mc_aux_out1;
wire [1:0] mc_rank_cnt;
wire phy_mc_ctl_full;
wire phy_mc_cmd_full;
wire phy_mc_data_full;
wire [2*nCK_PER_CLK*DQ_WIDTH-1:0] phy_rd_data;
wire phy_rddata_valid;
wire [6*RANKS-1:0] calib_rd_data_offset_0;
wire [6*RANKS-1:0] calib_rd_data_offset_1;
wire [6*RANKS-1:0] calib_rd_data_offset_2;
wire init_calib_complete_w;
wire init_wrcal_complete_w;
wire mux_rst;
wire mux_calib_complete;
// Assign CWL = CL - 1 for DDR2, since DDR2 users do not specify CWL directly.
// There is also an nCWL parameter; this should eventually be cleaned up.
localparam CWL_T = (DRAM_TYPE == "DDR3") ? CWL : CL-1;
assign init_calib_complete = init_calib_complete_w;
assign init_wrcal_complete = init_wrcal_complete_w;
assign mux_calib_complete = (PRE_REV3ES == "OFF") ? init_calib_complete_w :
(init_calib_complete_w | init_wrcal_complete_w);
assign mux_rst = (PRE_REV3ES == "OFF") ? rst : reset;
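// When the PRE_REV3ES workaround is enabled, the controller treats write
// calibration completion (init_wrcal_complete_w) as sufficient to start and is
// reset from the external 'reset' input instead of 'rst'; with the workaround
// off it waits for full calibration and uses the normal reset.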
assign dbg_calib_rd_data_offset_1 = calib_rd_data_offset_1;
assign dbg_calib_rd_data_offset_2 = calib_rd_data_offset_2;
assign dbg_data_offset = mc_data_offset;
assign dbg_data_offset_1 = mc_data_offset_1;
assign dbg_data_offset_2 = mc_data_offset_2;
// Enable / disable temperature monitoring
assign tempmon_sample_en = TEMP_MON_EN == "OFF" ? 1'b0 : mc_ref_zq_wip;
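// Gating on mc_ref_zq_wip means device temperature is sampled only while a
// refresh/ZQ operation is in progress, presumably so the monitor's periodic
// reads do not disturb normal traffic.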
generate
if (nSLOTS == 1) begin: gen_single_slot_odt
always @ (slot_0_present or slot_1_present) begin
slot_0_present_mc = slot_0_present;
slot_1_present_mc = slot_1_present;
end
end else if (nSLOTS == 2) begin: gen_dual_slot_odt
always @ (slot_0_present[0] or slot_0_present[1]
or slot_1_present[0] or slot_1_present[1]) begin
case ({slot_0_present[0],slot_0_present[1],
slot_1_present[0],slot_1_present[1]})
//Two slot configuration, one slot present, single rank
4'b1000: begin
slot_0_present_mc = 8'b0000_0001;
slot_1_present_mc = 8'b0000_0000;
end
4'b0010: begin
slot_0_present_mc = 8'b0000_0000;
slot_1_present_mc = 8'b0000_0010;
end
// Two slot configuration, one slot present, dual rank
4'b1100: begin
slot_0_present_mc = 8'b0000_0101;
slot_1_present_mc = 8'b0000_0000;
end
4'b0011: begin
slot_0_present_mc = 8'b0000_0000;
slot_1_present_mc = 8'b0000_1010;
end
// Two slot configuration, one rank per slot
4'b1010: begin
slot_0_present_mc = 8'b0000_0001;
slot_1_present_mc = 8'b0000_0010;
end
// Two Slots - One slot with dual rank and the other with single rank
4'b1011: begin
slot_0_present_mc = 8'b0000_0001;
slot_1_present_mc = 8'b0000_1010;
end
4'b1110: begin
slot_0_present_mc = 8'b0000_0101;
slot_1_present_mc = 8'b0000_0010;
end
// Two Slots - two ranks per slot
4'b1111: begin
slot_0_present_mc = 8'b0000_0101;
slot_1_present_mc = 8'b0000_1010;
end
endcase
end
end
endgenerate
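// The case key above is {slot_0_present[0], slot_0_present[1],
// slot_1_present[0], slot_1_present[1]}. There is no default branch, so any
// unlisted combination simply holds the previous value (latch-like in
// synthesis); this is benign in practice because slot population is static.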
mig_7series_v2_3_mc #
(
.TCQ (TCQ),
.PAYLOAD_WIDTH (PAYLOAD_WIDTH),
.MC_ERR_ADDR_WIDTH (MC_ERR_ADDR_WIDTH),
.ADDR_CMD_MODE (ADDR_CMD_MODE),
.BANK_WIDTH (BANK_WIDTH),
.BM_CNT_WIDTH (BM_CNT_WIDTH),
.BURST_MODE (BURST_MODE),
.COL_WIDTH (COL_WIDTH),
.CMD_PIPE_PLUS1 (CMD_PIPE_PLUS1),
.CS_WIDTH (CS_WIDTH),
.DATA_WIDTH (DATA_WIDTH),
.DATA_BUF_ADDR_WIDTH (DATA_BUF_ADDR_WIDTH),
.DATA_BUF_OFFSET_WIDTH (DATA_BUF_OFFSET_WIDTH),
.DRAM_TYPE (DRAM_TYPE),
.CKE_ODT_AUX (CKE_ODT_AUX),
.DQS_WIDTH (DQS_WIDTH),
.DQ_WIDTH (DQ_WIDTH),
.ECC (ECC),
.ECC_WIDTH (ECC_WIDTH),
.nBANK_MACHS (nBANK_MACHS),
.nCK_PER_CLK (nCK_PER_CLK),
.nSLOTS (nSLOTS),
.CL (CL),
.nCS_PER_RANK (nCS_PER_RANK),
.CWL (CWL_T),
.ORDERING (ORDERING),
.RANK_WIDTH (RANK_WIDTH),
.RANKS (RANKS),
.REG_CTRL (REG_CTRL),
.ROW_WIDTH (ROW_WIDTH),
.RTT_NOM (RTT_NOM),
.RTT_WR (RTT_WR),
.STARVE_LIMIT (STARVE_LIMIT),
.SLOT_0_CONFIG (SLOT_0_CONFIG_MC),
.SLOT_1_CONFIG (SLOT_1_CONFIG_MC),
.tCK (tCK),
.tCKE (tCKE),
.tFAW (tFAW),
.tRAS (tRAS),
.tRCD (tRCD),
.tREFI (tREFI),
.tRFC (tRFC),
.tRP (tRP),
.tRRD (tRRD),
.tRTP (tRTP),
.tWTR (tWTR),
.tZQI (tZQI),
.tZQCS (tZQCS),
.tPRDI (tPRDI),
.USER_REFRESH (USER_REFRESH))
mc0
(.app_periodic_rd_req (1'b0),
.app_sr_req (app_sr_req),
.app_sr_active (app_sr_active),
.app_ref_req (app_ref_req),
.app_ref_ack (app_ref_ack),
.app_zq_req (app_zq_req),
.app_zq_ack (app_zq_ack),
.ecc_single (ecc_single),
.ecc_multiple (ecc_multiple),
.ecc_err_addr (ecc_err_addr),
.mc_address (mc_address),
.mc_aux_out0 (mc_aux_out0),
.mc_aux_out1 (mc_aux_out1),
.mc_bank (mc_bank),
.mc_cke (mc_cke),
.mc_odt (mc_odt),
.mc_cas_n (mc_cas_n),
.mc_cmd (mc_cmd),
.mc_cmd_wren (mc_cmd_wren),
.mc_cs_n (mc_cs_n),
.mc_ctl_wren (mc_ctl_wren),
.mc_data_offset (mc_data_offset),
.mc_data_offset_1 (mc_data_offset_1),
.mc_data_offset_2 (mc_data_offset_2),
.mc_cas_slot (mc_cas_slot),
.mc_rank_cnt (mc_rank_cnt),
.mc_ras_n (mc_ras_n),
.mc_reset_n (mc_reset_n),
.mc_we_n (mc_we_n),
.mc_wrdata (mc_wrdata),
.mc_wrdata_en (mc_wrdata_en),
.mc_wrdata_mask (mc_wrdata_mask),
// Outputs
.accept (accept),
.accept_ns (accept_ns),
.bank_mach_next (bank_mach_next[BM_CNT_WIDTH-1:0]),
.rd_data_addr (rd_data_addr[DATA_BUF_ADDR_WIDTH-1:0]),
.rd_data_en (rd_data_en),
.rd_data_end (rd_data_end),
.rd_data_offset (rd_data_offset),
.wr_data_addr (wr_data_addr[DATA_BUF_ADDR_WIDTH-1:0]),
.wr_data_en (wr_data_en),
.wr_data_offset (wr_data_offset),
.rd_data (rd_data),
.wr_data (wr_data),
.wr_data_mask (wr_data_mask),
.mc_read_idle (idle),
.mc_ref_zq_wip (mc_ref_zq_wip),
// Inputs
.init_calib_complete (mux_calib_complete),
.calib_rd_data_offset (calib_rd_data_offset_0),
.calib_rd_data_offset_1 (calib_rd_data_offset_1),
.calib_rd_data_offset_2 (calib_rd_data_offset_2),
.phy_mc_ctl_full (phy_mc_ctl_full),
.phy_mc_cmd_full (phy_mc_cmd_full),
.phy_mc_data_full (phy_mc_data_full),
.phy_rd_data (phy_rd_data),
.phy_rddata_valid (phy_rddata_valid),
.correct_en (correct_en),
.bank (bank[BANK_WIDTH-1:0]),
.clk (clk),
.cmd (cmd[2:0]),
.col (col[COL_WIDTH-1:0]),
.data_buf_addr (data_buf_addr[DATA_BUF_ADDR_WIDTH-1:0]),
.hi_priority (hi_priority),
.rank (rank[RANK_WIDTH-1:0]),
.raw_not_ecc (raw_not_ecc[2*nCK_PER_CLK-1 :0]),
.row (row[ROW_WIDTH-1:0]),
.rst (mux_rst),
.size (size),
.slot_0_present (slot_0_present_mc[7:0]),
.slot_1_present (slot_1_present_mc[7:0]),
.fi_xor_we (fi_xor_we),
.fi_xor_wrdata (fi_xor_wrdata),
.use_addr (use_addr));
// The following calculations should eventually be moved inside the PHY,
// and an ODT bus should be added to the PHY.
localparam CLK_PERIOD = tCK * nCK_PER_CLK;
localparam nCL = CL;
localparam nCWL = CWL_T;
`ifdef MC_SVA
ddr2_improper_CL: assert property
(@(posedge clk) (~((DRAM_TYPE == "DDR2") && ((CL > 6) || (CL < 3)))));
// Not needed after the CWL fix for DDR2
// ddr2_improper_CWL: assert property
// (@(posedge clk) (~((DRAM_TYPE == "DDR2") && ((CL - CWL) != 1))));
`endif
mig_7series_v2_3_ddr_phy_top #
(
.TCQ (TCQ),
.DDR3_VDD_OP_VOLT (DDR3_VDD_OP_VOLT),
.REFCLK_FREQ (REFCLK_FREQ),
.BYTE_LANES_B0 (BYTE_LANES_B0),
.BYTE_LANES_B1 (BYTE_LANES_B1),
.BYTE_LANES_B2 (BYTE_LANES_B2),
.BYTE_LANES_B3 (BYTE_LANES_B3),
.BYTE_LANES_B4 (BYTE_LANES_B4),
.PHY_0_BITLANES (PHY_0_BITLANES),
.PHY_1_BITLANES (PHY_1_BITLANES),
.PHY_2_BITLANES (PHY_2_BITLANES),
.CA_MIRROR (CA_MIRROR),
.CK_BYTE_MAP (CK_BYTE_MAP),
.ADDR_MAP (ADDR_MAP),
.BANK_MAP (BANK_MAP),
.CAS_MAP (CAS_MAP),
.CKE_ODT_BYTE_MAP (CKE_ODT_BYTE_MAP),
.CKE_MAP (CKE_MAP),
.ODT_MAP (ODT_MAP),
.CKE_ODT_AUX (CKE_ODT_AUX),
.CS_MAP (CS_MAP),
.PARITY_MAP (PARITY_MAP),
.RAS_MAP (RAS_MAP),
.WE_MAP (WE_MAP),
.DQS_BYTE_MAP (DQS_BYTE_MAP),
.DATA0_MAP (DATA0_MAP),
.DATA1_MAP (DATA1_MAP),
.DATA2_MAP (DATA2_MAP),
.DATA3_MAP (DATA3_MAP),
.DATA4_MAP (DATA4_MAP),
.DATA5_MAP (DATA5_MAP),
.DATA6_MAP (DATA6_MAP),
.DATA7_MAP (DATA7_MAP),
.DATA8_MAP (DATA8_MAP),
.DATA9_MAP (DATA9_MAP),
.DATA10_MAP (DATA10_MAP),
.DATA11_MAP (DATA11_MAP),
.DATA12_MAP (DATA12_MAP),
.DATA13_MAP (DATA13_MAP),
.DATA14_MAP (DATA14_MAP),
.DATA15_MAP (DATA15_MAP),
.DATA16_MAP (DATA16_MAP),
.DATA17_MAP (DATA17_MAP),
.MASK0_MAP (MASK0_MAP),
.MASK1_MAP (MASK1_MAP),
.CALIB_ROW_ADD (CALIB_ROW_ADD),
.CALIB_COL_ADD (CALIB_COL_ADD),
.CALIB_BA_ADD (CALIB_BA_ADD),
.nCS_PER_RANK (nCS_PER_RANK),
.CS_WIDTH (CS_WIDTH),
.nCK_PER_CLK (nCK_PER_CLK),
.PRE_REV3ES (PRE_REV3ES),
.CKE_WIDTH (CKE_WIDTH),
.DATA_CTL_B0 (DATA_CTL_B0),
.DATA_CTL_B1 (DATA_CTL_B1),
.DATA_CTL_B2 (DATA_CTL_B2),
.DATA_CTL_B3 (DATA_CTL_B3),
.DATA_CTL_B4 (DATA_CTL_B4),
.DDR2_DQSN_ENABLE (DDR2_DQSN_ENABLE),
.DRAM_TYPE (DRAM_TYPE),
.BANK_WIDTH (BANK_WIDTH),
.CK_WIDTH (CK_WIDTH),
.COL_WIDTH (COL_WIDTH),
.DM_WIDTH (DM_WIDTH),
.DQ_WIDTH (DQ_WIDTH),
.DQS_CNT_WIDTH (DQS_CNT_WIDTH),
.DQS_WIDTH (DQS_WIDTH),
.DRAM_WIDTH (DRAM_WIDTH),
.PHYCTL_CMD_FIFO (PHYCTL_CMD_FIFO),
.ROW_WIDTH (ROW_WIDTH),
.AL (AL),
.ADDR_CMD_MODE (ADDR_CMD_MODE),
.BURST_MODE (BURST_MODE),
.BURST_TYPE (BURST_TYPE),
.CL (nCL),
.CWL (nCWL),
.tRFC (tRFC),
.tREFI (tREFI),
.tCK (tCK),
.OUTPUT_DRV (OUTPUT_DRV),
.RANKS (RANKS),
.ODT_WIDTH (ODT_WIDTH),
.REG_CTRL (REG_CTRL),
.RTT_NOM (RTT_NOM),
.RTT_WR (RTT_WR),
.SLOT_1_CONFIG (SLOT_1_CONFIG),
.WRLVL (WRLVL),
.BANK_TYPE (BANK_TYPE),
.DATA_IO_PRIM_TYPE (DATA_IO_PRIM_TYPE),
.DATA_IO_IDLE_PWRDWN(DATA_IO_IDLE_PWRDWN),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
// Prevent the following simulation-related parameters from
// being overridden for synthesis - for synthesis only the
// default values of these parameters should be used
// synthesis translate_off
.SIM_BYPASS_INIT_CAL (SIM_BYPASS_INIT_CAL),
// synthesis translate_on
.USE_CS_PORT (USE_CS_PORT),
.USE_DM_PORT (USE_DM_PORT),
.USE_ODT_PORT (USE_ODT_PORT),
.MASTER_PHY_CTL (MASTER_PHY_CTL),
.DEBUG_PORT (DEBUG_PORT),
.IDELAY_ADJ (IDELAY_ADJ),
.FINE_PER_BIT (FINE_PER_BIT),
.CENTER_COMP_MODE (CENTER_COMP_MODE),
.PI_VAL_ADJ (PI_VAL_ADJ),
.TAPSPERKCLK (TAPSPERKCLK)
)
ddr_phy_top0
(
// Outputs
.calib_rd_data_offset_0 (calib_rd_data_offset_0),
.calib_rd_data_offset_1 (calib_rd_data_offset_1),
.calib_rd_data_offset_2 (calib_rd_data_offset_2),
.ddr_ck (ddr_ck),
.ddr_ck_n (ddr_ck_n),
.ddr_addr (ddr_addr),
.ddr_ba (ddr_ba),
.ddr_ras_n (ddr_ras_n),
.ddr_cas_n (ddr_cas_n),
.ddr_we_n (ddr_we_n),
.ddr_cs_n (ddr_cs_n),
.ddr_cke (ddr_cke),
.ddr_odt (ddr_odt),
.ddr_reset_n (ddr_reset_n),
.ddr_parity (ddr_parity),
.ddr_dm (ddr_dm),
.dbg_calib_top (dbg_calib_top),
.dbg_cpt_first_edge_cnt (dbg_cpt_first_edge_cnt),
.dbg_cpt_second_edge_cnt (dbg_cpt_second_edge_cnt),
.dbg_phy_rdlvl (dbg_phy_rdlvl),
.dbg_phy_wrcal (dbg_phy_wrcal),
.dbg_final_po_fine_tap_cnt (dbg_final_po_fine_tap_cnt),
.dbg_final_po_coarse_tap_cnt (dbg_final_po_coarse_tap_cnt),
.dbg_rd_data_edge_detect (dbg_rd_data_edge_detect),
.dbg_rddata (dbg_rddata),
.dbg_rdlvl_done (dbg_rdlvl_done),
.dbg_rdlvl_err (dbg_rdlvl_err),
.dbg_rdlvl_start (dbg_rdlvl_start),
.dbg_tap_cnt_during_wrlvl (dbg_tap_cnt_during_wrlvl),
.dbg_wl_edge_detect_valid (dbg_wl_edge_detect_valid),
.dbg_wrlvl_done (dbg_wrlvl_done),
.dbg_wrlvl_err (dbg_wrlvl_err),
.dbg_wrlvl_start (dbg_wrlvl_start),
.dbg_pi_phase_locked_phy4lanes (dbg_pi_phase_locked_phy4lanes),
.dbg_pi_dqs_found_lanes_phy4lanes (dbg_pi_dqs_found_lanes_phy4lanes),
.init_calib_complete (init_calib_complete_w),
.init_wrcal_complete (init_wrcal_complete_w),
.mc_address (mc_address),
.mc_aux_out0 (mc_aux_out0),
.mc_aux_out1 (mc_aux_out1),
.mc_bank (mc_bank),
.mc_cke (mc_cke),
.mc_odt (mc_odt),
.mc_cas_n (mc_cas_n),
.mc_cmd (mc_cmd),
.mc_cmd_wren (mc_cmd_wren),
.mc_cas_slot (mc_cas_slot),
.mc_cs_n (mc_cs_n),
.mc_ctl_wren (mc_ctl_wren),
.mc_data_offset (mc_data_offset),
.mc_data_offset_1 (mc_data_offset_1),
.mc_data_offset_2 (mc_data_offset_2),
.mc_rank_cnt (mc_rank_cnt),
.mc_ras_n (mc_ras_n),
.mc_reset_n (mc_reset_n),
.mc_we_n (mc_we_n),
.mc_wrdata (mc_wrdata),
.mc_wrdata_en (mc_wrdata_en),
.mc_wrdata_mask (mc_wrdata_mask),
.idle (idle),
.mem_refclk (mem_refclk),
.phy_mc_ctl_full (phy_mc_ctl_full),
.phy_mc_cmd_full (phy_mc_cmd_full),
.phy_mc_data_full (phy_mc_data_full),
.phy_rd_data (phy_rd_data),
.phy_rddata_valid (phy_rddata_valid),
.pll_lock (pll_lock),
.sync_pulse (sync_pulse),
// Inouts
.ddr_dqs (ddr_dqs),
.ddr_dqs_n (ddr_dqs_n),
.ddr_dq (ddr_dq),
// Inputs
.clk_ref (clk_ref),
.freq_refclk (freq_refclk),
.clk (clk),
.mmcm_ps_clk (mmcm_ps_clk),
.poc_sample_pd (poc_sample_pd),
.rst (rst),
.error (error),
.rst_tg_mc (rst_tg_mc),
.slot_0_present (slot_0_present),
.slot_1_present (slot_1_present),
.dbg_idel_up_all (dbg_idel_up_all),
.dbg_idel_down_all (dbg_idel_down_all),
.dbg_idel_up_cpt (dbg_idel_up_cpt),
.dbg_idel_down_cpt (dbg_idel_down_cpt),
.dbg_sel_idel_cpt (dbg_sel_idel_cpt),
.dbg_sel_all_idel_cpt (dbg_sel_all_idel_cpt)
,.device_temp (device_temp)
,.tempmon_sample_en (tempmon_sample_en)
,.psen (psen)
,.psincdec (psincdec)
,.psdone (psdone)
,.dbg_sel_pi_incdec (dbg_sel_pi_incdec)
,.dbg_sel_po_incdec (dbg_sel_po_incdec)
,.dbg_byte_sel (dbg_byte_sel)
,.dbg_pi_f_inc (dbg_pi_f_inc)
,.dbg_po_f_inc (dbg_po_f_inc)
,.dbg_po_f_stg23_sel (dbg_po_f_stg23_sel)
,.dbg_pi_f_dec (dbg_pi_f_dec)
,.dbg_po_f_dec (dbg_po_f_dec)
,.dbg_cpt_tap_cnt (dbg_cpt_tap_cnt)
,.dbg_dq_idelay_tap_cnt (dbg_dq_idelay_tap_cnt)
,.dbg_rddata_valid (dbg_rddata_valid)
,.dbg_wrlvl_fine_tap_cnt (dbg_wrlvl_fine_tap_cnt)
,.dbg_wrlvl_coarse_tap_cnt (dbg_wrlvl_coarse_tap_cnt)
,.dbg_phy_wrlvl (dbg_phy_wrlvl)
,.ref_dll_lock (ref_dll_lock)
,.rst_phaser_ref (rst_phaser_ref)
,.iddr_rst (iddr_rst)
,.dbg_rd_data_offset (dbg_rd_data_offset)
,.dbg_phy_init (dbg_phy_init)
,.dbg_prbs_rdlvl (dbg_prbs_rdlvl)
,.dbg_dqs_found_cal (dbg_dqs_found_cal)
,.dbg_po_counter_read_val (dbg_po_counter_read_val)
,.dbg_pi_counter_read_val (dbg_pi_counter_read_val)
,.dbg_pi_phaselock_start (dbg_pi_phaselock_start)
,.dbg_pi_phaselocked_done (dbg_pi_phaselocked_done)
,.dbg_pi_phaselock_err (dbg_pi_phaselock_err)
,.dbg_pi_dqsfound_start (dbg_pi_dqsfound_start)
,.dbg_pi_dqsfound_done (dbg_pi_dqsfound_done)
,.dbg_pi_dqsfound_err (dbg_pi_dqsfound_err)
,.dbg_wrcal_start (dbg_wrcal_start)
,.dbg_wrcal_done (dbg_wrcal_done)
,.dbg_wrcal_err (dbg_wrcal_err)
,.dbg_phy_oclkdelay_cal (dbg_phy_oclkdelay_cal)
,.dbg_oclkdelay_rd_data (dbg_oclkdelay_rd_data)
,.dbg_oclkdelay_calib_start (dbg_oclkdelay_calib_start)
,.dbg_oclkdelay_calib_done (dbg_oclkdelay_calib_done)
,.prbs_final_dqs_tap_cnt_r (prbs_final_dqs_tap_cnt_r)
,.dbg_prbs_first_edge_taps (dbg_prbs_first_edge_taps)
,.dbg_prbs_second_edge_taps (dbg_prbs_second_edge_taps)
);
endmodule
|
module mig_7series_v2_3_ui_cmd #
(
parameter TCQ = 100,
parameter ADDR_WIDTH = 33,
parameter BANK_WIDTH = 3,
parameter COL_WIDTH = 12,
parameter DATA_BUF_ADDR_WIDTH = 5,
parameter RANK_WIDTH = 2,
parameter ROW_WIDTH = 16,
parameter RANKS = 4,
parameter MEM_ADDR_ORDER = "BANK_ROW_COLUMN"
)
(/*AUTOARG*/
// Outputs
app_rdy, use_addr, rank, bank, row, col, size, cmd, hi_priority,
rd_accepted, wr_accepted, data_buf_addr,
// Inputs
rst, clk, accept_ns, rd_buf_full, wr_req_16, app_addr, app_cmd,
app_sz, app_hi_pri, app_en, wr_data_buf_addr, rd_data_buf_addr_r
);
input rst;
input clk;
input accept_ns;
input rd_buf_full;
input wr_req_16;
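// Ready generation: the UI is ready when the controller can accept a request
// (accept_ns), the read return buffer has room, and the write path is not
// backed up (wr_req_16 presumably flags 16 outstanding write requests).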
wire app_rdy_ns = accept_ns && ~rd_buf_full && ~wr_req_16;
reg app_rdy_r = 1'b0 /* synthesis syn_maxfan = 10 */;
always @(posedge clk) app_rdy_r <= #TCQ app_rdy_ns;
output wire app_rdy;
assign app_rdy = app_rdy_r;
input [ADDR_WIDTH-1:0] app_addr;
input [2:0] app_cmd;
input app_sz;
input app_hi_pri;
input app_en;
reg [ADDR_WIDTH-1:0] app_addr_r1 = {ADDR_WIDTH{1'b0}};
reg [ADDR_WIDTH-1:0] app_addr_r2 = {ADDR_WIDTH{1'b0}};
reg [2:0] app_cmd_r1;
reg [2:0] app_cmd_r2;
reg app_sz_r1;
reg app_sz_r2;
reg app_hi_pri_r1;
reg app_hi_pri_r2;
reg app_en_r1;
reg app_en_r2;
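// Two-deep command pipeline: the *_r1 stage captures the app_* interface while
// app_rdy is high, and *_r2 re-registers it one cycle later. The output muxes
// below select the _r1 stage while app_rdy is high and fall back to the held
// _r2 stage during a stall, so a command accepted just before app_rdy dropped
// is not lost.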
wire [ADDR_WIDTH-1:0] app_addr_ns1 = app_rdy_r && app_en ? app_addr : app_addr_r1;
wire [ADDR_WIDTH-1:0] app_addr_ns2 = app_rdy_r ? app_addr_r1 : app_addr_r2;
wire [2:0] app_cmd_ns1 = app_rdy_r ? app_cmd : app_cmd_r1;
wire [2:0] app_cmd_ns2 = app_rdy_r ? app_cmd_r1 : app_cmd_r2;
wire app_sz_ns1 = app_rdy_r ? app_sz : app_sz_r1;
wire app_sz_ns2 = app_rdy_r ? app_sz_r1 : app_sz_r2;
wire app_hi_pri_ns1 = app_rdy_r ? app_hi_pri : app_hi_pri_r1;
wire app_hi_pri_ns2 = app_rdy_r ? app_hi_pri_r1 : app_hi_pri_r2;
wire app_en_ns1 = ~rst && (app_rdy_r ? app_en : app_en_r1);
wire app_en_ns2 = ~rst && (app_rdy_r ? app_en_r1 : app_en_r2);
always @(posedge clk) begin
if (rst) begin
app_addr_r1 <= #TCQ {ADDR_WIDTH{1'b0}};
app_addr_r2 <= #TCQ {ADDR_WIDTH{1'b0}};
end else begin
app_addr_r1 <= #TCQ app_addr_ns1;
app_addr_r2 <= #TCQ app_addr_ns2;
end
app_cmd_r1 <= #TCQ app_cmd_ns1;
app_cmd_r2 <= #TCQ app_cmd_ns2;
app_sz_r1 <= #TCQ app_sz_ns1;
app_sz_r2 <= #TCQ app_sz_ns2;
app_hi_pri_r1 <= #TCQ app_hi_pri_ns1;
app_hi_pri_r2 <= #TCQ app_hi_pri_ns2;
app_en_r1 <= #TCQ app_en_ns1;
app_en_r2 <= #TCQ app_en_ns2;
end // always @ (posedge clk)
wire use_addr_lcl = app_en_r2 && app_rdy_r;
output wire use_addr;
assign use_addr = use_addr_lcl;
output wire [RANK_WIDTH-1:0] rank;
output wire [BANK_WIDTH-1:0] bank;
output wire [ROW_WIDTH-1:0] row;
output wire [COL_WIDTH-1:0] col;
output wire size;
output wire [2:0] cmd;
output wire hi_priority;
/* assign col = app_rdy_r
? app_addr_r1[0+:COL_WIDTH]
: app_addr_r2[0+:COL_WIDTH];*/
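// Address decomposition (illustrative, using the default parameter values:
// COL_WIDTH = 12, ROW_WIDTH = 16, BANK_WIDTH = 3, RANK_WIDTH = 2,
// ADDR_WIDTH = 33):
//   "BANK_ROW_COLUMN" (default, final else branch):
//     col = app_addr[11:0], row = app_addr[27:12],
//     bank = app_addr[30:28], rank = app_addr[32:31]
//   "ROW_BANK_COLUMN":
//     col = app_addr[11:0], bank = app_addr[14:12],
//     row = app_addr[30:15], rank = app_addr[32:31]
// The "TG_TEST" ordering appears to interleave column/row/bank/rank bits for
// the traffic generator and is decoded piecewise below.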
generate
begin
if (MEM_ADDR_ORDER == "TG_TEST")
begin
assign col[4:0] = app_rdy_r
? app_addr_r1[0+:5]
: app_addr_r2[0+:5];
if (RANKS==1)
begin
assign col[COL_WIDTH-1:COL_WIDTH-2] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+:2]
: app_addr_r2[5+3+BANK_WIDTH+:2];
assign col[COL_WIDTH-3:5] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+2+2+:COL_WIDTH-7]
: app_addr_r2[5+3+BANK_WIDTH+2+2+:COL_WIDTH-7];
end
else
begin
assign col[COL_WIDTH-1:COL_WIDTH-2] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+RANK_WIDTH+:2]
: app_addr_r2[5+3+BANK_WIDTH+RANK_WIDTH+:2];
assign col[COL_WIDTH-3:5] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+RANK_WIDTH+2+2+:COL_WIDTH-7]
: app_addr_r2[5+3+BANK_WIDTH+RANK_WIDTH+2+2+:COL_WIDTH-7];
end
assign row[2:0] = app_rdy_r
? app_addr_r1[5+:3]
: app_addr_r2[5+:3];
if (RANKS==1)
begin
assign row[ROW_WIDTH-1:ROW_WIDTH-2] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+2+:2]
: app_addr_r2[5+3+BANK_WIDTH+2+:2];
assign row[ROW_WIDTH-3:3] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+2+2+COL_WIDTH-7+:ROW_WIDTH-5]
: app_addr_r2[5+3+BANK_WIDTH+2+2+COL_WIDTH-7+:ROW_WIDTH-5];
end
else
begin
assign row[ROW_WIDTH-1:ROW_WIDTH-2] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+RANK_WIDTH+2+:2]
: app_addr_r2[5+3+BANK_WIDTH+RANK_WIDTH+2+:2];
assign row[ROW_WIDTH-3:3] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+RANK_WIDTH+2+2+COL_WIDTH-7+:ROW_WIDTH-5]
: app_addr_r2[5+3+BANK_WIDTH+RANK_WIDTH+2+2+COL_WIDTH-7+:ROW_WIDTH-5];
end
assign bank = app_rdy_r
? app_addr_r1[5+3+:BANK_WIDTH]
: app_addr_r2[5+3+:BANK_WIDTH];
assign rank = (RANKS == 1)
? 1'b0
: app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+:RANK_WIDTH]
: app_addr_r2[5+3+BANK_WIDTH+:RANK_WIDTH];
end
else if (MEM_ADDR_ORDER == "ROW_BANK_COLUMN")
begin
assign col = app_rdy_r
? app_addr_r1[0+:COL_WIDTH]
: app_addr_r2[0+:COL_WIDTH];
assign row = app_rdy_r
? app_addr_r1[COL_WIDTH+BANK_WIDTH+:ROW_WIDTH]
: app_addr_r2[COL_WIDTH+BANK_WIDTH+:ROW_WIDTH];
assign bank = app_rdy_r
? app_addr_r1[COL_WIDTH+:BANK_WIDTH]
: app_addr_r2[COL_WIDTH+:BANK_WIDTH];
assign rank = (RANKS == 1)
? 1'b0
: app_rdy_r
? app_addr_r1[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH]
: app_addr_r2[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH];
end
else
begin
assign col = app_rdy_r
? app_addr_r1[0+:COL_WIDTH]
: app_addr_r2[0+:COL_WIDTH];
assign row = app_rdy_r
? app_addr_r1[COL_WIDTH+:ROW_WIDTH]
: app_addr_r2[COL_WIDTH+:ROW_WIDTH];
assign bank = app_rdy_r
? app_addr_r1[COL_WIDTH+ROW_WIDTH+:BANK_WIDTH]
: app_addr_r2[COL_WIDTH+ROW_WIDTH+:BANK_WIDTH];
assign rank = (RANKS == 1)
? 1'b0
: app_rdy_r
? app_addr_r1[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH]
: app_addr_r2[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH];
end
end
endgenerate
/* assign rank = (RANKS == 1)
? 1'b0
: app_rdy_r
? app_addr_r1[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH]
: app_addr_r2[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH];*/
assign size = app_rdy_r
? app_sz_r1
: app_sz_r2;
assign cmd = app_rdy_r
? app_cmd_r1
: app_cmd_r2;
assign hi_priority = app_rdy_r
? app_hi_pri_r1
: app_hi_pri_r2;
wire request_accepted = use_addr_lcl && app_rdy_r;
wire rd = app_cmd_r2[1:0] == 2'b01;
wire wr = app_cmd_r2[1:0] == 2'b00;
wire wr_bytes = app_cmd_r2[1:0] == 2'b11;
wire write = wr || wr_bytes;
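// UI command decode (from app_cmd[1:0] of the second pipeline stage):
// 2'b01 = read, 2'b00 = write, 2'b11 = write with byte enables.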
output wire rd_accepted;
assign rd_accepted = request_accepted && rd;
output wire wr_accepted;
assign wr_accepted = request_accepted && write;
input [DATA_BUF_ADDR_WIDTH-1:0] wr_data_buf_addr;
input [DATA_BUF_ADDR_WIDTH-1:0] rd_data_buf_addr_r;
output wire [DATA_BUF_ADDR_WIDTH-1:0] data_buf_addr;
assign data_buf_addr = ~write ? rd_data_buf_addr_r : wr_data_buf_addr;
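// Writes carry the write-data buffer slot supplied by the UI write path;
// reads return the pre-allocated read-buffer slot, presumably so returning
// data can be steered back to the correct entry.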
endmodule
|
module mig_7series_v2_3_ui_cmd #
(
parameter TCQ = 100,
parameter ADDR_WIDTH = 33,
parameter BANK_WIDTH = 3,
parameter COL_WIDTH = 12,
parameter DATA_BUF_ADDR_WIDTH = 5,
parameter RANK_WIDTH = 2,
parameter ROW_WIDTH = 16,
parameter RANKS = 4,
parameter MEM_ADDR_ORDER = "BANK_ROW_COLUMN"
)
(/*AUTOARG*/
// Outputs
app_rdy, use_addr, rank, bank, row, col, size, cmd, hi_priority,
rd_accepted, wr_accepted, data_buf_addr,
// Inputs
rst, clk, accept_ns, rd_buf_full, wr_req_16, app_addr, app_cmd,
app_sz, app_hi_pri, app_en, wr_data_buf_addr, rd_data_buf_addr_r
);
input rst;
input clk;
input accept_ns;
input rd_buf_full;
input wr_req_16;
wire app_rdy_ns = accept_ns && ~rd_buf_full && ~wr_req_16;
reg app_rdy_r = 1'b0 /* synthesis syn_maxfan = 10 */;
always @(posedge clk) app_rdy_r <= #TCQ app_rdy_ns;
output wire app_rdy;
assign app_rdy = app_rdy_r;
input [ADDR_WIDTH-1:0] app_addr;
input [2:0] app_cmd;
input app_sz;
input app_hi_pri;
input app_en;
reg [ADDR_WIDTH-1:0] app_addr_r1 = {ADDR_WIDTH{1'b0}};
reg [ADDR_WIDTH-1:0] app_addr_r2 = {ADDR_WIDTH{1'b0}};
reg [2:0] app_cmd_r1;
reg [2:0] app_cmd_r2;
reg app_sz_r1;
reg app_sz_r2;
reg app_hi_pri_r1;
reg app_hi_pri_r2;
reg app_en_r1;
reg app_en_r2;
wire [ADDR_WIDTH-1:0] app_addr_ns1 = app_rdy_r && app_en ? app_addr : app_addr_r1;
wire [ADDR_WIDTH-1:0] app_addr_ns2 = app_rdy_r ? app_addr_r1 : app_addr_r2;
wire [2:0] app_cmd_ns1 = app_rdy_r ? app_cmd : app_cmd_r1;
wire [2:0] app_cmd_ns2 = app_rdy_r ? app_cmd_r1 : app_cmd_r2;
wire app_sz_ns1 = app_rdy_r ? app_sz : app_sz_r1;
wire app_sz_ns2 = app_rdy_r ? app_sz_r1 : app_sz_r2;
wire app_hi_pri_ns1 = app_rdy_r ? app_hi_pri : app_hi_pri_r1;
wire app_hi_pri_ns2 = app_rdy_r ? app_hi_pri_r1 : app_hi_pri_r2;
wire app_en_ns1 = ~rst && (app_rdy_r ? app_en : app_en_r1);
wire app_en_ns2 = ~rst && (app_rdy_r ? app_en_r1 : app_en_r2);
always @(posedge clk) begin
if (rst) begin
app_addr_r1 <= #TCQ {ADDR_WIDTH{1'b0}};
app_addr_r2 <= #TCQ {ADDR_WIDTH{1'b0}};
end else begin
app_addr_r1 <= #TCQ app_addr_ns1;
app_addr_r2 <= #TCQ app_addr_ns2;
end
app_cmd_r1 <= #TCQ app_cmd_ns1;
app_cmd_r2 <= #TCQ app_cmd_ns2;
app_sz_r1 <= #TCQ app_sz_ns1;
app_sz_r2 <= #TCQ app_sz_ns2;
app_hi_pri_r1 <= #TCQ app_hi_pri_ns1;
app_hi_pri_r2 <= #TCQ app_hi_pri_ns2;
app_en_r1 <= #TCQ app_en_ns1;
app_en_r2 <= #TCQ app_en_ns2;
end // always @ (posedge clk)
wire use_addr_lcl = app_en_r2 && app_rdy_r;
output wire use_addr;
assign use_addr = use_addr_lcl;
output wire [RANK_WIDTH-1:0] rank;
output wire [BANK_WIDTH-1:0] bank;
output wire [ROW_WIDTH-1:0] row;
output wire [COL_WIDTH-1:0] col;
output wire size;
output wire [2:0] cmd;
output wire hi_priority;
/* assign col = app_rdy_r
? app_addr_r1[0+:COL_WIDTH]
: app_addr_r2[0+:COL_WIDTH];*/
generate
begin
if (MEM_ADDR_ORDER == "TG_TEST")
begin
assign col[4:0] = app_rdy_r
? app_addr_r1[0+:5]
: app_addr_r2[0+:5];
if (RANKS==1)
begin
assign col[COL_WIDTH-1:COL_WIDTH-2] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+:2]
: app_addr_r2[5+3+BANK_WIDTH+:2];
assign col[COL_WIDTH-3:5] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+2+2+:COL_WIDTH-7]
: app_addr_r2[5+3+BANK_WIDTH+2+2+:COL_WIDTH-7];
end
else
begin
assign col[COL_WIDTH-1:COL_WIDTH-2] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+RANK_WIDTH+:2]
: app_addr_r2[5+3+BANK_WIDTH+RANK_WIDTH+:2];
assign col[COL_WIDTH-3:5] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+RANK_WIDTH+2+2+:COL_WIDTH-7]
: app_addr_r2[5+3+BANK_WIDTH+RANK_WIDTH+2+2+:COL_WIDTH-7];
end
assign row[2:0] = app_rdy_r
? app_addr_r1[5+:3]
: app_addr_r2[5+:3];
if (RANKS==1)
begin
assign row[ROW_WIDTH-1:ROW_WIDTH-2] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+2+:2]
: app_addr_r2[5+3+BANK_WIDTH+2+:2];
assign row[ROW_WIDTH-3:3] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+2+2+COL_WIDTH-7+:ROW_WIDTH-5]
: app_addr_r2[5+3+BANK_WIDTH+2+2+COL_WIDTH-7+:ROW_WIDTH-5];
end
else
begin
assign row[ROW_WIDTH-1:ROW_WIDTH-2] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+RANK_WIDTH+2+:2]
: app_addr_r2[5+3+BANK_WIDTH+RANK_WIDTH+2+:2];
assign row[ROW_WIDTH-3:3] = app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+RANK_WIDTH+2+2+COL_WIDTH-7+:ROW_WIDTH-5]
: app_addr_r2[5+3+BANK_WIDTH+RANK_WIDTH+2+2+COL_WIDTH-7+:ROW_WIDTH-5];
end
assign bank = app_rdy_r
? app_addr_r1[5+3+:BANK_WIDTH]
: app_addr_r2[5+3+:BANK_WIDTH];
assign rank = (RANKS == 1)
? 1'b0
: app_rdy_r
? app_addr_r1[5+3+BANK_WIDTH+:RANK_WIDTH]
: app_addr_r2[5+3+BANK_WIDTH+:RANK_WIDTH];
end
else if (MEM_ADDR_ORDER == "ROW_BANK_COLUMN")
begin
assign col = app_rdy_r
? app_addr_r1[0+:COL_WIDTH]
: app_addr_r2[0+:COL_WIDTH];
assign row = app_rdy_r
? app_addr_r1[COL_WIDTH+BANK_WIDTH+:ROW_WIDTH]
: app_addr_r2[COL_WIDTH+BANK_WIDTH+:ROW_WIDTH];
assign bank = app_rdy_r
? app_addr_r1[COL_WIDTH+:BANK_WIDTH]
: app_addr_r2[COL_WIDTH+:BANK_WIDTH];
assign rank = (RANKS == 1)
? 1'b0
: app_rdy_r
? app_addr_r1[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH]
: app_addr_r2[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH];
end
else
begin
assign col = app_rdy_r
? app_addr_r1[0+:COL_WIDTH]
: app_addr_r2[0+:COL_WIDTH];
assign row = app_rdy_r
? app_addr_r1[COL_WIDTH+:ROW_WIDTH]
: app_addr_r2[COL_WIDTH+:ROW_WIDTH];
assign bank = app_rdy_r
? app_addr_r1[COL_WIDTH+ROW_WIDTH+:BANK_WIDTH]
: app_addr_r2[COL_WIDTH+ROW_WIDTH+:BANK_WIDTH];
assign rank = (RANKS == 1)
? 1'b0
: app_rdy_r
? app_addr_r1[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH]
: app_addr_r2[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH];
end
end
endgenerate
/* assign rank = (RANKS == 1)
? 1'b0
: app_rdy_r
? app_addr_r1[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH]
: app_addr_r2[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH];*/
assign size = app_rdy_r
? app_sz_r1
: app_sz_r2;
assign cmd = app_rdy_r
? app_cmd_r1
: app_cmd_r2;
assign hi_priority = app_rdy_r
? app_hi_pri_r1
: app_hi_pri_r2;
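// A request is consumed when use_addr and app_rdy coincide; split the
// acknowledge into read and write accept strobes for the data-buffer logic.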
wire request_accepted = use_addr_lcl && app_rdy_r;
wire rd = app_cmd_r2[1:0] == 2'b01;
wire wr = app_cmd_r2[1:0] == 2'b00;
wire wr_bytes = app_cmd_r2[1:0] == 2'b11;
wire write = wr || wr_bytes;
output wire rd_accepted;
assign rd_accepted = request_accepted && rd;
output wire wr_accepted;
assign wr_accepted = request_accepted && write;
input [DATA_BUF_ADDR_WIDTH-1:0] wr_data_buf_addr;
input [DATA_BUF_ADDR_WIDTH-1:0] rd_data_buf_addr_r;
output wire [DATA_BUF_ADDR_WIDTH-1:0] data_buf_addr;
assign data_buf_addr = ~write ? rd_data_buf_addr_r : wr_data_buf_addr;
endmodule
|
// Beginning of automatic wires (for undeclared instantiated-module outputs)
wire act_wait_r; // From bank_state0 of bank_state.v
wire allow_auto_pre; // From bank_state0 of bank_state.v
wire auto_pre_r; // From bank_queue0 of bank_queue.v
wire bank_wait_in_progress; // From bank_state0 of bank_state.v
wire order_q_zero; // From bank_queue0 of bank_queue.v
wire pass_open_bank_ns; // From bank_queue0 of bank_queue.v
wire pass_open_bank_r; // From bank_queue0 of bank_queue.v
wire pre_wait_r; // From bank_state0 of bank_state.v
wire precharge_bm_end; // From bank_state0 of bank_state.v
wire q_has_priority; // From bank_queue0 of bank_queue.v
wire q_has_rd; // From bank_queue0 of bank_queue.v
wire [nBANK_MACHS*2-1:0] rb_hit_busies_r; // From bank_queue0 of bank_queue.v
wire rcv_open_bank; // From bank_queue0 of bank_queue.v
wire rd_half_rmw; // From bank_state0 of bank_state.v
wire req_priority_r; // From bank_compare0 of bank_compare.v
wire row_hit_r; // From bank_compare0 of bank_compare.v
wire tail_r; // From bank_queue0 of bank_queue.v
wire wait_for_maint_r; // From bank_queue0 of bank_queue.v
// End of automatics
output idle_ns;
output req_wr_r;
output rd_wr_r;
output bm_end;
output idle_r;
output head_r;
output [RANK_WIDTH-1:0] req_rank_r;
output rb_hit_busy_r;
output passing_open_bank;
output maint_hit;
output [DATA_BUF_ADDR_WIDTH-1:0] req_data_buf_addr_r;
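// Per-bank-machine structure: bank_compare0 holds and compares the request
// owned by this machine, bank_state0 sequences the DRAM timing for it, and
// bank_queue0 orders this machine among its peers.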
mig_7series_v2_3_bank_compare #
(/*AUTOINSTPARAM*/
// Parameters
.BANK_WIDTH (BANK_WIDTH),
.TCQ (TCQ),
.BURST_MODE (BURST_MODE),
.COL_WIDTH (COL_WIDTH),
.DATA_BUF_ADDR_WIDTH (DATA_BUF_ADDR_WIDTH),
.ECC (ECC),
.RANK_WIDTH (RANK_WIDTH),
.RANKS (RANKS),
.ROW_WIDTH (ROW_WIDTH))
bank_compare0
(/*AUTOINST*/
// Outputs
.req_data_buf_addr_r (req_data_buf_addr_r[DATA_BUF_ADDR_WIDTH-1:0]),
.req_periodic_rd_r (req_periodic_rd_r),
.req_size_r (req_size_r),
.rd_wr_r (rd_wr_r),
.req_rank_r (req_rank_r[RANK_WIDTH-1:0]),
.req_bank_r (req_bank_r[BANK_WIDTH-1:0]),
.req_row_r (req_row_r[ROW_WIDTH-1:0]),
.req_wr_r (req_wr_r),
.req_priority_r (req_priority_r),
.rb_hit_busy_r (rb_hit_busy_r),
.rb_hit_busy_ns (rb_hit_busy_ns),
.row_hit_r (row_hit_r),
.maint_hit (maint_hit),
.col_addr (col_addr[ROW_WIDTH-1:0]),
.req_ras (req_ras),
.req_cas (req_cas),
.row_cmd_wr (row_cmd_wr),
.row_addr (row_addr[ROW_WIDTH-1:0]),
.rank_busy_r (rank_busy_r[RANKS-1:0]),
// Inputs
.clk (clk),
.idle_ns (idle_ns),
.idle_r (idle_r),
.data_buf_addr (data_buf_addr[DATA_BUF_ADDR_WIDTH-1:0]),
.periodic_rd_insert (periodic_rd_insert),
.size (size),
.cmd (cmd[2:0]),
.sending_col (sending_col),
.rank (rank[RANK_WIDTH-1:0]),
.periodic_rd_rank_r (periodic_rd_rank_r[RANK_WIDTH-1:0]),
.bank (bank[BANK_WIDTH-1:0]),
.row (row[ROW_WIDTH-1:0]),
.col (col[COL_WIDTH-1:0]),
.hi_priority (hi_priority),
.maint_rank_r (maint_rank_r[RANK_WIDTH-1:0]),
.maint_zq_r (maint_zq_r),
.maint_sre_r (maint_sre_r),
.auto_pre_r (auto_pre_r),
.rd_half_rmw (rd_half_rmw),
.act_wait_r (act_wait_r));
mig_7series_v2_3_bank_state #
(/*AUTOINSTPARAM*/
// Parameters
.TCQ (TCQ),
.ADDR_CMD_MODE (ADDR_CMD_MODE),
.BM_CNT_WIDTH (BM_CNT_WIDTH),
.BURST_MODE (BURST_MODE),
.CWL (CWL),
.DATA_BUF_ADDR_WIDTH (DATA_BUF_ADDR_WIDTH),
.DRAM_TYPE (DRAM_TYPE),
.ECC (ECC),
.ID (ID),
.nBANK_MACHS (nBANK_MACHS),
.nCK_PER_CLK (nCK_PER_CLK),
.nOP_WAIT (nOP_WAIT),
.nRAS_CLKS (nRAS_CLKS),
.nRP (nRP),
.nRTP (nRTP),
.nRCD (nRCD),
.nWTP_CLKS (nWTP_CLKS),
.ORDERING (ORDERING),
.RANKS (RANKS),
.RANK_WIDTH (RANK_WIDTH),
.RAS_TIMER_WIDTH (RAS_TIMER_WIDTH),
.STARVE_LIMIT (STARVE_LIMIT))
bank_state0
(/*AUTOINST*/
// Outputs
.start_rcd (start_rcd),
.act_wait_r (act_wait_r),
.rd_half_rmw (rd_half_rmw),
.ras_timer_ns (ras_timer_ns[RAS_TIMER_WIDTH-1:0]),
.end_rtp (end_rtp),
.bank_wait_in_progress (bank_wait_in_progress),
.start_pre_wait (start_pre_wait),
.op_exit_req (op_exit_req),
.pre_wait_r (pre_wait_r),
.allow_auto_pre (allow_auto_pre),
.precharge_bm_end (precharge_bm_end),
.demand_act_priority (demand_act_priority),
.rts_row (rts_row),
.rts_pre (rts_pre),
.act_this_rank_r (act_this_rank_r[RANKS-1:0]),
.demand_priority (demand_priority),
.col_rdy_wr (col_rdy_wr),
.rts_col (rts_col),
.wr_this_rank_r (wr_this_rank_r[RANKS-1:0]),
.rd_this_rank_r (rd_this_rank_r[RANKS-1:0]),
// Inputs
.clk (clk),
.rst (rst),
.bm_end (bm_end),
.pass_open_bank_r (pass_open_bank_r),
.sending_row (sending_row),
.sending_pre (sending_pre),
.rcv_open_bank (rcv_open_bank),
.sending_col (sending_col),
.rd_wr_r (rd_wr_r),
.req_wr_r (req_wr_r),
.rd_data_addr (rd_data_addr[DATA_BUF_ADDR_WIDTH-1:0]),
.req_data_buf_addr_r (req_data_buf_addr_r[DATA_BUF_ADDR_WIDTH-1:0]),
.phy_rddata_valid (phy_rddata_valid),
.rd_rmw (rd_rmw),
.ras_timer_ns_in (ras_timer_ns_in[(2*(RAS_TIMER_WIDTH*nBANK_MACHS))-1:0]),
.rb_hit_busies_r (rb_hit_busies_r[(nBANK_MACHS*2)-1:0]),
.idle_r (idle_r),
.passing_open_bank (passing_open_bank),
.low_idle_cnt_r (low_idle_cnt_r),
.op_exit_grant (op_exit_grant),
.tail_r (tail_r),
.auto_pre_r (auto_pre_r),
.pass_open_bank_ns (pass_open_bank_ns),
.phy_mc_cmd_full (phy_mc_cmd_full),
.phy_mc_ctl_full (phy_mc_ctl_full),
.phy_mc_data_full (phy_mc_data_full),
.rnk_config (rnk_config[RANK_WIDTH-1:0]),
.rnk_config_strobe (rnk_config_strobe),
.rnk_config_kill_rts_col (rnk_config_kill_rts_col),
.rnk_config_valid_r (rnk_config_valid_r),
.rtc (rtc),
.req_rank_r (req_rank_r[RANK_WIDTH-1:0]),
.req_rank_r_in (req_rank_r_in[(RANK_WIDTH*nBANK_MACHS*2)-1:0]),
.start_rcd_in (start_rcd_in[(nBANK_MACHS*2)-1:0]),
.inhbt_act_faw_r (inhbt_act_faw_r[RANKS-1:0]),
.wait_for_maint_r (wait_for_maint_r),
.head_r (head_r),
.sent_row (sent_row),
.demand_act_priority_in (demand_act_priority_in[(nBANK_MACHS*2)-1:0]),
.order_q_zero (order_q_zero),
.sent_col (sent_col),
.q_has_rd (q_has_rd),
.q_has_priority (q_has_priority),
.req_priority_r (req_priority_r),
.idle_ns (idle_ns),
.demand_priority_in (demand_priority_in[(nBANK_MACHS*2)-1:0]),
.inhbt_rd (inhbt_rd[RANKS-1:0]),
.inhbt_wr (inhbt_wr[RANKS-1:0]),
.dq_busy_data (dq_busy_data));
mig_7series_v2_3_bank_queue #
(/*AUTOINSTPARAM*/
// Parameters
.TCQ (TCQ),
.BM_CNT_WIDTH (BM_CNT_WIDTH),
.nBANK_MACHS (nBANK_MACHS),
.ORDERING (ORDERING),
.ID (ID))
bank_queue0
(/*AUTOINST*/
// Outputs
.head_r (head_r),
.tail_r (tail_r),
.idle_ns (idle_ns),
.idle_r (idle_r),
.pass_open_bank_ns (pass_open_bank_ns),
.pass_open_bank_r (pass_open_bank_r),
.auto_pre_r (auto_pre_r),
.bm_end (bm_end),
.passing_open_bank (passing_open_bank),
.ordered_issued (ordered_issued),
.ordered_r (ordered_r),
.order_q_zero (order_q_zero),
.rcv_open_bank (rcv_open_bank),
.rb_hit_busies_r (rb_hit_busies_r[nBANK_MACHS*2-1:0]),
.q_has_rd (q_has_rd),
.q_has_priority (q_has_priority),
.wait_for_maint_r (wait_for_maint_r),
// Inputs
.clk (clk),
.rst (rst),
.accept_internal_r (accept_internal_r),
.use_addr (use_addr),
.periodic_rd_ack_r (periodic_rd_ack_r),
.bm_end_in (bm_end_in[(nBANK_MACHS*2)-1:0]),
.idle_cnt (idle_cnt[BM_CNT_WIDTH-1:0]),
.rb_hit_busy_cnt (rb_hit_busy_cnt[BM_CNT_WIDTH-1:0]),
.accept_req (accept_req),
.rb_hit_busy_r (rb_hit_busy_r),
.maint_idle (maint_idle),
.maint_hit (maint_hit),
.row_hit_r (row_hit_r),
.pre_wait_r (pre_wait_r),
.allow_auto_pre (allow_auto_pre),
.sending_col (sending_col),
.req_wr_r (req_wr_r),
.rd_wr_r (rd_wr_r),
.bank_wait_in_progress (bank_wait_in_progress),
.precharge_bm_end (precharge_bm_end),
.adv_order_q (adv_order_q),
.order_cnt (order_cnt[BM_CNT_WIDTH-1:0]),
.rb_hit_busy_ns_in (rb_hit_busy_ns_in[(nBANK_MACHS*2)-1:0]),
.passing_open_bank_in (passing_open_bank_in[(nBANK_MACHS*2)-1:0]),
.was_wr (was_wr),
.maint_req_r (maint_req_r),
.was_priority (was_priority));
endmodule
|
module mig_7series_v2_3_arb_select #
(
parameter TCQ = 100,
parameter EVEN_CWL_2T_MODE = "OFF",
parameter ADDR_CMD_MODE = "1T",
parameter BANK_VECT_INDX = 11,
parameter BANK_WIDTH = 3,
parameter BURST_MODE = "8",
parameter CS_WIDTH = 4,
parameter CL = 5,
parameter CWL = 5,
parameter DATA_BUF_ADDR_VECT_INDX = 31,
parameter DATA_BUF_ADDR_WIDTH = 8,
parameter DRAM_TYPE = "DDR3",
parameter EARLY_WR_DATA_ADDR = "OFF",
parameter ECC = "OFF",
parameter nBANK_MACHS = 4,
parameter nCK_PER_CLK = 2,
parameter nCS_PER_RANK = 1,
parameter CKE_ODT_AUX = "FALSE",
parameter nSLOTS = 2,
parameter RANKS = 1,
parameter RANK_VECT_INDX = 15,
parameter RANK_WIDTH = 2,
parameter ROW_VECT_INDX = 63,
parameter ROW_WIDTH = 16,
parameter RTT_NOM = "40",
parameter RTT_WR = "120",
parameter SLOT_0_CONFIG = 8'b0000_0101,
parameter SLOT_1_CONFIG = 8'b0000_1010
)
(
// Outputs
output wire col_periodic_rd,
output wire [RANK_WIDTH-1:0] col_ra,
output wire [BANK_WIDTH-1:0] col_ba,
output wire [ROW_WIDTH-1:0] col_a,
output wire col_rmw,
output wire col_rd_wr,
output wire col_size,
output wire [ROW_WIDTH-1:0] col_row,
output wire [DATA_BUF_ADDR_WIDTH-1:0] col_data_buf_addr,
output wire [DATA_BUF_ADDR_WIDTH-1:0] col_wr_data_buf_addr,
output wire [nCK_PER_CLK-1:0] mc_ras_n,
output wire [nCK_PER_CLK-1:0] mc_cas_n,
output wire [nCK_PER_CLK-1:0] mc_we_n,
output wire [nCK_PER_CLK*ROW_WIDTH-1:0] mc_address,
output wire [nCK_PER_CLK*BANK_WIDTH-1:0] mc_bank,
output wire [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] mc_cs_n,
output wire [1:0] mc_odt,
output wire [nCK_PER_CLK-1:0] mc_cke,
output wire [3:0] mc_aux_out0,
output wire [3:0] mc_aux_out1,
output [2:0] mc_cmd,
output wire [5:0] mc_data_offset,
output wire [5:0] mc_data_offset_1,
output wire [5:0] mc_data_offset_2,
output wire [1:0] mc_cas_slot,
output wire [RANK_WIDTH-1:0] rnk_config,
// Inputs
input clk,
input rst,
input init_calib_complete,
input [RANK_VECT_INDX:0] req_rank_r,
input [BANK_VECT_INDX:0] req_bank_r,
input [nBANK_MACHS-1:0] req_ras,
input [nBANK_MACHS-1:0] req_cas,
input [nBANK_MACHS-1:0] req_wr_r,
input [nBANK_MACHS-1:0] grant_row_r,
input [nBANK_MACHS-1:0] grant_pre_r,
input [ROW_VECT_INDX:0] row_addr,
input [nBANK_MACHS-1:0] row_cmd_wr,
input insert_maint_r1,
input maint_zq_r,
input maint_sre_r,
input maint_srx_r,
input [RANK_WIDTH-1:0] maint_rank_r,
input [nBANK_MACHS-1:0] req_periodic_rd_r,
input [nBANK_MACHS-1:0] req_size_r,
input [nBANK_MACHS-1:0] rd_wr_r,
input [ROW_VECT_INDX:0] req_row_r,
input [ROW_VECT_INDX:0] col_addr,
input [DATA_BUF_ADDR_VECT_INDX:0] req_data_buf_addr_r,
input [nBANK_MACHS-1:0] grant_col_r,
input [nBANK_MACHS-1:0] grant_col_wr,
input [6*RANKS-1:0] calib_rddata_offset,
input [6*RANKS-1:0] calib_rddata_offset_1,
input [6*RANKS-1:0] calib_rddata_offset_2,
input [5:0] col_channel_offset,
input [nBANK_MACHS-1:0] grant_config_r,
input rnk_config_strobe,
input [7:0] slot_0_present,
input [7:0] slot_1_present,
input send_cmd0_row,
input send_cmd0_col,
input send_cmd1_row,
input send_cmd1_col,
input send_cmd2_row,
input send_cmd2_col,
input send_cmd2_pre,
input send_cmd3_col,
input sent_col,
input cs_en0,
input cs_en1,
input cs_en2,
input cs_en3
);
localparam OUT_CMD_WIDTH = RANK_WIDTH + BANK_WIDTH + ROW_WIDTH + 1 + 1 + 1;
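// OUT_CMD_WIDTH is the width of one command word, which bundles
// {rank, bank, row/column address, ras_n, cas_n, we_n}.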
reg col_rd_wr_ns;
reg col_rd_wr_r = 1'b0;
reg [OUT_CMD_WIDTH-1:0] col_cmd_r = {OUT_CMD_WIDTH {1'b0}};
reg [OUT_CMD_WIDTH-1:0] row_cmd_r = {OUT_CMD_WIDTH {1'b0}};
// calib_rd_data_offset for currently targeted rank
reg [5:0] rank_rddata_offset_0;
reg [5:0] rank_rddata_offset_1;
reg [5:0] rank_rddata_offset_2;
// Toggle CKE[0] when entering and exiting self-refresh, disable CKE[1]
assign mc_aux_out0[0] = (maint_sre_r || maint_srx_r) & insert_maint_r1;
assign mc_aux_out0[2] = 1'b0;
reg cke_r;
reg cke_ns;
generate
if(CKE_ODT_AUX == "FALSE")begin
always @(posedge clk)
begin
if (rst)
cke_r = 1'b1;
else
cke_r = cke_ns;
end
always @(*)
begin
cke_ns = 1'b1;
if (maint_sre_r & insert_maint_r1)
cke_ns = 1'b0;
else if (cke_r==1'b0)
begin
if (maint_srx_r & insert_maint_r1)
cke_ns = 1'b1;
else
cke_ns = 1'b0;
end
end
end
endgenerate
// Disable ODT & CKE toggle enable high bits
assign mc_aux_out1 = 4'b0;
// implement PHY command word
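// bit 0 marks a column command this cycle, bit 1 further qualifies it with
// the column read/write flag, bit 2 is set whenever no column command issues.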
assign mc_cmd[0] = sent_col;
assign mc_cmd[1] = EVEN_CWL_2T_MODE == "ON" ?
sent_col && col_rd_wr_r :
sent_col && col_rd_wr_ns;
assign mc_cmd[2] = ~sent_col;
// generate calib_rd_data_offset for current rank - only use rank 0 values for now
always @(calib_rddata_offset or calib_rddata_offset_1 or calib_rddata_offset_2) begin
rank_rddata_offset_0 = calib_rddata_offset[5:0];
rank_rddata_offset_1 = calib_rddata_offset_1[5:0];
rank_rddata_offset_2 = calib_rddata_offset_2[5:0];
end
// generate data offset
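// Select between the rank's calibrated read-data offset and a CWL-derived
// write offset, keyed off col_rd_wr; even-CWL 2T mode uses the registered
// copy of col_rd_wr, other modes the next-state value.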
generate
if(EVEN_CWL_2T_MODE == "ON") begin : gen_mc_data_offset_even_cwl_2t
assign mc_data_offset = ~sent_col ?
6'b0 :
col_rd_wr_r ?
rank_rddata_offset_0 + col_channel_offset :
nCK_PER_CLK == 2 ?
CWL - 2 + col_channel_offset :
// nCK_PER_CLK == 4
CWL + 2 + col_channel_offset;
assign mc_data_offset_1 = ~sent_col ?
6'b0 :
col_rd_wr_r ?
rank_rddata_offset_1 + col_channel_offset :
nCK_PER_CLK == 2 ?
CWL - 2 + col_channel_offset :
// nCK_PER_CLK == 4
CWL + 2 + col_channel_offset;
assign mc_data_offset_2 = ~sent_col ?
6'b0 :
col_rd_wr_r ?
rank_rddata_offset_2 + col_channel_offset :
nCK_PER_CLK == 2 ?
CWL - 2 + col_channel_offset :
// nCK_PER_CLK == 4
CWL + 2 + col_channel_offset;
end
else begin : gen_mc_data_offset_not_even_cwl_2t
assign mc_data_offset = ~sent_col ?
6'b0 :
col_rd_wr_ns ?
rank_rddata_offset_0 + col_channel_offset :
nCK_PER_CLK == 2 ?
CWL - 2 + col_channel_offset :
// nCK_PER_CLK == 4
CWL + 2 + col_channel_offset;
assign mc_data_offset_1 = ~sent_col ?
6'b0 :
col_rd_wr_ns ?
rank_rddata_offset_1 + col_channel_offset :
nCK_PER_CLK == 2 ?
CWL - 2 + col_channel_offset :
// nCK_PER_CLK == 4
CWL + 2 + col_channel_offset;
assign mc_data_offset_2 = ~sent_col ?
6'b0 :
col_rd_wr_ns ?
rank_rddata_offset_2 + col_channel_offset :
nCK_PER_CLK == 2 ?
CWL - 2 + col_channel_offset :
// nCK_PER_CLK == 4
CWL + 2 + col_channel_offset;
end
endgenerate
assign mc_cas_slot = col_channel_offset[1:0];
// Based on arbitration results, select the row and column commands.
integer i;
reg [OUT_CMD_WIDTH-1:0] row_cmd_ns;
generate
begin : row_mux
wire [OUT_CMD_WIDTH-1:0] maint_cmd =
{maint_rank_r, // maintenance rank
row_cmd_r[15+:(BANK_WIDTH+ROW_WIDTH-11)],
// bank plus upper address bits
1'b0, // A10 = 0 for ZQCS
row_cmd_r[3+:10], // address bits [9:0]
// ZQ, SRX or SRE/REFRESH
(maint_zq_r ? 3'b110 : maint_srx_r ? 3'b111 : 3'b001)
};
always @(/*AS*/grant_row_r or insert_maint_r1 or maint_cmd
or req_bank_r or req_cas or req_rank_r or req_ras
or row_addr or row_cmd_r or row_cmd_wr or rst)
begin
row_cmd_ns = rst
? {RANK_WIDTH{1'b0}}
: insert_maint_r1
? maint_cmd
: row_cmd_r;
for (i=0; i<nBANK_MACHS; i=i+1)
if (grant_row_r[i])
row_cmd_ns = {req_rank_r[(RANK_WIDTH*i)+:RANK_WIDTH],
req_bank_r[(BANK_WIDTH*i)+:BANK_WIDTH],
row_addr[(ROW_WIDTH*i)+:ROW_WIDTH],
req_ras[i],
req_cas[i],
row_cmd_wr[i]};
end
if (ADDR_CMD_MODE == "2T" && nCK_PER_CLK == 2)
always @(posedge clk) row_cmd_r <= #TCQ row_cmd_ns;
end // row_mux
endgenerate
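// Precharge command mux: present only when nCK_PER_CLK == 4 and the
// address/command mode is not 2T; it selects the command fields of whichever
// bank machine holds the precharge grant.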
reg [OUT_CMD_WIDTH-1:0] pre_cmd_ns;
generate
if((nCK_PER_CLK == 4) && (ADDR_CMD_MODE != "2T")) begin : pre_mux
reg [OUT_CMD_WIDTH-1:0] pre_cmd_r = {OUT_CMD_WIDTH {1'b0}};
always @(/*AS*/grant_pre_r or req_bank_r or req_cas or req_rank_r or req_ras
or row_addr or pre_cmd_r or row_cmd_wr or rst)
begin
pre_cmd_ns = rst
? {RANK_WIDTH{1'b0}}
: pre_cmd_r;
for (i=0; i<nBANK_MACHS; i=i+1)
if (grant_pre_r[i])
pre_cmd_ns = {req_rank_r[(RANK_WIDTH*i)+:RANK_WIDTH],
req_bank_r[(BANK_WIDTH*i)+:BANK_WIDTH],
row_addr[(ROW_WIDTH*i)+:ROW_WIDTH],
req_ras[i],
req_cas[i],
row_cmd_wr[i]};
end
end // pre_mux
endgenerate
reg [OUT_CMD_WIDTH-1:0] col_cmd_ns;
generate
begin : col_mux
reg col_periodic_rd_ns;
reg col_periodic_rd_r;
reg col_rmw_ns;
reg col_rmw_r;
reg col_size_ns;
reg col_size_r;
reg [ROW_WIDTH-1:0] col_row_ns;
reg [ROW_WIDTH-1:0] col_row_r;
reg [DATA_BUF_ADDR_WIDTH-1:0] col_data_buf_addr_ns;
reg [DATA_BUF_ADDR_WIDTH-1:0] col_data_buf_addr_r;
always @(col_addr or col_cmd_r or col_data_buf_addr_r
or col_periodic_rd_r or col_rmw_r or col_row_r
or col_size_r or grant_col_r or rd_wr_r or req_bank_r
or req_data_buf_addr_r or req_periodic_rd_r
or req_rank_r or req_row_r or req_size_r or req_wr_r
or rst or col_rd_wr_r)
begin
col_periodic_rd_ns = ~rst && col_periodic_rd_r;
col_cmd_ns = {(rst ? {RANK_WIDTH{1'b0}}
: col_cmd_r[(OUT_CMD_WIDTH-1)-:RANK_WIDTH]),
((rst && ECC != "OFF")
? {OUT_CMD_WIDTH-3-RANK_WIDTH{1'b0}}
: col_cmd_r[3+:(OUT_CMD_WIDTH-3-RANK_WIDTH)]),
(rst ? 3'b0 : col_cmd_r[2:0])};
col_rmw_ns = col_rmw_r;
col_size_ns = rst ? 1'b0 : col_size_r;
col_row_ns = col_row_r;
col_rd_wr_ns = col_rd_wr_r;
col_data_buf_addr_ns = col_data_buf_addr_r;
for (i=0; i<nBANK_MACHS; i=i+1)
if (grant_col_r[i]) begin
col_periodic_rd_ns = req_periodic_rd_r[i];
col_cmd_ns = {req_rank_r[(RANK_WIDTH*i)+:RANK_WIDTH],
req_bank_r[(BANK_WIDTH*i)+:BANK_WIDTH],
col_addr[(ROW_WIDTH*i)+:ROW_WIDTH],
1'b1,
1'b0,
rd_wr_r[i]};
col_rmw_ns = req_wr_r[i] && rd_wr_r[i];
col_size_ns = req_size_r[i];
col_row_ns = req_row_r[(ROW_WIDTH*i)+:ROW_WIDTH];
col_rd_wr_ns = rd_wr_r[i];
col_data_buf_addr_ns =
req_data_buf_addr_r[(DATA_BUF_ADDR_WIDTH*i)+:DATA_BUF_ADDR_WIDTH];
end
end // always @ (...
if (EARLY_WR_DATA_ADDR == "OFF") begin : early_wr_data_addr_off
assign col_wr_data_buf_addr = col_data_buf_addr_ns;
end
else begin : early_wr_data_addr_on
reg [DATA_BUF_ADDR_WIDTH-1:0] col_wr_data_buf_addr_ns;
reg [DATA_BUF_ADDR_WIDTH-1:0] col_wr_data_buf_addr_r;
always @(/*AS*/col_wr_data_buf_addr_r or grant_col_wr
or req_data_buf_addr_r) begin
col_wr_data_buf_addr_ns = col_wr_data_buf_addr_r;
for (i=0; i<nBANK_MACHS; i=i+1)
if (grant_col_wr[i])
col_wr_data_buf_addr_ns =
req_data_buf_addr_r[(DATA_BUF_ADDR_WIDTH*i)+:DATA_BUF_ADDR_WIDTH];
end
always @(posedge clk) col_wr_data_buf_addr_r <=
#TCQ col_wr_data_buf_addr_ns;
assign col_wr_data_buf_addr = col_wr_data_buf_addr_ns;
end
always @(posedge clk) col_periodic_rd_r <= #TCQ col_periodic_rd_ns;
always @(posedge clk) col_rmw_r <= #TCQ col_rmw_ns;
always @(posedge clk) col_size_r <= #TCQ col_size_ns;
always @(posedge clk) col_data_buf_addr_r <=
#TCQ col_data_buf_addr_ns;
if (ECC != "OFF" || EVEN_CWL_2T_MODE == "ON") begin
always @(posedge clk) col_cmd_r <= #TCQ col_cmd_ns;
always @(posedge clk) col_row_r <= #TCQ col_row_ns;
end
always @(posedge clk) col_rd_wr_r <= #TCQ col_rd_wr_ns;
if(EVEN_CWL_2T_MODE == "ON") begin
assign col_periodic_rd = col_periodic_rd_r;
assign col_ra = col_cmd_r[3+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH];
assign col_ba = col_cmd_r[3+ROW_WIDTH+:BANK_WIDTH];
assign col_a = col_cmd_r[3+:ROW_WIDTH];
assign col_rmw = col_rmw_r;
assign col_rd_wr = col_rd_wr_r;
assign col_size = col_size_r;
assign col_row = col_row_r;
assign col_data_buf_addr = col_data_buf_addr_r;
end
else begin
assign col_periodic_rd = col_periodic_rd_ns;
assign col_ra = col_cmd_ns[3+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH];
assign col_ba = col_cmd_ns[3+ROW_WIDTH+:BANK_WIDTH];
assign col_a = col_cmd_ns[3+:ROW_WIDTH];
assign col_rmw = col_rmw_ns;
assign col_rd_wr = col_rd_wr_ns;
assign col_size = col_size_ns;
assign col_row = col_row_ns;
assign col_data_buf_addr = col_data_buf_addr_ns;
end
end // col_mux
endgenerate
reg [OUT_CMD_WIDTH-1:0] cmd0 = {OUT_CMD_WIDTH{1'b1}};
reg cke0;
always @(send_cmd0_row or send_cmd0_col or row_cmd_ns or row_cmd_r or col_cmd_ns or col_cmd_r or cke_ns or cke_r ) begin
cmd0 = {OUT_CMD_WIDTH{1'b1}};
if (send_cmd0_row) cmd0 = row_cmd_ns;
if (send_cmd0_row && EVEN_CWL_2T_MODE == "ON" && nCK_PER_CLK == 2) cmd0 = row_cmd_r;
if (send_cmd0_col) cmd0 = col_cmd_ns;
if (send_cmd0_col && EVEN_CWL_2T_MODE == "ON") cmd0 = col_cmd_r;
if (send_cmd0_row) cke0 = cke_ns;
else cke0 = cke_r ;
end
reg [OUT_CMD_WIDTH-1:0] cmd1 = {OUT_CMD_WIDTH{1'b1}};
generate
if ((nCK_PER_CLK == 2) || (nCK_PER_CLK == 4))
always @(send_cmd1_row or send_cmd1_col or row_cmd_ns or col_cmd_ns or pre_cmd_ns) begin
cmd1 = {OUT_CMD_WIDTH{1'b1}};
if (send_cmd1_row) cmd1 = row_cmd_ns;
if (send_cmd1_col) cmd1 = col_cmd_ns;
end
endgenerate
reg [OUT_CMD_WIDTH-1:0] cmd2 = {OUT_CMD_WIDTH{1'b1}};
reg [OUT_CMD_WIDTH-1:0] cmd3 = {OUT_CMD_WIDTH{1'b1}};
generate
if (nCK_PER_CLK == 4)
always @(send_cmd2_row or send_cmd2_col or send_cmd2_pre or send_cmd3_col or row_cmd_ns or col_cmd_ns or pre_cmd_ns) begin
cmd2 = {OUT_CMD_WIDTH{1'b1}};
cmd3 = {OUT_CMD_WIDTH{1'b1}};
if (send_cmd2_row) cmd2 = row_cmd_ns;
if (send_cmd2_col) cmd2 = col_cmd_ns;
if (send_cmd2_pre) cmd2 = pre_cmd_ns;
if (send_cmd3_col) cmd3 = col_cmd_ns;
end
endgenerate
// Output command bus 0.
wire [RANK_WIDTH-1:0] ra0;
// assign address
assign {ra0, mc_bank[BANK_WIDTH-1:0], mc_address[ROW_WIDTH-1:0], mc_ras_n[0], mc_cas_n[0], mc_we_n[0]} = cmd0;
// Output command bus 1.
wire [RANK_WIDTH-1:0] ra1;
// assign address
assign {ra1, mc_bank[2*BANK_WIDTH-1:BANK_WIDTH], mc_address[2*ROW_WIDTH-1:ROW_WIDTH], mc_ras_n[1], mc_cas_n[1], mc_we_n[1]} = cmd1;
wire [RANK_WIDTH-1:0] ra2;
wire [RANK_WIDTH-1:0] ra3;
generate
if(nCK_PER_CLK == 4) begin
// Output command bus 2.
// assign address
assign {ra2, mc_bank[3*BANK_WIDTH-1:2*BANK_WIDTH], mc_address[3*ROW_WIDTH-1:2*ROW_WIDTH], mc_ras_n[2], mc_cas_n[2], mc_we_n[2]} = cmd2;
// Output command bus 3.
// assign address
assign {ra3, mc_bank[4*BANK_WIDTH-1:3*BANK_WIDTH], mc_address[4*ROW_WIDTH-1:3*ROW_WIDTH], mc_ras_n[3], mc_cas_n[3], mc_we_n[3]} =
cmd3;
end
endgenerate
generate
if(CKE_ODT_AUX == "FALSE")begin
assign mc_cke[0] = cke0;
assign mc_cke[1] = cke_ns;
if(nCK_PER_CLK == 4) begin
assign mc_cke[2] = cke_ns;
assign mc_cke[3] = cke_ns;
end
end
endgenerate
// Output cs busses.
localparam ONE = {nCS_PER_RANK{1'b1}};
wire [(CS_WIDTH*nCS_PER_RANK)-1:0] cs_one_hot =
{{CS_WIDTH{1'b0}},ONE};
assign mc_cs_n[CS_WIDTH*nCS_PER_RANK -1 :0 ] =
{(~(cs_one_hot << (nCS_PER_RANK*ra0)) | {CS_WIDTH*nCS_PER_RANK{~cs_en0}})};
assign mc_cs_n[2*CS_WIDTH*nCS_PER_RANK -1 : CS_WIDTH*nCS_PER_RANK ] =
{(~(cs_one_hot << (nCS_PER_RANK*ra1)) | {CS_WIDTH*nCS_PER_RANK{~cs_en1}})};
generate
if(nCK_PER_CLK == 4) begin
assign mc_cs_n[3*CS_WIDTH*nCS_PER_RANK -1 :2*CS_WIDTH*nCS_PER_RANK ] =
{(~(cs_one_hot << (nCS_PER_RANK*ra2)) | {CS_WIDTH*nCS_PER_RANK{~cs_en2}})};
assign mc_cs_n[4*CS_WIDTH*nCS_PER_RANK -1 :3*CS_WIDTH*nCS_PER_RANK ] =
{(~(cs_one_hot << (nCS_PER_RANK*ra3)) | {CS_WIDTH*nCS_PER_RANK{~cs_en3}})};
end
endgenerate
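  // Worked example of the active-low CS decode above (a sketch assuming
  // CS_WIDTH = 4, nCS_PER_RANK = 1, ra0 = 2, cs_en0 = 1; these values are
  // illustrative): cs_one_hot = 4'b0001, shifting left by 2 gives 4'b0100,
  // and the bitwise invert yields mc_cs_n[3:0] = 4'b1011, i.e. only the CS#
  // for rank 2 is driven low. With cs_en0 = 0 the OR term forces every CS#
  // bit high, deasserting chip select for that command slot.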
// Output rnk_config info.
reg [RANK_WIDTH-1:0] rnk_config_ns;
reg [RANK_WIDTH-1:0] rnk_config_r;
always @(/*AS*/grant_config_r
or rnk_config_r or rnk_config_strobe or req_rank_r or rst) begin
if (rst) rnk_config_ns = {RANK_WIDTH{1'b0}};
else begin
rnk_config_ns = rnk_config_r;
if (rnk_config_strobe)
for (i=0; i<nBANK_MACHS; i=i+1)
if (grant_config_r[i]) rnk_config_ns = req_rank_r[(RANK_WIDTH*i)+:RANK_WIDTH];
end
end
always @(posedge clk) rnk_config_r <= #TCQ rnk_config_ns;
assign rnk_config = rnk_config_ns;
// Generate ODT signals.
wire [CS_WIDTH-1:0] col_ra_one_hot = cs_one_hot << col_ra;
wire slot_0_select = (nSLOTS == 1) ? |(col_ra_one_hot & slot_0_present)
: (slot_0_present[2] & slot_0_present[0]) ?
|(col_ra_one_hot[CS_WIDTH-1:0] & {slot_0_present[2],
slot_0_present[0]}) : (slot_0_present[0])?
col_ra_one_hot[0] : 1'b0;
wire slot_0_read = EVEN_CWL_2T_MODE == "ON" ?
slot_0_select && col_rd_wr_r :
slot_0_select && col_rd_wr_ns;
wire slot_0_write = EVEN_CWL_2T_MODE == "ON" ?
slot_0_select && ~col_rd_wr_r :
slot_0_select && ~col_rd_wr_ns;
reg [1:0] slot_1_population = 2'b0;
reg[1:0] slot_0_population;
always @(/*AS*/slot_0_present) begin
slot_0_population = 2'b0;
for (i=0; i<8; i=i+1)
if (~slot_0_population[1])
if (slot_0_present[i] == 1'b1) slot_0_population =
slot_0_population + 2'b1;
end
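  // The loop above counts the ranks present in slot 0 but saturates at 2
  // (it stops incrementing once bit [1] is set). For example,
  // slot_0_present = 8'b0000_0101 yields slot_0_population = 2'd2, which is
  // then used below as the index of the first chip select belonging to slot 1.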
// ODT on in slot 0 for writes to slot 0 (and R/W to slot 1 for DDR3)
wire slot_0_odt = (DRAM_TYPE == "DDR3") ? ~slot_0_read : slot_0_write;
assign mc_aux_out0[1] = slot_0_odt & sent_col; // Only send for COL cmds
generate
if (nSLOTS > 1) begin : slot_1_configured
wire slot_1_select = (slot_1_present[3] & slot_1_present[1])?
|({col_ra_one_hot[slot_0_population+1],
col_ra_one_hot[slot_0_population]}) :
(slot_1_present[1]) ? col_ra_one_hot[slot_0_population] :1'b0;
wire slot_1_read = EVEN_CWL_2T_MODE == "ON" ?
slot_1_select && col_rd_wr_r :
slot_1_select && col_rd_wr_ns;
wire slot_1_write = EVEN_CWL_2T_MODE == "ON" ?
slot_1_select && ~col_rd_wr_r :
slot_1_select && ~col_rd_wr_ns;
// ODT on in slot 1 for writes to slot 1 (and R/W to slot 0 for DDR3)
wire slot_1_odt = (DRAM_TYPE == "DDR3") ? ~slot_1_read : slot_1_write;
assign mc_aux_out0[3] = slot_1_odt & sent_col; // Only send for COL cmds
end // if (nSLOTS > 1)
else begin
// Disable slot 1 ODT when not present
assign mc_aux_out0[3] = 1'b0;
end // else: !if(nSLOTS > 1)
endgenerate
generate
if(CKE_ODT_AUX == "FALSE")begin
reg[1:0] mc_aux_out_r ;
reg[1:0] mc_aux_out_r_1 ;
reg[1:0] mc_aux_out_r_2 ;
always@(posedge clk) begin
mc_aux_out_r[0] <= #TCQ mc_aux_out0[1] ;
mc_aux_out_r[1] <= #TCQ mc_aux_out0[3] ;
mc_aux_out_r_1 <= #TCQ mc_aux_out_r ;
mc_aux_out_r_2 <= #TCQ mc_aux_out_r_1 ;
end
if((nCK_PER_CLK == 4) && (nSLOTS > 1 )) begin:odt_high_time_4_1_dslot
assign mc_odt[0] = mc_aux_out0[1] | mc_aux_out_r[0] | mc_aux_out_r_1[0];
assign mc_odt[1] = mc_aux_out0[3] | mc_aux_out_r[1] | mc_aux_out_r_1[1];
end else if(nCK_PER_CLK == 4) begin:odt_high_time_4_1
assign mc_odt[0] = mc_aux_out0[1] | mc_aux_out_r[0] ;
assign mc_odt[1] = mc_aux_out0[3] | mc_aux_out_r[1] ;
end else if(nCK_PER_CLK == 2) begin:odt_high_time_2_1
assign mc_odt[0] = mc_aux_out0[1] | mc_aux_out_r[0] | mc_aux_out_r_1[0] | mc_aux_out_r_2[0] ;
assign mc_odt[1] = mc_aux_out0[3] | mc_aux_out_r[1] | mc_aux_out_r_1[1] | mc_aux_out_r_2[1] ;
end
end
endgenerate
endmodule
|
module mig_7series_v2_3_poc_meta #
(parameter SCANFROMRIGHT = 0,
parameter TCQ = 100,
parameter TAPCNTRWIDTH = 7,
parameter TAPSPERKCLK = 112)
(/*AUTOARG*/
// Outputs
mmcm_edge_detect_done, poc_backup, mmcm_lbclk_edge_aligned,
// Inputs
rst, clk, mmcm_edge_detect_rdy, run, run_polarity, run_end,
rise_lead_right, rise_trail_left, rise_lead_center,
rise_trail_center, rise_trail_right, rise_lead_left, ninety_offsets,
use_noise_window, ktap_at_right_edge, ktap_at_left_edge
);
localparam NINETY = TAPSPERKCLK/4;
function [TAPCNTRWIDTH-1:0] offset (input [TAPCNTRWIDTH-1:0] a,
input [1:0] b,
input integer base);
integer offset_ii;
begin
offset_ii = (a + b * NINETY) < base
? (a + b * NINETY)
: (a + b * NINETY - base);
offset = offset_ii[TAPCNTRWIDTH-1:0];
end
endfunction // offset
function [TAPCNTRWIDTH-1:0] mod_sub (input [TAPCNTRWIDTH-1:0] a,
input [TAPCNTRWIDTH-1:0] b,
input integer base);
begin
mod_sub = (a>=b) ? a-b : a+base-b;
end
endfunction // mod_sub
function [TAPCNTRWIDTH:0] center (input [TAPCNTRWIDTH-1:0] left,
input [TAPCNTRWIDTH-1:0] diff,
input integer base);
integer center_ii;
begin
center_ii = ({left, 1'b0} + diff < base * 2)
? {left, 1'b0} + diff + 32'h0
: {left, 1'b0} + diff - base * 2;
center = center_ii[TAPCNTRWIDTH:0];
end
endfunction // center
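  // Worked examples for the helper functions above (a sketch using the default
  // TAPSPERKCLK = 112, so NINETY = 28; the argument values are illustrative):
  //   offset(100, 2'd1, 112) = 100 + 28 - 112 = 16 (wraps past the end)
  //   mod_sub(10, 20, 112)   = 10 + 112 - 20 = 102
  //   center(100, 30, 112)   = 2*100 + 30 - 224 = 6, i.e. twice the wrapped
  //   midpoint (100 + 30/2) mod 112 = 3; the extra LSB keeps half-tap
  //   resolution.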
input rst;
input clk;
input mmcm_edge_detect_rdy;
wire reset_run_ends = rst || ~mmcm_edge_detect_rdy;
  // This input is used only for the SVA.
input [TAPCNTRWIDTH-1:0] run;
input run_end;
reg run_end_r, run_end_r1, run_end_r2, run_end_r3;
always @(posedge clk) run_end_r <= #TCQ run_end;
always @(posedge clk) run_end_r1 <= #TCQ run_end_r;
always @(posedge clk) run_end_r2 <= #TCQ run_end_r1;
always @(posedge clk) run_end_r3 <= #TCQ run_end_r2;
input run_polarity;
reg run_polarity_held_ns, run_polarity_held_r;
always @(posedge clk) run_polarity_held_r <= #TCQ run_polarity_held_ns;
always @(*) run_polarity_held_ns = run_end ? run_polarity : run_polarity_held_r;
reg [1:0] run_ends_r;
reg [1:0] run_ends_ns;
always @(posedge clk) run_ends_r <= #TCQ run_ends_ns;
always @(*) begin
run_ends_ns = run_ends_r;
if (reset_run_ends) run_ends_ns = 2'b0;
else case (run_ends_r)
2'b00 : run_ends_ns = run_ends_r + {1'b0, run_end_r3 && run_polarity_held_r};
2'b01, 2'b10 : run_ends_ns = run_ends_r + {1'b0, run_end_r3};
endcase // case (run_ends_r)
end
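  // Descriptive note: run_ends_r counts qualified run ends. From 2'b00 it
  // advances only when the delayed run_end (run_end_r3) arrives from a
  // positive-polarity run; from 2'b01 and 2'b10 any delayed run_end counts.
  // done_ns below asserts once the counter saturates at 2'b11 while the edge
  // detector is ready, and reset_run_ends clears the count whenever it is not.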
reg done_r;
wire done_ns = mmcm_edge_detect_rdy && &run_ends_r;
always @(posedge clk) done_r <= #TCQ done_ns;
output mmcm_edge_detect_done;
assign mmcm_edge_detect_done = done_r;
input [TAPCNTRWIDTH-1:0] rise_lead_right;
input [TAPCNTRWIDTH-1:0] rise_trail_left;
input [TAPCNTRWIDTH-1:0] rise_lead_center;
input [TAPCNTRWIDTH-1:0] rise_trail_center;
input [TAPCNTRWIDTH-1:0] rise_trail_right;
input [TAPCNTRWIDTH-1:0] rise_lead_left;
input [1:0] ninety_offsets;
wire [1:0] offsets = SCANFROMRIGHT == 1 ? ninety_offsets : 2'b00 - ninety_offsets;
wire [TAPCNTRWIDTH-1:0] rise_lead_center_offset_ns = offset(rise_lead_center, offsets, TAPSPERKCLK);
wire [TAPCNTRWIDTH-1:0] rise_trail_center_offset_ns = offset(rise_trail_center, offsets, TAPSPERKCLK);
reg [TAPCNTRWIDTH-1:0] rise_lead_center_offset_r, rise_trail_center_offset_r;
always @(posedge clk) rise_lead_center_offset_r <= #TCQ rise_lead_center_offset_ns;
always @(posedge clk) rise_trail_center_offset_r <= #TCQ rise_trail_center_offset_ns;
wire [TAPCNTRWIDTH-1:0] edge_diff_ns = mod_sub(rise_trail_center_offset_r, rise_lead_center_offset_r, TAPSPERKCLK);
reg [TAPCNTRWIDTH-1:0] edge_diff_r;
always @(posedge clk) edge_diff_r <= #TCQ edge_diff_ns;
wire [TAPCNTRWIDTH:0] edge_center_ns = center(rise_lead_center_offset_r, edge_diff_r, TAPSPERKCLK);
reg [TAPCNTRWIDTH:0] edge_center_r;
always @(posedge clk) edge_center_r <= #TCQ edge_center_ns;
input use_noise_window;
wire [TAPCNTRWIDTH-1:0] left = use_noise_window ? rise_lead_left : rise_trail_left;
wire [TAPCNTRWIDTH-1:0] right = use_noise_window ? rise_trail_right : rise_lead_right;
wire [TAPCNTRWIDTH-1:0] center_diff_ns = mod_sub(right, left, TAPSPERKCLK);
reg [TAPCNTRWIDTH-1:0] center_diff_r;
always @(posedge clk) center_diff_r <= #TCQ center_diff_ns;
wire [TAPCNTRWIDTH:0] window_center_ns = center(left, center_diff_r, TAPSPERKCLK);
reg [TAPCNTRWIDTH:0] window_center_r;
always @(posedge clk) window_center_r <= #TCQ window_center_ns;
localparam TAPSPERKCLKX2 = TAPSPERKCLK * 2;
wire [TAPCNTRWIDTH+1:0] left_center = {1'b0, SCANFROMRIGHT == 1 ? window_center_r : edge_center_r};
wire [TAPCNTRWIDTH+1:0] right_center = {1'b0, SCANFROMRIGHT == 1 ? edge_center_r : window_center_r};
wire [TAPCNTRWIDTH+1:0] diff_ns = right_center >= left_center
? right_center - left_center
: right_center + TAPSPERKCLKX2[TAPCNTRWIDTH+1:0] - left_center;
reg [TAPCNTRWIDTH+1:0] diff_r;
always @(posedge clk) diff_r <= #TCQ diff_ns;
wire [TAPCNTRWIDTH+1:0] abs_diff = diff_r > TAPSPERKCLKX2[TAPCNTRWIDTH+1:0]/2
? TAPSPERKCLKX2[TAPCNTRWIDTH+1:0] - diff_r
: diff_r;
reg [TAPCNTRWIDTH+1:0] prev_ns, prev_r;
always @(posedge clk) prev_r <= #TCQ prev_ns;
always @(*) prev_ns = done_ns ? diff_r : prev_r;
input ktap_at_right_edge;
input ktap_at_left_edge;
wire centering = !(ktap_at_right_edge || ktap_at_left_edge);
wire diffs_eq = abs_diff == diff_r;
reg diffs_eq_ns, diffs_eq_r;
always @(*) diffs_eq_ns = centering && ((done_r && done_ns) ? diffs_eq : diffs_eq_r);
always @(posedge clk) diffs_eq_r <= #TCQ diffs_eq_ns;
reg edge_aligned_r;
reg prev_valid_ns, prev_valid_r;
always @(posedge clk) prev_valid_r <= #TCQ prev_valid_ns;
always @(*) prev_valid_ns = (~rst && ~ktap_at_right_edge && ~ktap_at_left_edge && ~edge_aligned_r) && prev_valid_r | done_ns;
wire indicate_alignment = ~rst && centering && done_ns;
wire edge_aligned_ns = indicate_alignment && (~|diff_r || ~diffs_eq & diffs_eq_r);
always @(posedge clk) edge_aligned_r <= #TCQ edge_aligned_ns;
reg poc_backup_r;
wire poc_backup_ns = edge_aligned_ns && abs_diff > prev_r;
always @(posedge clk) poc_backup_r <= #TCQ poc_backup_ns;
output poc_backup;
assign poc_backup = poc_backup_r;
output mmcm_lbclk_edge_aligned;
assign mmcm_lbclk_edge_aligned = edge_aligned_r;
endmodule
|
module mig_7series_v2_3_bank_state #
(
parameter TCQ = 100,
parameter ADDR_CMD_MODE = "1T",
parameter BM_CNT_WIDTH = 2,
parameter BURST_MODE = "8",
parameter CWL = 5,
parameter DATA_BUF_ADDR_WIDTH = 8,
parameter DRAM_TYPE = "DDR3",
parameter ECC = "OFF",
parameter ID = 0,
parameter nBANK_MACHS = 4,
parameter nCK_PER_CLK = 2,
parameter nOP_WAIT = 0,
parameter nRAS_CLKS = 10,
parameter nRP = 10,
parameter nRTP = 4,
parameter nRCD = 5,
parameter nWTP_CLKS = 5,
parameter ORDERING = "NORM",
parameter RANKS = 4,
parameter RANK_WIDTH = 4,
parameter RAS_TIMER_WIDTH = 5,
parameter STARVE_LIMIT = 2
)
(/*AUTOARG*/
// Outputs
start_rcd, act_wait_r, rd_half_rmw, ras_timer_ns, end_rtp,
bank_wait_in_progress, start_pre_wait, op_exit_req, pre_wait_r,
allow_auto_pre, precharge_bm_end, demand_act_priority, rts_row,
act_this_rank_r, demand_priority, col_rdy_wr, rts_col, wr_this_rank_r,
rd_this_rank_r, rts_pre, rtc,
// Inputs
clk, rst, bm_end, pass_open_bank_r, sending_row, sending_pre, rcv_open_bank,
sending_col, rd_wr_r, req_wr_r, rd_data_addr, req_data_buf_addr_r,
phy_rddata_valid, rd_rmw, ras_timer_ns_in, rb_hit_busies_r, idle_r,
passing_open_bank, low_idle_cnt_r, op_exit_grant, tail_r,
auto_pre_r, pass_open_bank_ns, req_rank_r, req_rank_r_in,
start_rcd_in, inhbt_act_faw_r, wait_for_maint_r, head_r, sent_row,
demand_act_priority_in, order_q_zero, sent_col, q_has_rd,
q_has_priority, req_priority_r, idle_ns, demand_priority_in, inhbt_rd,
inhbt_wr, dq_busy_data, rnk_config_strobe, rnk_config_valid_r, rnk_config,
rnk_config_kill_rts_col, phy_mc_cmd_full, phy_mc_ctl_full, phy_mc_data_full
);
function integer clogb2 (input integer size); // ceiling logb2
begin
size = size - 1;
for (clogb2=1; size>1; clogb2=clogb2+1)
size = size >> 1;
end
endfunction // clogb2
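  // Example values for clogb2 (a ceiling log2 that never returns less than 1):
  // clogb2(1) = 1, clogb2(2) = 1, clogb2(5) = 3, clogb2(8) = 3, clogb2(9) = 4.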
input clk;
input rst;
// Activate wait state machine.
input bm_end;
reg bm_end_r1;
always @(posedge clk) bm_end_r1 <= #TCQ bm_end;
reg col_wait_r;
input pass_open_bank_r;
input sending_row;
reg act_wait_r_lcl;
input rcv_open_bank;
wire start_rcd_lcl = act_wait_r_lcl && sending_row;
output wire start_rcd;
assign start_rcd = start_rcd_lcl;
wire act_wait_ns = rst ||
((act_wait_r_lcl && ~start_rcd_lcl && ~rcv_open_bank) ||
bm_end_r1 || (pass_open_bank_r && bm_end));
always @(posedge clk) act_wait_r_lcl <= #TCQ act_wait_ns;
output wire act_wait_r;
assign act_wait_r = act_wait_r_lcl;
// RCD timer
//
// When CWL is even, CAS commands are issued on slot 0 and RAS commands are
// issued on slot 1. This implies that the RCD can never expire in the same
// cycle as the RAS (otherwise the CAS for a given transaction would precede
// the RAS). Similarly, this can also cause premature expiration for longer
// RCD. An offset must be added to RCD before translating it to the FPGA clock
// domain. In this mode, CAS are on the first DRAM clock cycle corresponding to
// a given FPGA cycle. In 2:1 mode add 2 to generate this offset aligned to
// the FPGA cycle. Likewise, add 4 to generate an aligned offset in 4:1 mode.
//
// When CWL is odd, RAS commands are issued on slot 0 and CAS commands are
  // issued on slot 1. There is a natural 1-cycle separation between RAS and CAS
// in the DRAM clock domain so the RCD can expire in the same FPGA cycle as the
// RAS command. In 2:1 mode, there are only 2 slots so direct translation
// correctly places the CAS with respect to the corresponding RAS. In 4:1 mode,
// there are two slots after CAS, so 2 is added to shift the timer into the
// next FPGA cycle for cases that can't expire in the current cycle.
//
  // In 2T mode, the offset from ROW to COL commands is fixed at 2. In 2:1 mode,
  // it is sufficient to translate to the half-rate domain and add the remainder.
  // In 4:1 mode, we must translate to the quarter-rate domain and add an
  // additional fabric cycle only if the remainder exceeds the fixed offset of 2.
localparam nRCD_CLKS =
nCK_PER_CLK == 1 ?
nRCD :
nCK_PER_CLK == 2 ?
ADDR_CMD_MODE == "2T" ?
(nRCD/2) + (nRCD%2) :
CWL % 2 ?
(nRCD/2) :
(nRCD+2) / 2 :
// (nCK_PER_CLK == 4)
ADDR_CMD_MODE == "2T" ?
(nRCD/4) + (nRCD%4 > 2 ? 1 : 0) :
CWL % 2 ?
(nRCD-2 ? (nRCD-2) / 4 + 1 : 1) :
nRCD/4 + 1;
localparam nRCD_CLKS_M2 = (nRCD_CLKS-2 <0) ? 0 : nRCD_CLKS-2;
localparam RCD_TIMER_WIDTH = clogb2(nRCD_CLKS_M2+1);
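  // Worked example (a sketch using the default nRCD = 5): in 2:1 mode with
  // even CWL, nRCD_CLKS = (5+2)/2 = 3, so nRCD_CLKS_M2 = 1 and
  // RCD_TIMER_WIDTH = clogb2(2) = 1; in 4:1 mode with even CWL,
  // nRCD_CLKS = 5/4 + 1 = 2, and with odd CWL it is (5-2)/4 + 1 = 1.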
localparam ZERO = 0;
localparam ONE = 1;
reg [RCD_TIMER_WIDTH-1:0] rcd_timer_r = {RCD_TIMER_WIDTH{1'b0}};
reg end_rcd;
reg rcd_active_r = 1'b0;
generate
if (nRCD_CLKS <= 2) begin : rcd_timer_leq_2
always @(/*AS*/start_rcd_lcl) end_rcd = start_rcd_lcl;
end
else if (nRCD_CLKS > 2) begin : rcd_timer_gt_2
reg [RCD_TIMER_WIDTH-1:0] rcd_timer_ns;
always @(/*AS*/rcd_timer_r or rst or start_rcd_lcl) begin
if (rst) rcd_timer_ns = ZERO[RCD_TIMER_WIDTH-1:0];
else begin
rcd_timer_ns = rcd_timer_r;
if (start_rcd_lcl) rcd_timer_ns = nRCD_CLKS_M2[RCD_TIMER_WIDTH-1:0];
else if (|rcd_timer_r) rcd_timer_ns =
rcd_timer_r - ONE[RCD_TIMER_WIDTH-1:0];
end
end
always @(posedge clk) rcd_timer_r <= #TCQ rcd_timer_ns;
wire end_rcd_ns = (rcd_timer_ns == ONE[RCD_TIMER_WIDTH-1:0]);
always @(posedge clk) end_rcd = end_rcd_ns;
wire rcd_active_ns = |rcd_timer_ns;
always @(posedge clk) rcd_active_r <= #TCQ rcd_active_ns;
end
endgenerate
// Figure out if the read that's completing is for an RMW for
// this bank machine. Delay by a state if CWL != 8 since the
// data is not ready in the RMW buffer for the early write
// data fetch that happens with ECC and CWL != 8.
// Create a state bit indicating we're waiting for the read
// half of the rmw to complete.
input sending_col;
input rd_wr_r;
input req_wr_r;
input [DATA_BUF_ADDR_WIDTH-1:0] rd_data_addr;
input [DATA_BUF_ADDR_WIDTH-1:0] req_data_buf_addr_r;
input phy_rddata_valid;
input rd_rmw;
reg rmw_rd_done = 1'b0;
reg rd_half_rmw_lcl = 1'b0;
output wire rd_half_rmw;
assign rd_half_rmw = rd_half_rmw_lcl;
reg rmw_wait_r = 1'b0;
generate
if (ECC != "OFF") begin : rmw_on
// Delay phy_rddata_valid and rd_rmw by one cycle to align them
// to req_data_buf_addr_r so that rmw_wait_r clears properly
reg phy_rddata_valid_r;
reg rd_rmw_r;
always @(posedge clk) begin
phy_rddata_valid_r <= #TCQ phy_rddata_valid;
rd_rmw_r <= #TCQ rd_rmw;
end
wire my_rmw_rd_ns = phy_rddata_valid_r && rd_rmw_r &&
(rd_data_addr == req_data_buf_addr_r);
if (CWL == 8) always @(my_rmw_rd_ns) rmw_rd_done = my_rmw_rd_ns;
else always @(posedge clk) rmw_rd_done = #TCQ my_rmw_rd_ns;
always @(/*AS*/rd_wr_r or req_wr_r) rd_half_rmw_lcl = req_wr_r && rd_wr_r;
wire rmw_wait_ns = ~rst &&
((rmw_wait_r && ~rmw_rd_done) || (rd_half_rmw_lcl && sending_col));
always @(posedge clk) rmw_wait_r <= #TCQ rmw_wait_ns;
end
endgenerate
// column wait state machine.
wire col_wait_ns = ~rst && ((col_wait_r && ~sending_col) || end_rcd
|| rcv_open_bank || (rmw_rd_done && rmw_wait_r));
always @(posedge clk) col_wait_r <= #TCQ col_wait_ns;
// Set up various RAS timer parameters, wires, etc.
localparam TWO = 2;
output reg [RAS_TIMER_WIDTH-1:0] ras_timer_ns;
reg [RAS_TIMER_WIDTH-1:0] ras_timer_r;
input [(2*(RAS_TIMER_WIDTH*nBANK_MACHS))-1:0] ras_timer_ns_in;
input [(nBANK_MACHS*2)-1:0] rb_hit_busies_r;
// On a bank pass, select the RAS timer from the passing bank machine.
reg [RAS_TIMER_WIDTH-1:0] passed_ras_timer;
integer i;
always @(/*AS*/ras_timer_ns_in or rb_hit_busies_r) begin
passed_ras_timer = {RAS_TIMER_WIDTH{1'b0}};
for (i=ID+1; i<(ID+nBANK_MACHS); i=i+1)
if (rb_hit_busies_r[i])
passed_ras_timer = ras_timer_ns_in[i*RAS_TIMER_WIDTH+:RAS_TIMER_WIDTH];
end
  // RAS timer (also reused as the WTP timer). When an open bank is passed,
  // this timer is passed to the new owner. The existing RAS value prevents
  // an activate from occurring too early.
wire start_wtp_timer = sending_col && ~rd_wr_r;
input idle_r;
always @(/*AS*/bm_end_r1 or ras_timer_r or rst or start_rcd_lcl
or start_wtp_timer) begin
if (bm_end_r1 || rst) ras_timer_ns = ZERO[RAS_TIMER_WIDTH-1:0];
else begin
ras_timer_ns = ras_timer_r;
if (start_rcd_lcl) ras_timer_ns =
nRAS_CLKS[RAS_TIMER_WIDTH-1:0] - TWO[RAS_TIMER_WIDTH-1:0];
if (start_wtp_timer) ras_timer_ns =
          // As the timer is being reused, it is essential to compare
          // before the new value is loaded.
(ras_timer_r <= (nWTP_CLKS-2)) ? nWTP_CLKS[RAS_TIMER_WIDTH-1:0] - TWO[RAS_TIMER_WIDTH-1:0]
: ras_timer_r - ONE[RAS_TIMER_WIDTH-1:0];
if (|ras_timer_r && ~start_wtp_timer) ras_timer_ns =
ras_timer_r - ONE[RAS_TIMER_WIDTH-1:0];
end
end // always @ (...
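  // Worked example of the timer reuse above (a sketch using the default
  // nRAS_CLKS = 10 and nWTP_CLKS = 5): an activate loads the timer with
  // 10 - 2 = 8. A write issued while the timer still holds a value greater
  // than nWTP_CLKS - 2 = 3 simply keeps decrementing (tRAS dominates),
  // whereas a write issued once it has fallen to 3 or below reloads it to 3
  // so the write-to-precharge window is honored.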
wire [RAS_TIMER_WIDTH-1:0] ras_timer_passed_ns = rcv_open_bank
? passed_ras_timer
: ras_timer_ns;
always @(posedge clk) ras_timer_r <= #TCQ ras_timer_passed_ns;
wire ras_timer_zero_ns = (ras_timer_ns == ZERO[RAS_TIMER_WIDTH-1:0]);
reg ras_timer_zero_r;
always @(posedge clk) ras_timer_zero_r <= #TCQ ras_timer_zero_ns;
  // RTP timer. Unless in 2T mode, add one for 2:1 mode. This accounts for the
  // loss of one DRAM CK due to the fixed offset between column and row commands.
  // In 2T mode, add the remainder instead. In 4:1 mode, the fixed offset is -2:
  // add 2 unless in 2T mode, in which case add 2 only when the remainder exceeds
  // the fixed offset, and 1 otherwise.
localparam nRTP_CLKS = (nCK_PER_CLK == 1)
? nRTP :
(nCK_PER_CLK == 2)
? (nRTP/2) + ((ADDR_CMD_MODE == "2T") ? nRTP%2 : 1) :
(nRTP/4) + ((ADDR_CMD_MODE == "2T") ? (nRTP%4 > 2 ? 2 : 1) : 2);
localparam nRTP_CLKS_M1 = ((nRTP_CLKS-1) <= 0) ? 0 : nRTP_CLKS-1;
localparam RTP_TIMER_WIDTH = clogb2(nRTP_CLKS_M1 + 1);
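  // Worked example (a sketch using the default nRTP = 4): in 2:1 non-2T mode,
  // nRTP_CLKS = 4/2 + 1 = 3; in 2:1 2T mode it is 2 + 0 = 2; in 4:1 non-2T
  // mode it is 1 + 2 = 3. For the 2:1 non-2T case, nRTP_CLKS_M1 = 2 and
  // RTP_TIMER_WIDTH = clogb2(3) = 2.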
reg [RTP_TIMER_WIDTH-1:0] rtp_timer_ns;
reg [RTP_TIMER_WIDTH-1:0] rtp_timer_r;
wire sending_col_not_rmw_rd = sending_col && ~rd_half_rmw_lcl;
always @(/*AS*/pass_open_bank_r or rst or rtp_timer_r
or sending_col_not_rmw_rd) begin
rtp_timer_ns = rtp_timer_r;
if (rst || pass_open_bank_r)
rtp_timer_ns = ZERO[RTP_TIMER_WIDTH-1:0];
else begin
if (sending_col_not_rmw_rd)
rtp_timer_ns = nRTP_CLKS_M1[RTP_TIMER_WIDTH-1:0];
if (|rtp_timer_r) rtp_timer_ns = rtp_timer_r - ONE[RTP_TIMER_WIDTH-1:0];
end
end
always @(posedge clk) rtp_timer_r <= #TCQ rtp_timer_ns;
wire end_rtp_lcl = ~pass_open_bank_r &&
((rtp_timer_r == ONE[RTP_TIMER_WIDTH-1:0]) ||
((nRTP_CLKS_M1 == 0) && sending_col_not_rmw_rd));
output wire end_rtp;
assign end_rtp = end_rtp_lcl;
// Optionally implement open page mode timer.
localparam OP_WIDTH = clogb2(nOP_WAIT + 1);
output wire bank_wait_in_progress;
output wire start_pre_wait;
input passing_open_bank;
input low_idle_cnt_r;
output wire op_exit_req;
input op_exit_grant;
input tail_r;
output reg pre_wait_r;
generate
if (nOP_WAIT == 0) begin : op_mode_disabled
assign bank_wait_in_progress = sending_col_not_rmw_rd || |rtp_timer_r ||
(pre_wait_r && ~ras_timer_zero_r);
assign start_pre_wait = end_rtp_lcl;
assign op_exit_req = 1'b0;
end
else begin : op_mode_enabled
reg op_wait_r;
assign bank_wait_in_progress = sending_col || |rtp_timer_r ||
(pre_wait_r && ~ras_timer_zero_r) ||
op_wait_r;
wire op_active = ~rst && ~passing_open_bank && ((end_rtp_lcl && tail_r)
|| op_wait_r);
wire op_wait_ns = ~op_exit_grant && op_active;
always @(posedge clk) op_wait_r <= #TCQ op_wait_ns;
assign start_pre_wait = op_exit_grant ||
(end_rtp_lcl && ~tail_r && ~passing_open_bank);
if (nOP_WAIT == -1)
assign op_exit_req = (low_idle_cnt_r && op_active);
else begin : op_cnt
reg [OP_WIDTH-1:0] op_cnt_r;
wire [OP_WIDTH-1:0] op_cnt_ns =
(passing_open_bank || op_exit_grant || rst)
? ZERO[OP_WIDTH-1:0]
: end_rtp_lcl
? nOP_WAIT[OP_WIDTH-1:0]
: |op_cnt_r
? op_cnt_r - ONE[OP_WIDTH-1:0]
: op_cnt_r;
always @(posedge clk) op_cnt_r <= #TCQ op_cnt_ns;
assign op_exit_req = (low_idle_cnt_r && op_active) ||
(op_wait_r && ~|op_cnt_r);
end
end
endgenerate
output allow_auto_pre;
wire allow_auto_pre = act_wait_r_lcl || rcd_active_r ||
(col_wait_r && ~sending_col);
// precharge wait state machine.
input auto_pre_r;
wire start_pre;
input pass_open_bank_ns;
wire pre_wait_ns = ~rst && (~pass_open_bank_ns &&
(start_pre_wait || (pre_wait_r && ~start_pre)));
always @(posedge clk) pre_wait_r <= #TCQ pre_wait_ns;
wire pre_request = pre_wait_r && ras_timer_zero_r && ~auto_pre_r;
// precharge timer.
localparam nRP_CLKS = (nCK_PER_CLK == 1) ? nRP :
(nCK_PER_CLK == 2) ? ((nRP/2) + (nRP%2)) :
/*(nCK_PER_CLK == 4)*/ ((nRP/4) + ((nRP%4) ? 1 : 0));
  // Subtract two because there is a minimum of two fabric states from the
  // end of the RP timer until the earliest possible arb cycle that can send
  // an activate.
localparam nRP_CLKS_M2 = (nRP_CLKS-2 < 0) ? 0 : nRP_CLKS-2;
localparam RP_TIMER_WIDTH = clogb2(nRP_CLKS_M2 + 1);
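  // Worked example (a sketch using the default nRP = 10): in 2:1 mode,
  // nRP_CLKS = 10/2 + 0 = 5, nRP_CLKS_M2 = 3 and RP_TIMER_WIDTH = clogb2(4) = 2;
  // in 4:1 mode, nRP_CLKS = 10/4 + 1 = 3 and nRP_CLKS_M2 = 1.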
input sending_pre;
output rts_pre;
generate
if((nCK_PER_CLK == 4) && (ADDR_CMD_MODE != "2T")) begin
assign start_pre = pre_wait_r && ras_timer_zero_r &&
(sending_pre || auto_pre_r);
assign rts_pre = ~sending_pre && pre_request;
end
else begin
assign start_pre = pre_wait_r && ras_timer_zero_r &&
(sending_row || auto_pre_r);
assign rts_pre = 1'b0;
end
endgenerate
reg [RP_TIMER_WIDTH-1:0] rp_timer_r = ZERO[RP_TIMER_WIDTH-1:0];
generate
if (nRP_CLKS_M2 > ZERO) begin : rp_timer
reg [RP_TIMER_WIDTH-1:0] rp_timer_ns;
always @(/*AS*/rp_timer_r or rst or start_pre)
if (rst) rp_timer_ns = ZERO[RP_TIMER_WIDTH-1:0];
else begin
rp_timer_ns = rp_timer_r;
if (start_pre) rp_timer_ns = nRP_CLKS_M2[RP_TIMER_WIDTH-1:0];
else if (|rp_timer_r) rp_timer_ns =
rp_timer_r - ONE[RP_TIMER_WIDTH-1:0];
end
always @(posedge clk) rp_timer_r <= #TCQ rp_timer_ns;
end // block: rp_timer
endgenerate
output wire precharge_bm_end;
assign precharge_bm_end = (rp_timer_r == ONE[RP_TIMER_WIDTH-1:0]) ||
(start_pre && (nRP_CLKS_M2 == ZERO));
// Compute RRD related activate inhibit.
// Compare this bank machine's rank with others, then
// select result based on grant. An alternative is to
// select the just issued rank with the grant and simply
// compare against this bank machine's rank. However, this
// serializes the selection of the rank and the compare processes.
// As implemented below, the compare occurs first, then the
// selection based on grant. This is faster.
input [RANK_WIDTH-1:0] req_rank_r;
input [(RANK_WIDTH*nBANK_MACHS*2)-1:0] req_rank_r_in;
reg inhbt_act_rrd;
input [(nBANK_MACHS*2)-1:0] start_rcd_in;
generate
integer j;
if (RANKS == 1)
always @(/*AS*/req_rank_r or req_rank_r_in or start_rcd_in) begin
inhbt_act_rrd = 1'b0;
for (j=(ID+1); j<(ID+nBANK_MACHS); j=j+1)
inhbt_act_rrd = inhbt_act_rrd || start_rcd_in[j];
end
else begin
always @(/*AS*/req_rank_r or req_rank_r_in or start_rcd_in) begin
inhbt_act_rrd = 1'b0;
for (j=(ID+1); j<(ID+nBANK_MACHS); j=j+1)
inhbt_act_rrd = inhbt_act_rrd ||
(start_rcd_in[j] &&
(req_rank_r_in[(j*RANK_WIDTH)+:RANK_WIDTH] == req_rank_r));
end
end
endgenerate
// Extract the activate command inhibit for the rank associated
// with this request. FAW and RRD are computed separately so that
// gate level timing can be carefully managed.
input [RANKS-1:0] inhbt_act_faw_r;
wire my_inhbt_act_faw = inhbt_act_faw_r[req_rank_r];
input wait_for_maint_r;
input head_r;
wire act_req = ~idle_r && head_r && act_wait_r && ras_timer_zero_r &&
~wait_for_maint_r;
// Implement simple starvation avoidance for act requests. Precharge
// requests don't need this because they are never gated off by
// timing events such as inhbt_act_rrd. Priority request timeout
// is fixed at a single trip around the round robin arbiter.
input sent_row;
wire rts_act_denied = act_req && sent_row && ~sending_row;
reg [BM_CNT_WIDTH-1:0] act_starve_limit_cntr_ns;
reg [BM_CNT_WIDTH-1:0] act_starve_limit_cntr_r;
generate
if (BM_CNT_WIDTH > 1) // Number of Bank Machs > 2
begin :BM_MORE_THAN_2
always @(/*AS*/act_req or act_starve_limit_cntr_r or rts_act_denied)
begin
act_starve_limit_cntr_ns = act_starve_limit_cntr_r;
if (~act_req)
act_starve_limit_cntr_ns = {BM_CNT_WIDTH{1'b0}};
else
if (rts_act_denied && &act_starve_limit_cntr_r)
act_starve_limit_cntr_ns = act_starve_limit_cntr_r +
{{BM_CNT_WIDTH-1{1'b0}}, 1'b1};
end
end
else // Number of Bank Machs == 2
begin :BM_EQUAL_2
always @(/*AS*/act_req or act_starve_limit_cntr_r or rts_act_denied)
begin
act_starve_limit_cntr_ns = act_starve_limit_cntr_r;
if (~act_req)
act_starve_limit_cntr_ns = {BM_CNT_WIDTH{1'b0}};
else
if (rts_act_denied && &act_starve_limit_cntr_r)
act_starve_limit_cntr_ns = act_starve_limit_cntr_r +
{1'b1};
end
end
endgenerate
always @(posedge clk) act_starve_limit_cntr_r <=
#TCQ act_starve_limit_cntr_ns;
reg demand_act_priority_r;
wire demand_act_priority_ns = act_req &&
(demand_act_priority_r || (rts_act_denied && &act_starve_limit_cntr_r));
always @(posedge clk) demand_act_priority_r <= #TCQ demand_act_priority_ns;
`ifdef MC_SVA
cover_demand_act_priority:
cover property (@(posedge clk) (~rst && demand_act_priority_r));
`endif
output wire demand_act_priority;
assign demand_act_priority = demand_act_priority_r && ~sending_row;
// compute act_demanded from other demand_act_priorities
input [(nBANK_MACHS*2)-1:0] demand_act_priority_in;
reg act_demanded = 1'b0;
generate
if (nBANK_MACHS > 1) begin : compute_act_demanded
always @(demand_act_priority_in[`BM_SHARED_BV])
act_demanded = |demand_act_priority_in[`BM_SHARED_BV];
end
endgenerate
wire row_demand_ok = demand_act_priority_r || ~act_demanded;
// Generate the Request To Send row arbitration signal.
output wire rts_row;
generate
if((nCK_PER_CLK == 4) && (ADDR_CMD_MODE != "2T"))
assign rts_row = ~sending_row && row_demand_ok &&
(act_req && ~my_inhbt_act_faw && ~inhbt_act_rrd);
else
assign rts_row = ~sending_row && row_demand_ok &&
((act_req && ~my_inhbt_act_faw && ~inhbt_act_rrd) ||
pre_request);
endgenerate
`ifdef MC_SVA
four_activate_window_wait:
cover property (@(posedge clk)
(~rst && ~sending_row && act_req && my_inhbt_act_faw));
ras_ras_delay_wait:
cover property (@(posedge clk)
(~rst && ~sending_row && act_req && inhbt_act_rrd));
`endif
// Provide rank machines early knowledge that this bank machine is
// going to send an activate to the rank. In this way, the rank
// machines just need to use the sending_row wire to figure out if
// they need to keep track of the activate.
output reg [RANKS-1:0] act_this_rank_r;
reg [RANKS-1:0] act_this_rank_ns;
always @(/*AS*/act_wait_r or req_rank_r) begin
act_this_rank_ns = {RANKS{1'b0}};
for (i = 0; i < RANKS; i = i + 1)
act_this_rank_ns[i] = act_wait_r && (i[RANK_WIDTH-1:0] == req_rank_r);
end
always @(posedge clk) act_this_rank_r <= #TCQ act_this_rank_ns;
// Generate request to send column command signal.
input order_q_zero;
wire req_bank_rdy_ns = order_q_zero && col_wait_r;
reg req_bank_rdy_r;
always @(posedge clk) req_bank_rdy_r <= #TCQ req_bank_rdy_ns;
// Determine if we have been denied a column command request.
input sent_col;
wire rts_col_denied = req_bank_rdy_r && sent_col && ~sending_col;
// Implement a starvation limit counter. Count the number of times a
// request to send a column command has been denied.
localparam STARVE_LIMIT_CNT = STARVE_LIMIT * nBANK_MACHS;
localparam STARVE_LIMIT_WIDTH = clogb2(STARVE_LIMIT_CNT);
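// Worked example (assuming the default parameter values declared for this
// module, STARVE_LIMIT = 2 and nBANK_MACHS = 4):
//   STARVE_LIMIT_CNT   = 2 * 4     = 8
//   STARVE_LIMIT_WIDTH = clogb2(8) = 3
// so "starved" below asserts once the counter has reached 7 and an eighth
// denial arrives.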
reg [STARVE_LIMIT_WIDTH-1:0] starve_limit_cntr_r;
reg [STARVE_LIMIT_WIDTH-1:0] starve_limit_cntr_ns;
always @(/*AS*/col_wait_r or rts_col_denied or starve_limit_cntr_r)
if (~col_wait_r)
starve_limit_cntr_ns = {STARVE_LIMIT_WIDTH{1'b0}};
else
if (rts_col_denied && (starve_limit_cntr_r != STARVE_LIMIT_CNT-1))
starve_limit_cntr_ns = starve_limit_cntr_r +
{{STARVE_LIMIT_WIDTH-1{1'b0}}, 1'b1};
else starve_limit_cntr_ns = starve_limit_cntr_r;
always @(posedge clk) starve_limit_cntr_r <= #TCQ starve_limit_cntr_ns;
input q_has_rd;
input q_has_priority;
// Decide if this bank machine should demand priority. Priority is demanded
// when the starvation limit counter is reached, or when a priority bit is set
// in the request or the queue.
wire starved = ((starve_limit_cntr_r == (STARVE_LIMIT_CNT-1)) &&
rts_col_denied);
input req_priority_r;
input idle_ns;
reg demand_priority_r;
wire demand_priority_ns = ~idle_ns && col_wait_ns &&
(demand_priority_r ||
(order_q_zero &&
(req_priority_r || q_has_priority)) ||
(starved && (q_has_rd || ~req_wr_r)));
always @(posedge clk) demand_priority_r <= #TCQ demand_priority_ns;
`ifdef MC_SVA
wire rdy_for_priority = ~rst && ~demand_priority_r && ~idle_ns &&
col_wait_ns;
req_triggers_demand_priority:
cover property (@(posedge clk)
(rdy_for_priority && req_priority_r && ~q_has_priority && ~starved));
q_priority_triggers_demand_priority:
cover property (@(posedge clk)
(rdy_for_priority && ~req_priority_r && q_has_priority && ~starved));
wire not_req_or_q_rdy_for_priority =
rdy_for_priority && ~req_priority_r && ~q_has_priority;
starved_req_triggers_demand_priority:
cover property (@(posedge clk)
(not_req_or_q_rdy_for_priority && starved && ~q_has_rd && ~req_wr_r));
starved_q_triggers_demand_priority:
cover property (@(posedge clk)
(not_req_or_q_rdy_for_priority && starved && q_has_rd && req_wr_r));
`endif
// compute demanded from other demand_priorities
input [(nBANK_MACHS*2)-1:0] demand_priority_in;
reg demanded = 1'b0;
generate
if (nBANK_MACHS > 1) begin : compute_demanded
always @(demand_priority_in[`BM_SHARED_BV]) demanded =
|demand_priority_in[`BM_SHARED_BV];
end
endgenerate
// In order to make sure that there is no starvation amongst a possibly
// unlimited stream of priority requests, add a second stage to the demand
// priority signal. If there are no other requests demanding priority, then
// go ahead and assert demand_priority. If any other requests are asserting
// demand_priority, hold off asserting demand_priority until these clear, then
// assert demand priority. It's possible to get multiple requests asserting
// demand priority simultaneously, but that's OK. Those requests will be
// serviced, demanded will fall, and another group of requests will be
// allowed to assert demand_priority.
reg demanded_prior_r;
wire demanded_prior_ns = demanded &&
(demanded_prior_r || ~demand_priority_r);
always @(posedge clk) demanded_prior_r <= #TCQ demanded_prior_ns;
output wire demand_priority;
assign demand_priority = demand_priority_r && ~demanded_prior_r &&
~sending_col;
`ifdef MC_SVA
demand_priority_gated:
cover property (@(posedge clk) (demand_priority_r && ~demand_priority));
generate
if (nBANK_MACHS >1) multiple_demand_priority:
cover property (@(posedge clk)
($countones(demand_priority_in[`BM_SHARED_BV]) > 1));
endgenerate
`endif
wire demand_ok = demand_priority_r || ~demanded;
// Figure out if the request in this bank machine matches the current rank
// configuration.
input rnk_config_strobe;
input rnk_config_kill_rts_col;
input rnk_config_valid_r;
input [RANK_WIDTH-1:0] rnk_config;
output wire rtc;
wire rnk_config_match = rnk_config_valid_r && (rnk_config == req_rank_r);
assign rtc = ~rnk_config_match && ~rnk_config_kill_rts_col && order_q_zero && col_wait_r && demand_ok;
// Using rank state provided by the rank machines, figure out if
// a read request should wait for WTR or RTW.
input [RANKS-1:0] inhbt_rd;
wire my_inhbt_rd = inhbt_rd[req_rank_r];
input [RANKS-1:0] inhbt_wr;
wire my_inhbt_wr = inhbt_wr[req_rank_r];
wire allow_rw = ~rd_wr_r ? ~my_inhbt_wr : ~my_inhbt_rd;
// DQ bus timing constraints.
input dq_busy_data;
// Column command is ready to arbitrate, except for databus restrictions.
wire col_rdy = (col_wait_r || ((nRCD_CLKS <= 1) && end_rcd) ||
(rcv_open_bank && nCK_PER_CLK == 2 && DRAM_TYPE=="DDR2" && BURST_MODE == "4") ||
(rcv_open_bank && nCK_PER_CLK == 4 && BURST_MODE == "8")) &&
order_q_zero;
// Column command is ready to arbitrate for sending a write. Used
// to generate early wr_data_addr for ECC mode.
output wire col_rdy_wr;
assign col_rdy_wr = col_rdy && ~rd_wr_r;
// Figure out if we're ready to send a column command based on all timing
// constraints.
wire col_cmd_rts = col_rdy && ~dq_busy_data && allow_rw && rnk_config_match;
`ifdef MC_SVA
col_wait_for_order_q: cover property
(@(posedge clk)
(~rst && col_wait_r && ~order_q_zero && ~dq_busy_data &&
allow_rw));
col_wait_for_dq_busy: cover property
(@(posedge clk)
(~rst && col_wait_r && order_q_zero && dq_busy_data &&
allow_rw));
col_wait_for_allow_rw: cover property
(@(posedge clk)
(~rst && col_wait_r && order_q_zero && ~dq_busy_data &&
~allow_rw));
`endif
// Implement flow control for the command and control FIFOs and for the data
// FIFO during writes
input phy_mc_ctl_full;
input phy_mc_cmd_full;
input phy_mc_data_full;
// Register ctl_full and cmd_full
reg phy_mc_ctl_full_r = 1'b0;
reg phy_mc_cmd_full_r = 1'b0;
always @(posedge clk)
if(rst) begin
phy_mc_ctl_full_r <= #TCQ 1'b0;
phy_mc_cmd_full_r <= #TCQ 1'b0;
end else begin
phy_mc_ctl_full_r <= #TCQ phy_mc_ctl_full;
phy_mc_cmd_full_r <= #TCQ phy_mc_cmd_full;
end
// register output data pre-fifo almost full condition and fold in WR status
reg ofs_rdy_r = 1'b0;
always @(posedge clk)
if(rst)
ofs_rdy_r <= #TCQ 1'b0;
else
ofs_rdy_r <= #TCQ ~phy_mc_cmd_full_r && ~phy_mc_ctl_full_r && ~(phy_mc_data_full && ~rd_wr_r);
// Disable the priority feature for one state after a config to ensure
// forward progress on the just-installed I/O config.
reg override_demand_r;
wire override_demand_ns = rnk_config_strobe || rnk_config_kill_rts_col;
always @(posedge clk) override_demand_r <= #TCQ override_demand_ns;
output wire rts_col;
assign rts_col = ~sending_col && (demand_ok || override_demand_r) &&
col_cmd_rts && ofs_rdy_r;
// As in act_this_rank, wr/rd_this_rank informs rank machines
// that this bank machine is doing a write/rd. Removes logic
// after the grant.
reg [RANKS-1:0] wr_this_rank_ns;
reg [RANKS-1:0] rd_this_rank_ns;
always @(/*AS*/rd_wr_r or req_rank_r) begin
wr_this_rank_ns = {RANKS{1'b0}};
rd_this_rank_ns = {RANKS{1'b0}};
for (i=0; i<RANKS; i=i+1) begin
wr_this_rank_ns[i] = ~rd_wr_r && (i[RANK_WIDTH-1:0] == req_rank_r);
rd_this_rank_ns[i] = rd_wr_r && (i[RANK_WIDTH-1:0] == req_rank_r);
end
end
output reg [RANKS-1:0] wr_this_rank_r;
always @(posedge clk) wr_this_rank_r <= #TCQ wr_this_rank_ns;
output reg [RANKS-1:0] rd_this_rank_r;
always @(posedge clk) rd_this_rank_r <= #TCQ rd_this_rank_ns;
endmodule
|
module mig_7series_v2_3_bank_state #
(
parameter TCQ = 100,
parameter ADDR_CMD_MODE = "1T",
parameter BM_CNT_WIDTH = 2,
parameter BURST_MODE = "8",
parameter CWL = 5,
parameter DATA_BUF_ADDR_WIDTH = 8,
parameter DRAM_TYPE = "DDR3",
parameter ECC = "OFF",
parameter ID = 0,
parameter nBANK_MACHS = 4,
parameter nCK_PER_CLK = 2,
parameter nOP_WAIT = 0,
parameter nRAS_CLKS = 10,
parameter nRP = 10,
parameter nRTP = 4,
parameter nRCD = 5,
parameter nWTP_CLKS = 5,
parameter ORDERING = "NORM",
parameter RANKS = 4,
parameter RANK_WIDTH = 4,
parameter RAS_TIMER_WIDTH = 5,
parameter STARVE_LIMIT = 2
)
(/*AUTOARG*/
// Outputs
start_rcd, act_wait_r, rd_half_rmw, ras_timer_ns, end_rtp,
bank_wait_in_progress, start_pre_wait, op_exit_req, pre_wait_r,
allow_auto_pre, precharge_bm_end, demand_act_priority, rts_row,
act_this_rank_r, demand_priority, col_rdy_wr, rts_col, wr_this_rank_r,
rd_this_rank_r, rts_pre, rtc,
// Inputs
clk, rst, bm_end, pass_open_bank_r, sending_row, sending_pre, rcv_open_bank,
sending_col, rd_wr_r, req_wr_r, rd_data_addr, req_data_buf_addr_r,
phy_rddata_valid, rd_rmw, ras_timer_ns_in, rb_hit_busies_r, idle_r,
passing_open_bank, low_idle_cnt_r, op_exit_grant, tail_r,
auto_pre_r, pass_open_bank_ns, req_rank_r, req_rank_r_in,
start_rcd_in, inhbt_act_faw_r, wait_for_maint_r, head_r, sent_row,
demand_act_priority_in, order_q_zero, sent_col, q_has_rd,
q_has_priority, req_priority_r, idle_ns, demand_priority_in, inhbt_rd,
inhbt_wr, dq_busy_data, rnk_config_strobe, rnk_config_valid_r, rnk_config,
rnk_config_kill_rts_col, phy_mc_cmd_full, phy_mc_ctl_full, phy_mc_data_full
);
function integer clogb2 (input integer size); // ceiling logb2
begin
size = size - 1;
for (clogb2=1; size>1; clogb2=clogb2+1)
size = size >> 1;
end
endfunction // clogb2
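// Example values returned by the loop above: clogb2(1) = 1, clogb2(8) = 3,
// clogb2(9) = 4.  Note the minimum return value is 1.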
input clk;
input rst;
// Activate wait state machine.
input bm_end;
reg bm_end_r1;
always @(posedge clk) bm_end_r1 <= #TCQ bm_end;
reg col_wait_r;
input pass_open_bank_r;
input sending_row;
reg act_wait_r_lcl;
input rcv_open_bank;
wire start_rcd_lcl = act_wait_r_lcl && sending_row;
output wire start_rcd;
assign start_rcd = start_rcd_lcl;
wire act_wait_ns = rst ||
((act_wait_r_lcl && ~start_rcd_lcl && ~rcv_open_bank) ||
bm_end_r1 || (pass_open_bank_r && bm_end));
always @(posedge clk) act_wait_r_lcl <= #TCQ act_wait_ns;
output wire act_wait_r;
assign act_wait_r = act_wait_r_lcl;
// RCD timer
//
// When CWL is even, CAS commands are issued on slot 0 and RAS commands are
// issued on slot 1. This implies that the RCD can never expire in the same
// cycle as the RAS (otherwise the CAS for a given transaction would precede
// the RAS). Similarly, this can also cause premature expiration for longer
// RCD. An offset must be added to RCD before translating it to the FPGA clock
// domain. In this mode, CAS are on the first DRAM clock cycle corresponding to
// a given FPGA cycle. In 2:1 mode add 2 to generate this offset aligned to
// the FPGA cycle. Likewise, add 4 to generate an aligned offset in 4:1 mode.
//
// When CWL is odd, RAS commands are issued on slot 0 and CAS commands are
// issued on slot 1. There is a natural 1 cycle separation between RAS and CAS
// in the DRAM clock domain so the RCD can expire in the same FPGA cycle as the
// RAS command. In 2:1 mode, there are only 2 slots so direct translation
// correctly places the CAS with respect to the corresponding RAS. In 4:1 mode,
// there are two slots after CAS, so 2 is added to shift the timer into the
// next FPGA cycle for cases that can't expire in the current cycle.
//
// In 2T mode, the offset from ROW to COL commands is fixed at 2. In 2:1 mode,
// it is sufficient to translate to the half-rate domain and add the remainder.
// In 4:1 mode, we must translate to the quarter-rate domain and add an
// additional fabric cycle only if the remainder exceeds the fixed offset of 2.
localparam nRCD_CLKS =
nCK_PER_CLK == 1 ?
nRCD :
nCK_PER_CLK == 2 ?
ADDR_CMD_MODE == "2T" ?
(nRCD/2) + (nRCD%2) :
CWL % 2 ?
(nRCD/2) :
(nRCD+2) / 2 :
// (nCK_PER_CLK == 4)
ADDR_CMD_MODE == "2T" ?
(nRCD/4) + (nRCD%4 > 2 ? 1 : 0) :
CWL % 2 ?
(nRCD-2 ? (nRCD-2) / 4 + 1 : 1) :
nRCD/4 + 1;
localparam nRCD_CLKS_M2 = (nRCD_CLKS-2 <0) ? 0 : nRCD_CLKS-2;
localparam RCD_TIMER_WIDTH = clogb2(nRCD_CLKS_M2+1);
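// Worked example (assuming the default parameters above: nRCD = 5, CWL = 5,
// nCK_PER_CLK = 2, ADDR_CMD_MODE = "1T"):
//   nRCD_CLKS       = nRCD/2        = 2 fabric cycles (odd CWL branch)
//   nRCD_CLKS_M2    = 2 - 2         = 0
//   RCD_TIMER_WIDTH = clogb2(0 + 1) = 1
// With an even CWL the 2:1 expression becomes (nRCD+2)/2 = 3, and in 2T mode
// it is (nRCD/2) + (nRCD%2) = 3.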
localparam ZERO = 0;
localparam ONE = 1;
reg [RCD_TIMER_WIDTH-1:0] rcd_timer_r = {RCD_TIMER_WIDTH{1'b0}};
reg end_rcd;
reg rcd_active_r = 1'b0;
generate
if (nRCD_CLKS <= 2) begin : rcd_timer_leq_2
always @(/*AS*/start_rcd_lcl) end_rcd = start_rcd_lcl;
end
else if (nRCD_CLKS > 2) begin : rcd_timer_gt_2
reg [RCD_TIMER_WIDTH-1:0] rcd_timer_ns;
always @(/*AS*/rcd_timer_r or rst or start_rcd_lcl) begin
if (rst) rcd_timer_ns = ZERO[RCD_TIMER_WIDTH-1:0];
else begin
rcd_timer_ns = rcd_timer_r;
if (start_rcd_lcl) rcd_timer_ns = nRCD_CLKS_M2[RCD_TIMER_WIDTH-1:0];
else if (|rcd_timer_r) rcd_timer_ns =
rcd_timer_r - ONE[RCD_TIMER_WIDTH-1:0];
end
end
always @(posedge clk) rcd_timer_r <= #TCQ rcd_timer_ns;
wire end_rcd_ns = (rcd_timer_ns == ONE[RCD_TIMER_WIDTH-1:0]);
always @(posedge clk) end_rcd = end_rcd_ns;
wire rcd_active_ns = |rcd_timer_ns;
always @(posedge clk) rcd_active_r <= #TCQ rcd_active_ns;
end
endgenerate
// Figure out if the read that's completing is for an RMW for
// this bank machine. Delay by a state if CWL != 8 since the
// data is not ready in the RMW buffer for the early write
// data fetch that happens with ECC and CWL != 8.
// Create a state bit indicating we're waiting for the read
// half of the rmw to complete.
input sending_col;
input rd_wr_r;
input req_wr_r;
input [DATA_BUF_ADDR_WIDTH-1:0] rd_data_addr;
input [DATA_BUF_ADDR_WIDTH-1:0] req_data_buf_addr_r;
input phy_rddata_valid;
input rd_rmw;
reg rmw_rd_done = 1'b0;
reg rd_half_rmw_lcl = 1'b0;
output wire rd_half_rmw;
assign rd_half_rmw = rd_half_rmw_lcl;
reg rmw_wait_r = 1'b0;
generate
if (ECC != "OFF") begin : rmw_on
// Delay phy_rddata_valid and rd_rmw by one cycle to align them
// to req_data_buf_addr_r so that rmw_wait_r clears properly
reg phy_rddata_valid_r;
reg rd_rmw_r;
always @(posedge clk) begin
phy_rddata_valid_r <= #TCQ phy_rddata_valid;
rd_rmw_r <= #TCQ rd_rmw;
end
wire my_rmw_rd_ns = phy_rddata_valid_r && rd_rmw_r &&
(rd_data_addr == req_data_buf_addr_r);
if (CWL == 8) always @(my_rmw_rd_ns) rmw_rd_done = my_rmw_rd_ns;
else always @(posedge clk) rmw_rd_done = #TCQ my_rmw_rd_ns;
always @(/*AS*/rd_wr_r or req_wr_r) rd_half_rmw_lcl = req_wr_r && rd_wr_r;
wire rmw_wait_ns = ~rst &&
((rmw_wait_r && ~rmw_rd_done) || (rd_half_rmw_lcl && sending_col));
always @(posedge clk) rmw_wait_r <= #TCQ rmw_wait_ns;
end
endgenerate
// column wait state machine.
wire col_wait_ns = ~rst && ((col_wait_r && ~sending_col) || end_rcd
|| rcv_open_bank || (rmw_rd_done && rmw_wait_r));
always @(posedge clk) col_wait_r <= #TCQ col_wait_ns;
// Set up various RAS timer parameters, wires, etc.
localparam TWO = 2;
output reg [RAS_TIMER_WIDTH-1:0] ras_timer_ns;
reg [RAS_TIMER_WIDTH-1:0] ras_timer_r;
input [(2*(RAS_TIMER_WIDTH*nBANK_MACHS))-1:0] ras_timer_ns_in;
input [(nBANK_MACHS*2)-1:0] rb_hit_busies_r;
// On a bank pass, select the RAS timer from the passing bank machine.
reg [RAS_TIMER_WIDTH-1:0] passed_ras_timer;
integer i;
always @(/*AS*/ras_timer_ns_in or rb_hit_busies_r) begin
passed_ras_timer = {RAS_TIMER_WIDTH{1'b0}};
for (i=ID+1; i<(ID+nBANK_MACHS); i=i+1)
if (rb_hit_busies_r[i])
passed_ras_timer = ras_timer_ns_in[i*RAS_TIMER_WIDTH+:RAS_TIMER_WIDTH];
end
// RAS and (reused for) WTP timer. When an open bank is passed, this
// timer is passed to the new owner. The existing RAS prevents
// an activate from occurring too early.
wire start_wtp_timer = sending_col && ~rd_wr_r;
input idle_r;
always @(/*AS*/bm_end_r1 or ras_timer_r or rst or start_rcd_lcl
or start_wtp_timer) begin
if (bm_end_r1 || rst) ras_timer_ns = ZERO[RAS_TIMER_WIDTH-1:0];
else begin
ras_timer_ns = ras_timer_r;
if (start_rcd_lcl) ras_timer_ns =
nRAS_CLKS[RAS_TIMER_WIDTH-1:0] - TWO[RAS_TIMER_WIDTH-1:0];
if (start_wtp_timer) ras_timer_ns =
// As the timer is being reused, it is essential to compare
// before the new value is loaded.
(ras_timer_r <= (nWTP_CLKS-2)) ? nWTP_CLKS[RAS_TIMER_WIDTH-1:0] - TWO[RAS_TIMER_WIDTH-1:0]
: ras_timer_r - ONE[RAS_TIMER_WIDTH-1:0];
if (|ras_timer_r && ~start_wtp_timer) ras_timer_ns =
ras_timer_r - ONE[RAS_TIMER_WIDTH-1:0];
end
end // always @ (...
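// Worked example (defaults nRAS_CLKS = 10, nWTP_CLKS = 5): an activate loads
// the timer with 10 - 2 = 8; a write reloads it to 5 - 2 = 3 unless it already
// holds a larger value, in which case it simply decrements.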
wire [RAS_TIMER_WIDTH-1:0] ras_timer_passed_ns = rcv_open_bank
? passed_ras_timer
: ras_timer_ns;
always @(posedge clk) ras_timer_r <= #TCQ ras_timer_passed_ns;
wire ras_timer_zero_ns = (ras_timer_ns == ZERO[RAS_TIMER_WIDTH-1:0]);
reg ras_timer_zero_r;
always @(posedge clk) ras_timer_zero_r <= #TCQ ras_timer_zero_ns;
// RTP timer. Unless in 2T mode, add one for 2:1 mode. This accounts for the loss
// of one DRAM CK due to the fixed column-command to row-command offset. In 2T
// mode, add the remainder. In 4:1 mode, the fixed offset is -2. Add 2 unless in
// 2T mode, in which case we add 1 if the remainder exceeds the fixed offset.
localparam nRTP_CLKS = (nCK_PER_CLK == 1)
? nRTP :
(nCK_PER_CLK == 2)
? (nRTP/2) + ((ADDR_CMD_MODE == "2T") ? nRTP%2 : 1) :
(nRTP/4) + ((ADDR_CMD_MODE == "2T") ? (nRTP%4 > 2 ? 2 : 1) : 2);
localparam nRTP_CLKS_M1 = ((nRTP_CLKS-1) <= 0) ? 0 : nRTP_CLKS-1;
localparam RTP_TIMER_WIDTH = clogb2(nRTP_CLKS_M1 + 1);
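// Worked example (defaults nRTP = 4, nCK_PER_CLK = 2, ADDR_CMD_MODE = "1T"):
//   nRTP_CLKS       = (4/2) + 1     = 3
//   nRTP_CLKS_M1    = 3 - 1         = 2
//   RTP_TIMER_WIDTH = clogb2(2 + 1) = 2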
reg [RTP_TIMER_WIDTH-1:0] rtp_timer_ns;
reg [RTP_TIMER_WIDTH-1:0] rtp_timer_r;
wire sending_col_not_rmw_rd = sending_col && ~rd_half_rmw_lcl;
always @(/*AS*/pass_open_bank_r or rst or rtp_timer_r
or sending_col_not_rmw_rd) begin
rtp_timer_ns = rtp_timer_r;
if (rst || pass_open_bank_r)
rtp_timer_ns = ZERO[RTP_TIMER_WIDTH-1:0];
else begin
if (sending_col_not_rmw_rd)
rtp_timer_ns = nRTP_CLKS_M1[RTP_TIMER_WIDTH-1:0];
if (|rtp_timer_r) rtp_timer_ns = rtp_timer_r - ONE[RTP_TIMER_WIDTH-1:0];
end
end
always @(posedge clk) rtp_timer_r <= #TCQ rtp_timer_ns;
wire end_rtp_lcl = ~pass_open_bank_r &&
((rtp_timer_r == ONE[RTP_TIMER_WIDTH-1:0]) ||
((nRTP_CLKS_M1 == 0) && sending_col_not_rmw_rd));
output wire end_rtp;
assign end_rtp = end_rtp_lcl;
// Optionally implement open page mode timer.
localparam OP_WIDTH = clogb2(nOP_WAIT + 1);
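// With the default nOP_WAIT = 0, OP_WIDTH = clogb2(1) = 1 and the
// op_mode_disabled branch below is used.  nOP_WAIT = -1 keeps a bank open
// until op_exit_grant, while a positive value holds it open for roughly
// nOP_WAIT fabric cycles after end_rtp (or until the idle count runs low).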
output wire bank_wait_in_progress;
output wire start_pre_wait;
input passing_open_bank;
input low_idle_cnt_r;
output wire op_exit_req;
input op_exit_grant;
input tail_r;
output reg pre_wait_r;
generate
if (nOP_WAIT == 0) begin : op_mode_disabled
assign bank_wait_in_progress = sending_col_not_rmw_rd || |rtp_timer_r ||
(pre_wait_r && ~ras_timer_zero_r);
assign start_pre_wait = end_rtp_lcl;
assign op_exit_req = 1'b0;
end
else begin : op_mode_enabled
reg op_wait_r;
assign bank_wait_in_progress = sending_col || |rtp_timer_r ||
(pre_wait_r && ~ras_timer_zero_r) ||
op_wait_r;
wire op_active = ~rst && ~passing_open_bank && ((end_rtp_lcl && tail_r)
|| op_wait_r);
wire op_wait_ns = ~op_exit_grant && op_active;
always @(posedge clk) op_wait_r <= #TCQ op_wait_ns;
assign start_pre_wait = op_exit_grant ||
(end_rtp_lcl && ~tail_r && ~passing_open_bank);
if (nOP_WAIT == -1)
assign op_exit_req = (low_idle_cnt_r && op_active);
else begin : op_cnt
reg [OP_WIDTH-1:0] op_cnt_r;
wire [OP_WIDTH-1:0] op_cnt_ns =
(passing_open_bank || op_exit_grant || rst)
? ZERO[OP_WIDTH-1:0]
: end_rtp_lcl
? nOP_WAIT[OP_WIDTH-1:0]
: |op_cnt_r
? op_cnt_r - ONE[OP_WIDTH-1:0]
: op_cnt_r;
always @(posedge clk) op_cnt_r <= #TCQ op_cnt_ns;
assign op_exit_req = (low_idle_cnt_r && op_active) ||
(op_wait_r && ~|op_cnt_r);
end
end
endgenerate
output allow_auto_pre;
wire allow_auto_pre = act_wait_r_lcl || rcd_active_r ||
(col_wait_r && ~sending_col);
// precharge wait state machine.
input auto_pre_r;
wire start_pre;
input pass_open_bank_ns;
wire pre_wait_ns = ~rst && (~pass_open_bank_ns &&
(start_pre_wait || (pre_wait_r && ~start_pre)));
always @(posedge clk) pre_wait_r <= #TCQ pre_wait_ns;
wire pre_request = pre_wait_r && ras_timer_zero_r && ~auto_pre_r;
// precharge timer.
localparam nRP_CLKS = (nCK_PER_CLK == 1) ? nRP :
(nCK_PER_CLK == 2) ? ((nRP/2) + (nRP%2)) :
/*(nCK_PER_CLK == 4)*/ ((nRP/4) + ((nRP%4) ? 1 : 0));
// Subtract two because there are a minimum of two fabric states from
// end of RP timer until earliest possible arb to send act.
localparam nRP_CLKS_M2 = (nRP_CLKS-2 < 0) ? 0 : nRP_CLKS-2;
localparam RP_TIMER_WIDTH = clogb2(nRP_CLKS_M2 + 1);
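// Worked example (defaults nRP = 10, nCK_PER_CLK = 2):
//   nRP_CLKS       = (10/2) + (10%2) = 5
//   nRP_CLKS_M2    = 5 - 2           = 3
//   RP_TIMER_WIDTH = clogb2(3 + 1)   = 2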
input sending_pre;
output rts_pre;
generate
if((nCK_PER_CLK == 4) && (ADDR_CMD_MODE != "2T")) begin
assign start_pre = pre_wait_r && ras_timer_zero_r &&
(sending_pre || auto_pre_r);
assign rts_pre = ~sending_pre && pre_request;
end
else begin
assign start_pre = pre_wait_r && ras_timer_zero_r &&
(sending_row || auto_pre_r);
assign rts_pre = 1'b0;
end
endgenerate
reg [RP_TIMER_WIDTH-1:0] rp_timer_r = ZERO[RP_TIMER_WIDTH-1:0];
generate
if (nRP_CLKS_M2 > ZERO) begin : rp_timer
reg [RP_TIMER_WIDTH-1:0] rp_timer_ns;
always @(/*AS*/rp_timer_r or rst or start_pre)
if (rst) rp_timer_ns = ZERO[RP_TIMER_WIDTH-1:0];
else begin
rp_timer_ns = rp_timer_r;
if (start_pre) rp_timer_ns = nRP_CLKS_M2[RP_TIMER_WIDTH-1:0];
else if (|rp_timer_r) rp_timer_ns =
rp_timer_r - ONE[RP_TIMER_WIDTH-1:0];
end
always @(posedge clk) rp_timer_r <= #TCQ rp_timer_ns;
end // block: rp_timer
endgenerate
output wire precharge_bm_end;
assign precharge_bm_end = (rp_timer_r == ONE[RP_TIMER_WIDTH-1:0]) ||
(start_pre && (nRP_CLKS_M2 == ZERO));
// Compute RRD related activate inhibit.
// Compare this bank machine's rank with others, then
// select result based on grant. An alternative is to
// select the just issued rank with the grant and simply
// compare against this bank machine's rank. However, this
// serializes the selection of the rank and the compare processes.
// As implemented below, the compare occurs first, then the
// selection based on grant. This is faster.
input [RANK_WIDTH-1:0] req_rank_r;
input [(RANK_WIDTH*nBANK_MACHS*2)-1:0] req_rank_r_in;
reg inhbt_act_rrd;
input [(nBANK_MACHS*2)-1:0] start_rcd_in;
generate
integer j;
if (RANKS == 1)
always @(/*AS*/req_rank_r or req_rank_r_in or start_rcd_in) begin
inhbt_act_rrd = 1'b0;
for (j=(ID+1); j<(ID+nBANK_MACHS); j=j+1)
inhbt_act_rrd = inhbt_act_rrd || start_rcd_in[j];
end
else begin
always @(/*AS*/req_rank_r or req_rank_r_in or start_rcd_in) begin
inhbt_act_rrd = 1'b0;
for (j=(ID+1); j<(ID+nBANK_MACHS); j=j+1)
inhbt_act_rrd = inhbt_act_rrd ||
(start_rcd_in[j] &&
(req_rank_r_in[(j*RANK_WIDTH)+:RANK_WIDTH] == req_rank_r));
end
end
endgenerate
// Extract the activate command inhibit for the rank associated
// with this request. FAW and RRD are computed separately so that
// gate level timing can be carefully managed.
input [RANKS-1:0] inhbt_act_faw_r;
wire my_inhbt_act_faw = inhbt_act_faw_r[req_rank_r];
input wait_for_maint_r;
input head_r;
wire act_req = ~idle_r && head_r && act_wait_r && ras_timer_zero_r &&
~wait_for_maint_r;
// Implement simple starvation avoidance for act requests. Precharge
// requests don't need this because they are never gated off by
// timing events such as inhbt_act_rrd. Priority request timeout
// is fixed at a single trip around the round robin arbiter.
input sent_row;
wire rts_act_denied = act_req && sent_row && ~sending_row;
reg [BM_CNT_WIDTH-1:0] act_starve_limit_cntr_ns;
reg [BM_CNT_WIDTH-1:0] act_starve_limit_cntr_r;
generate
if (BM_CNT_WIDTH > 1) // Number of Bank Machs > 2
begin :BM_MORE_THAN_2
always @(/*AS*/act_req or act_starve_limit_cntr_r or rts_act_denied)
begin
act_starve_limit_cntr_ns = act_starve_limit_cntr_r;
if (~act_req)
act_starve_limit_cntr_ns = {BM_CNT_WIDTH{1'b0}};
else
if (rts_act_denied && &act_starve_limit_cntr_r)
act_starve_limit_cntr_ns = act_starve_limit_cntr_r +
{{BM_CNT_WIDTH-1{1'b0}}, 1'b1};
end
end
else // Number of Bank Machs == 2
begin :BM_EQUAL_2
always @(/*AS*/act_req or act_starve_limit_cntr_r or rts_act_denied)
begin
act_starve_limit_cntr_ns = act_starve_limit_cntr_r;
if (~act_req)
act_starve_limit_cntr_ns = {BM_CNT_WIDTH{1'b0}};
else
if (rts_act_denied && &act_starve_limit_cntr_r)
act_starve_limit_cntr_ns = act_starve_limit_cntr_r +
{1'b1};
end
end
endgenerate
always @(posedge clk) act_starve_limit_cntr_r <=
#TCQ act_starve_limit_cntr_ns;
reg demand_act_priority_r;
wire demand_act_priority_ns = act_req &&
(demand_act_priority_r || (rts_act_denied && &act_starve_limit_cntr_r));
always @(posedge clk) demand_act_priority_r <= #TCQ demand_act_priority_ns;
`ifdef MC_SVA
cover_demand_act_priority:
cover property (@(posedge clk) (~rst && demand_act_priority_r));
`endif
output wire demand_act_priority;
assign demand_act_priority = demand_act_priority_r && ~sending_row;
// compute act_demanded from other demand_act_priorities
input [(nBANK_MACHS*2)-1:0] demand_act_priority_in;
reg act_demanded = 1'b0;
generate
if (nBANK_MACHS > 1) begin : compute_act_demanded
always @(demand_act_priority_in[`BM_SHARED_BV])
act_demanded = |demand_act_priority_in[`BM_SHARED_BV];
end
endgenerate
wire row_demand_ok = demand_act_priority_r || ~act_demanded;
// Generate the Request To Send row arbitration signal.
output wire rts_row;
generate
if((nCK_PER_CLK == 4) && (ADDR_CMD_MODE != "2T"))
assign rts_row = ~sending_row && row_demand_ok &&
(act_req && ~my_inhbt_act_faw && ~inhbt_act_rrd);
else
assign rts_row = ~sending_row && row_demand_ok &&
((act_req && ~my_inhbt_act_faw && ~inhbt_act_rrd) ||
pre_request);
endgenerate
`ifdef MC_SVA
four_activate_window_wait:
cover property (@(posedge clk)
(~rst && ~sending_row && act_req && my_inhbt_act_faw));
ras_ras_delay_wait:
cover property (@(posedge clk)
(~rst && ~sending_row && act_req && inhbt_act_rrd));
`endif
// Provide rank machines early knowledge that this bank machine is
// going to send an activate to the rank. In this way, the rank
// machines just need to use the sending_row wire to figure out if
// they need to keep track of the activate.
output reg [RANKS-1:0] act_this_rank_r;
reg [RANKS-1:0] act_this_rank_ns;
always @(/*AS*/act_wait_r or req_rank_r) begin
act_this_rank_ns = {RANKS{1'b0}};
for (i = 0; i < RANKS; i = i + 1)
act_this_rank_ns[i] = act_wait_r && (i[RANK_WIDTH-1:0] == req_rank_r);
end
always @(posedge clk) act_this_rank_r <= #TCQ act_this_rank_ns;
// Generate request to send column command signal.
input order_q_zero;
wire req_bank_rdy_ns = order_q_zero && col_wait_r;
reg req_bank_rdy_r;
always @(posedge clk) req_bank_rdy_r <= #TCQ req_bank_rdy_ns;
// Determine if we have been denied a column command request.
input sent_col;
wire rts_col_denied = req_bank_rdy_r && sent_col && ~sending_col;
// Implement a starvation limit counter. Count the number of times a
// request to send a column command has been denied.
localparam STARVE_LIMIT_CNT = STARVE_LIMIT * nBANK_MACHS;
localparam STARVE_LIMIT_WIDTH = clogb2(STARVE_LIMIT_CNT);
reg [STARVE_LIMIT_WIDTH-1:0] starve_limit_cntr_r;
reg [STARVE_LIMIT_WIDTH-1:0] starve_limit_cntr_ns;
always @(/*AS*/col_wait_r or rts_col_denied or starve_limit_cntr_r)
if (~col_wait_r)
starve_limit_cntr_ns = {STARVE_LIMIT_WIDTH{1'b0}};
else
if (rts_col_denied && (starve_limit_cntr_r != STARVE_LIMIT_CNT-1))
starve_limit_cntr_ns = starve_limit_cntr_r +
{{STARVE_LIMIT_WIDTH-1{1'b0}}, 1'b1};
else starve_limit_cntr_ns = starve_limit_cntr_r;
always @(posedge clk) starve_limit_cntr_r <= #TCQ starve_limit_cntr_ns;
input q_has_rd;
input q_has_priority;
// Decide if this bank machine should demand priority. Priority is demanded
// when the starvation limit counter is reached, or when a priority bit is set
// in the request or the queue.
wire starved = ((starve_limit_cntr_r == (STARVE_LIMIT_CNT-1)) &&
rts_col_denied);
input req_priority_r;
input idle_ns;
reg demand_priority_r;
wire demand_priority_ns = ~idle_ns && col_wait_ns &&
(demand_priority_r ||
(order_q_zero &&
(req_priority_r || q_has_priority)) ||
(starved && (q_has_rd || ~req_wr_r)));
always @(posedge clk) demand_priority_r <= #TCQ demand_priority_ns;
`ifdef MC_SVA
wire rdy_for_priority = ~rst && ~demand_priority_r && ~idle_ns &&
col_wait_ns;
req_triggers_demand_priority:
cover property (@(posedge clk)
(rdy_for_priority && req_priority_r && ~q_has_priority && ~starved));
q_priority_triggers_demand_priority:
cover property (@(posedge clk)
(rdy_for_priority && ~req_priority_r && q_has_priority && ~starved));
wire not_req_or_q_rdy_for_priority =
rdy_for_priority && ~req_priority_r && ~q_has_priority;
starved_req_triggers_demand_priority:
cover property (@(posedge clk)
(not_req_or_q_rdy_for_priority && starved && ~q_has_rd && ~req_wr_r));
starved_q_triggers_demand_priority:
cover property (@(posedge clk)
(not_req_or_q_rdy_for_priority && starved && q_has_rd && req_wr_r));
`endif
// compute demanded from other demand_priorities
input [(nBANK_MACHS*2)-1:0] demand_priority_in;
reg demanded = 1'b0;
generate
if (nBANK_MACHS > 1) begin : compute_demanded
always @(demand_priority_in[`BM_SHARED_BV]) demanded =
|demand_priority_in[`BM_SHARED_BV];
end
endgenerate
// In order to make sure that there is no starvation amongst a possibly
// unlimited stream of priority requests, add a second stage to the demand
// priority signal. If there are no other requests demanding priority, then
// go ahead and assert demand_priority. If any other requests are asserting
// demand_priority, hold off asserting demand_priority until these clear, then
// assert demand priority. It's possible to get multiple requests asserting
// demand priority simultaneously, but that's OK. Those requests will be
// serviced, demanded will fall, and another group of requests will be
// allowed to assert demand_priority.
reg demanded_prior_r;
wire demanded_prior_ns = demanded &&
(demanded_prior_r || ~demand_priority_r);
always @(posedge clk) demanded_prior_r <= #TCQ demanded_prior_ns;
output wire demand_priority;
assign demand_priority = demand_priority_r && ~demanded_prior_r &&
~sending_col;
`ifdef MC_SVA
demand_priority_gated:
cover property (@(posedge clk) (demand_priority_r && ~demand_priority));
generate
if (nBANK_MACHS >1) multiple_demand_priority:
cover property (@(posedge clk)
($countones(demand_priority_in[`BM_SHARED_BV]) > 1));
endgenerate
`endif
wire demand_ok = demand_priority_r || ~demanded;
// Figure out if the request in this bank machine matches the current rank
// configuration.
input rnk_config_strobe;
input rnk_config_kill_rts_col;
input rnk_config_valid_r;
input [RANK_WIDTH-1:0] rnk_config;
output wire rtc;
wire rnk_config_match = rnk_config_valid_r && (rnk_config == req_rank_r);
assign rtc = ~rnk_config_match && ~rnk_config_kill_rts_col && order_q_zero && col_wait_r && demand_ok;
// Using rank state provided by the rank machines, figure out if
// a read request should wait for WTR or RTW.
input [RANKS-1:0] inhbt_rd;
wire my_inhbt_rd = inhbt_rd[req_rank_r];
input [RANKS-1:0] inhbt_wr;
wire my_inhbt_wr = inhbt_wr[req_rank_r];
wire allow_rw = ~rd_wr_r ? ~my_inhbt_wr : ~my_inhbt_rd;
// DQ bus timing constraints.
input dq_busy_data;
// Column command is ready to arbitrate, except for databus restrictions.
wire col_rdy = (col_wait_r || ((nRCD_CLKS <= 1) && end_rcd) ||
(rcv_open_bank && nCK_PER_CLK == 2 && DRAM_TYPE=="DDR2" && BURST_MODE == "4") ||
(rcv_open_bank && nCK_PER_CLK == 4 && BURST_MODE == "8")) &&
order_q_zero;
// Column command is ready to arbitrate for sending a write. Used
// to generate early wr_data_addr for ECC mode.
output wire col_rdy_wr;
assign col_rdy_wr = col_rdy && ~rd_wr_r;
// Figure out if we're ready to send a column command based on all timing
// constraints.
wire col_cmd_rts = col_rdy && ~dq_busy_data && allow_rw && rnk_config_match;
`ifdef MC_SVA
col_wait_for_order_q: cover property
(@(posedge clk)
(~rst && col_wait_r && ~order_q_zero && ~dq_busy_data &&
allow_rw));
col_wait_for_dq_busy: cover property
(@(posedge clk)
(~rst && col_wait_r && order_q_zero && dq_busy_data &&
allow_rw));
col_wait_for_allow_rw: cover property
(@(posedge clk)
(~rst && col_wait_r && order_q_zero && ~dq_busy_data &&
~allow_rw));
`endif
// Implement flow control for the command and control FIFOs and for the data
// FIFO during writes
input phy_mc_ctl_full;
input phy_mc_cmd_full;
input phy_mc_data_full;
// Register ctl_full and cmd_full
reg phy_mc_ctl_full_r = 1'b0;
reg phy_mc_cmd_full_r = 1'b0;
always @(posedge clk)
if(rst) begin
phy_mc_ctl_full_r <= #TCQ 1'b0;
phy_mc_cmd_full_r <= #TCQ 1'b0;
end else begin
phy_mc_ctl_full_r <= #TCQ phy_mc_ctl_full;
phy_mc_cmd_full_r <= #TCQ phy_mc_cmd_full;
end
// register output data pre-fifo almost full condition and fold in WR status
reg ofs_rdy_r = 1'b0;
always @(posedge clk)
if(rst)
ofs_rdy_r <= #TCQ 1'b0;
else
ofs_rdy_r <= #TCQ ~phy_mc_cmd_full_r && ~phy_mc_ctl_full_r && ~(phy_mc_data_full && ~rd_wr_r);
// Disable the priority feature for one state after a config to ensure
// forward progress on the just-installed I/O config.
reg override_demand_r;
wire override_demand_ns = rnk_config_strobe || rnk_config_kill_rts_col;
always @(posedge clk) override_demand_r <= #TCQ override_demand_ns;
output wire rts_col;
assign rts_col = ~sending_col && (demand_ok || override_demand_r) &&
col_cmd_rts && ofs_rdy_r;
// As in act_this_rank, wr/rd_this_rank informs rank machines
// that this bank machine is doing a write/rd. Removes logic
// after the grant.
reg [RANKS-1:0] wr_this_rank_ns;
reg [RANKS-1:0] rd_this_rank_ns;
always @(/*AS*/rd_wr_r or req_rank_r) begin
wr_this_rank_ns = {RANKS{1'b0}};
rd_this_rank_ns = {RANKS{1'b0}};
for (i=0; i<RANKS; i=i+1) begin
wr_this_rank_ns[i] = ~rd_wr_r && (i[RANK_WIDTH-1:0] == req_rank_r);
rd_this_rank_ns[i] = rd_wr_r && (i[RANK_WIDTH-1:0] == req_rank_r);
end
end
output reg [RANKS-1:0] wr_this_rank_r;
always @(posedge clk) wr_this_rank_r <= #TCQ wr_this_rank_ns;
output reg [RANKS-1:0] rd_this_rank_r;
always @(posedge clk) rd_this_rank_r <= #TCQ rd_this_rank_ns;
endmodule
|
module outputs)
wire [TAPCNTRWIDTH-1:0] fall_lead_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_lead_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_lead_right; // From u_edge_right of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_trail_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_trail_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_trail_right; // From u_edge_right of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_lead_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_lead_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_trail_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_trail_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] run; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire run_end; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire run_polarity; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire [SAMPCNTRWIDTH:0] samples; // From u_poc_cc of mig_7series_v2_3_poc_cc.v
wire [SAMPCNTRWIDTH:0] samps_hi_held; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire [SAMPCNTRWIDTH:0] samps_solid_thresh; // From u_poc_cc of mig_7series_v2_3_poc_cc.v
wire [TAPCNTRWIDTH-1:0] tap; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
// End of automatics
output psen;
output [TAPCNTRWIDTH-1:0] rise_lead_right;
output [TAPCNTRWIDTH-1:0] rise_trail_right;
output mmcm_edge_detect_done;
output mmcm_lbclk_edge_aligned;
mig_7series_v2_3_poc_tap_base #
(/*AUTOINSTPARAM*/
// Parameters
.MMCM_SAMP_WAIT (MMCM_SAMP_WAIT),
.POC_USE_METASTABLE_SAMP (POC_USE_METASTABLE_SAMP),
.SAMPCNTRWIDTH (SAMPCNTRWIDTH),
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_poc_tap_base
(/*AUTOINST*/
// Outputs
.psen (psen),
.psincdec (psincdec),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.samps_hi_held (samps_hi_held[SAMPCNTRWIDTH:0]),
.tap (tap[TAPCNTRWIDTH-1:0]),
// Inputs
.clk (clk),
.pd_out (pd_out),
.poc_sample_pd (poc_sample_pd),
.psdone (psdone),
.rst (rst),
.samples (samples[SAMPCNTRWIDTH:0]),
.samps_solid_thresh (samps_solid_thresh[SAMPCNTRWIDTH:0]));
mig_7series_v2_3_poc_meta #
(/*AUTOINSTPARAM*/
// Parameters
.SCANFROMRIGHT (SCANFROMRIGHT),
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_poc_meta
(/*AUTOINST*/
// Outputs
.mmcm_edge_detect_done (mmcm_edge_detect_done),
.mmcm_lbclk_edge_aligned (mmcm_lbclk_edge_aligned),
.poc_backup (poc_backup),
// Inputs
.clk (clk),
.ktap_at_left_edge (ktap_at_left_edge),
.ktap_at_right_edge (ktap_at_right_edge),
.mmcm_edge_detect_rdy (mmcm_edge_detect_rdy),
.ninety_offsets (ninety_offsets[1:0]),
.rise_lead_center (rise_lead_center[TAPCNTRWIDTH-1:0]),
.rise_lead_left (rise_lead_left[TAPCNTRWIDTH-1:0]),
.rise_lead_right (rise_lead_right[TAPCNTRWIDTH-1:0]),
.rise_trail_center (rise_trail_center[TAPCNTRWIDTH-1:0]),
.rise_trail_left (rise_trail_left[TAPCNTRWIDTH-1:0]),
.rise_trail_right (rise_trail_right[TAPCNTRWIDTH-1:0]),
.rst (rst),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.use_noise_window (use_noise_window));
/*mig_7series_v2_3_poc_edge_store AUTO_TEMPLATE "edge_\(.*\)$" (
.\(.*\)lead (\1lead_@@"vl-bits"),
.\(.*\)trail (\1trail_@@"vl-bits"),
.select0 (ktap_at_@_edge),
.select1 (1'b1),)*/
mig_7series_v2_3_poc_edge_store #
(/*AUTOINSTPARAM*/
// Parameters
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_edge_right
(/*AUTOINST*/
// Outputs
.fall_lead (fall_lead_right[TAPCNTRWIDTH-1:0]), // Templated
.fall_trail (fall_trail_right[TAPCNTRWIDTH-1:0]), // Templated
.rise_lead (rise_lead_right[TAPCNTRWIDTH-1:0]), // Templated
.rise_trail (rise_trail_right[TAPCNTRWIDTH-1:0]), // Templated
// Inputs
.clk (clk),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.select0 (ktap_at_right_edge), // Templated
.select1 (1'b1), // Templated
.tap (tap[TAPCNTRWIDTH-1:0]));
mig_7series_v2_3_poc_edge_store #
(/*AUTOINSTPARAM*/
// Parameters
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_edge_left
(/*AUTOINST*/
// Outputs
.fall_lead (fall_lead_left[TAPCNTRWIDTH-1:0]), // Templated
.fall_trail (fall_trail_left[TAPCNTRWIDTH-1:0]), // Templated
.rise_lead (rise_lead_left[TAPCNTRWIDTH-1:0]), // Templated
.rise_trail (rise_trail_left[TAPCNTRWIDTH-1:0]), // Templated
// Inputs
.clk (clk),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.select0 (ktap_at_left_edge), // Templated
.select1 (1'b1), // Templated
.tap (tap[TAPCNTRWIDTH-1:0]));
wire not_ktap_at_right_edge = ~ktap_at_right_edge;
wire not_ktap_at_left_edge = ~ktap_at_left_edge;
/*mig_7series_v2_3_poc_edge_store AUTO_TEMPLATE "edge_\(.*\)$" (
.\(.*\)lead (\1lead_@@"vl-bits"),
.\(.*\)trail (\1trail_@@"vl-bits"),
.select0 (not_ktap_at_right_edge),
.select1 (not_ktap_at_left_edge),)*/
mig_7series_v2_3_poc_edge_store #
(/*AUTOINSTPARAM*/
// Parameters
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_edge_center
(/*AUTOINST*/
// Outputs
.fall_lead (fall_lead_center[TAPCNTRWIDTH-1:0]), // Templated
.fall_trail (fall_trail_center[TAPCNTRWIDTH-1:0]), // Templated
.rise_lead (rise_lead_center[TAPCNTRWIDTH-1:0]), // Templated
.rise_trail (rise_trail_center[TAPCNTRWIDTH-1:0]), // Templated
// Inputs
.clk (clk),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.select0 (not_ktap_at_right_edge), // Templated
.select1 (not_ktap_at_left_edge), // Templated
.tap (tap[TAPCNTRWIDTH-1:0]));
mig_7series_v2_3_poc_cc #
(/*AUTOINSTPARAM*/
// Parameters
.CCENABLE (CCENABLE),
.PCT_SAMPS_SOLID (PCT_SAMPS_SOLID),
.SAMPCNTRWIDTH (SAMPCNTRWIDTH),
.SAMPLES (SAMPLES),
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TCQ (TCQ))
u_poc_cc
(/*AUTOINST*/
// Outputs
.poc_error (poc_error),
.samples (samples[SAMPCNTRWIDTH:0]),
.samps_solid_thresh (samps_solid_thresh[SAMPCNTRWIDTH:0]),
// Inputs
.clk (clk),
.fall_lead_center (fall_lead_center[TAPCNTRWIDTH-1:0]),
.fall_lead_left (fall_lead_left[TAPCNTRWIDTH-1:0]),
.fall_lead_right (fall_lead_right[TAPCNTRWIDTH-1:0]),
.fall_trail_center (fall_trail_center[TAPCNTRWIDTH-1:0]),
.fall_trail_left (fall_trail_left[TAPCNTRWIDTH-1:0]),
.fall_trail_right (fall_trail_right[TAPCNTRWIDTH-1:0]),
.ktap_at_left_edge (ktap_at_left_edge),
.ktap_at_right_edge (ktap_at_right_edge),
.mmcm_edge_detect_done (mmcm_edge_detect_done),
.mmcm_lbclk_edge_aligned (mmcm_lbclk_edge_aligned),
.psen (psen),
.rise_lead_center (rise_lead_center[TAPCNTRWIDTH-1:0]),
.rise_lead_left (rise_lead_left[TAPCNTRWIDTH-1:0]),
.rise_lead_right (rise_lead_right[TAPCNTRWIDTH-1:0]),
.rise_trail_center (rise_trail_center[TAPCNTRWIDTH-1:0]),
.rise_trail_left (rise_trail_left[TAPCNTRWIDTH-1:0]),
.rise_trail_right (rise_trail_right[TAPCNTRWIDTH-1:0]),
.rst (rst),
.samps_hi_held (samps_hi_held[SAMPCNTRWIDTH:0]),
.tap (tap[TAPCNTRWIDTH-1:0]));
endmodule
|
module outputs)
wire [TAPCNTRWIDTH-1:0] fall_lead_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_lead_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_lead_right; // From u_edge_right of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_trail_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_trail_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_trail_right; // From u_edge_right of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_lead_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_lead_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_trail_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_trail_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] run; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire run_end; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire run_polarity; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire [SAMPCNTRWIDTH:0] samples; // From u_poc_cc of mig_7series_v2_3_poc_cc.v
wire [SAMPCNTRWIDTH:0] samps_hi_held; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire [SAMPCNTRWIDTH:0] samps_solid_thresh; // From u_poc_cc of mig_7series_v2_3_poc_cc.v
wire [TAPCNTRWIDTH-1:0] tap; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
// End of automatics
output psen;
output [TAPCNTRWIDTH-1:0] rise_lead_right;
output [TAPCNTRWIDTH-1:0] rise_trail_right;
output mmcm_edge_detect_done;
output mmcm_lbclk_edge_aligned;
mig_7series_v2_3_poc_tap_base #
(/*AUTOINSTPARAM*/
// Parameters
.MMCM_SAMP_WAIT (MMCM_SAMP_WAIT),
.POC_USE_METASTABLE_SAMP (POC_USE_METASTABLE_SAMP),
.SAMPCNTRWIDTH (SAMPCNTRWIDTH),
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_poc_tap_base
(/*AUTOINST*/
// Outputs
.psen (psen),
.psincdec (psincdec),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.samps_hi_held (samps_hi_held[SAMPCNTRWIDTH:0]),
.tap (tap[TAPCNTRWIDTH-1:0]),
// Inputs
.clk (clk),
.pd_out (pd_out),
.poc_sample_pd (poc_sample_pd),
.psdone (psdone),
.rst (rst),
.samples (samples[SAMPCNTRWIDTH:0]),
.samps_solid_thresh (samps_solid_thresh[SAMPCNTRWIDTH:0]));
mig_7series_v2_3_poc_meta #
(/*AUTOINSTPARAM*/
// Parameters
.SCANFROMRIGHT (SCANFROMRIGHT),
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_poc_meta
(/*AUTOINST*/
// Outputs
.mmcm_edge_detect_done (mmcm_edge_detect_done),
.mmcm_lbclk_edge_aligned (mmcm_lbclk_edge_aligned),
.poc_backup (poc_backup),
// Inputs
.clk (clk),
.ktap_at_left_edge (ktap_at_left_edge),
.ktap_at_right_edge (ktap_at_right_edge),
.mmcm_edge_detect_rdy (mmcm_edge_detect_rdy),
.ninety_offsets (ninety_offsets[1:0]),
.rise_lead_center (rise_lead_center[TAPCNTRWIDTH-1:0]),
.rise_lead_left (rise_lead_left[TAPCNTRWIDTH-1:0]),
.rise_lead_right (rise_lead_right[TAPCNTRWIDTH-1:0]),
.rise_trail_center (rise_trail_center[TAPCNTRWIDTH-1:0]),
.rise_trail_left (rise_trail_left[TAPCNTRWIDTH-1:0]),
.rise_trail_right (rise_trail_right[TAPCNTRWIDTH-1:0]),
.rst (rst),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.use_noise_window (use_noise_window));
/*mig_7series_v2_3_poc_edge_store AUTO_TEMPLATE "edge_\(.*\)$" (
.\(.*\)lead (\1lead_@@"vl-bits"),
.\(.*\)trail (\1trail_@@"vl-bits"),
.select0 (ktap_at_@_edge),
.select1 (1'b1),)*/
mig_7series_v2_3_poc_edge_store #
(/*AUTOINSTPARAM*/
// Parameters
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_edge_right
(/*AUTOINST*/
// Outputs
.fall_lead (fall_lead_right[TAPCNTRWIDTH-1:0]), // Templated
.fall_trail (fall_trail_right[TAPCNTRWIDTH-1:0]), // Templated
.rise_lead (rise_lead_right[TAPCNTRWIDTH-1:0]), // Templated
.rise_trail (rise_trail_right[TAPCNTRWIDTH-1:0]), // Templated
// Inputs
.clk (clk),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.select0 (ktap_at_right_edge), // Templated
.select1 (1'b1), // Templated
.tap (tap[TAPCNTRWIDTH-1:0]));
mig_7series_v2_3_poc_edge_store #
(/*AUTOINSTPARAM*/
// Parameters
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_edge_left
(/*AUTOINST*/
// Outputs
.fall_lead (fall_lead_left[TAPCNTRWIDTH-1:0]), // Templated
.fall_trail (fall_trail_left[TAPCNTRWIDTH-1:0]), // Templated
.rise_lead (rise_lead_left[TAPCNTRWIDTH-1:0]), // Templated
.rise_trail (rise_trail_left[TAPCNTRWIDTH-1:0]), // Templated
// Inputs
.clk (clk),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.select0 (ktap_at_left_edge), // Templated
.select1 (1'b1), // Templated
.tap (tap[TAPCNTRWIDTH-1:0]));
wire not_ktap_at_right_edge = ~ktap_at_right_edge;
wire not_ktap_at_left_edge = ~ktap_at_left_edge;
/*mig_7series_v2_3_poc_edge_store AUTO_TEMPLATE "edge_\(.*\)$" (
.\(.*\)lead (\1lead_@@"vl-bits"),
.\(.*\)trail (\1trail_@@"vl-bits"),
.select0 (not_ktap_at_right_edge),
.select1 (not_ktap_at_left_edge),)*/
mig_7series_v2_3_poc_edge_store #
(/*AUTOINSTPARAM*/
// Parameters
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_edge_center
(/*AUTOINST*/
// Outputs
.fall_lead (fall_lead_center[TAPCNTRWIDTH-1:0]), // Templated
.fall_trail (fall_trail_center[TAPCNTRWIDTH-1:0]), // Templated
.rise_lead (rise_lead_center[TAPCNTRWIDTH-1:0]), // Templated
.rise_trail (rise_trail_center[TAPCNTRWIDTH-1:0]), // Templated
// Inputs
.clk (clk),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.select0 (not_ktap_at_right_edge), // Templated
.select1 (not_ktap_at_left_edge), // Templated
.tap (tap[TAPCNTRWIDTH-1:0]));
mig_7series_v2_3_poc_cc #
(/*AUTOINSTPARAM*/
// Parameters
.CCENABLE (CCENABLE),
.PCT_SAMPS_SOLID (PCT_SAMPS_SOLID),
.SAMPCNTRWIDTH (SAMPCNTRWIDTH),
.SAMPLES (SAMPLES),
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TCQ (TCQ))
u_poc_cc
(/*AUTOINST*/
// Outputs
.poc_error (poc_error),
.samples (samples[SAMPCNTRWIDTH:0]),
.samps_solid_thresh (samps_solid_thresh[SAMPCNTRWIDTH:0]),
// Inputs
.clk (clk),
.fall_lead_center (fall_lead_center[TAPCNTRWIDTH-1:0]),
.fall_lead_left (fall_lead_left[TAPCNTRWIDTH-1:0]),
.fall_lead_right (fall_lead_right[TAPCNTRWIDTH-1:0]),
.fall_trail_center (fall_trail_center[TAPCNTRWIDTH-1:0]),
.fall_trail_left (fall_trail_left[TAPCNTRWIDTH-1:0]),
.fall_trail_right (fall_trail_right[TAPCNTRWIDTH-1:0]),
.ktap_at_left_edge (ktap_at_left_edge),
.ktap_at_right_edge (ktap_at_right_edge),
.mmcm_edge_detect_done (mmcm_edge_detect_done),
.mmcm_lbclk_edge_aligned (mmcm_lbclk_edge_aligned),
.psen (psen),
.rise_lead_center (rise_lead_center[TAPCNTRWIDTH-1:0]),
.rise_lead_left (rise_lead_left[TAPCNTRWIDTH-1:0]),
.rise_lead_right (rise_lead_right[TAPCNTRWIDTH-1:0]),
.rise_trail_center (rise_trail_center[TAPCNTRWIDTH-1:0]),
.rise_trail_left (rise_trail_left[TAPCNTRWIDTH-1:0]),
.rise_trail_right (rise_trail_right[TAPCNTRWIDTH-1:0]),
.rst (rst),
.samps_hi_held (samps_hi_held[SAMPCNTRWIDTH:0]),
.tap (tap[TAPCNTRWIDTH-1:0]));
endmodule
|
module outputs)
wire [TAPCNTRWIDTH-1:0] fall_lead_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_lead_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_lead_right; // From u_edge_right of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_trail_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_trail_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_trail_right; // From u_edge_right of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_lead_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_lead_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_trail_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_trail_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] run; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire run_end; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire run_polarity; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire [SAMPCNTRWIDTH:0] samples; // From u_poc_cc of mig_7series_v2_3_poc_cc.v
wire [SAMPCNTRWIDTH:0] samps_hi_held; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire [SAMPCNTRWIDTH:0] samps_solid_thresh; // From u_poc_cc of mig_7series_v2_3_poc_cc.v
wire [TAPCNTRWIDTH-1:0] tap; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
// End of automatics
output psen;
output [TAPCNTRWIDTH-1:0] rise_lead_right;
output [TAPCNTRWIDTH-1:0] rise_trail_right;
output mmcm_edge_detect_done;
output mmcm_lbclk_edge_aligned;
mig_7series_v2_3_poc_tap_base #
(/*AUTOINSTPARAM*/
// Parameters
.MMCM_SAMP_WAIT (MMCM_SAMP_WAIT),
.POC_USE_METASTABLE_SAMP (POC_USE_METASTABLE_SAMP),
.SAMPCNTRWIDTH (SAMPCNTRWIDTH),
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_poc_tap_base
(/*AUTOINST*/
// Outputs
.psen (psen),
.psincdec (psincdec),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.samps_hi_held (samps_hi_held[SAMPCNTRWIDTH:0]),
.tap (tap[TAPCNTRWIDTH-1:0]),
// Inputs
.clk (clk),
.pd_out (pd_out),
.poc_sample_pd (poc_sample_pd),
.psdone (psdone),
.rst (rst),
.samples (samples[SAMPCNTRWIDTH:0]),
.samps_solid_thresh (samps_solid_thresh[SAMPCNTRWIDTH:0]));
mig_7series_v2_3_poc_meta #
(/*AUTOINSTPARAM*/
// Parameters
.SCANFROMRIGHT (SCANFROMRIGHT),
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_poc_meta
(/*AUTOINST*/
// Outputs
.mmcm_edge_detect_done (mmcm_edge_detect_done),
.mmcm_lbclk_edge_aligned (mmcm_lbclk_edge_aligned),
.poc_backup (poc_backup),
// Inputs
.clk (clk),
.ktap_at_left_edge (ktap_at_left_edge),
.ktap_at_right_edge (ktap_at_right_edge),
.mmcm_edge_detect_rdy (mmcm_edge_detect_rdy),
.ninety_offsets (ninety_offsets[1:0]),
.rise_lead_center (rise_lead_center[TAPCNTRWIDTH-1:0]),
.rise_lead_left (rise_lead_left[TAPCNTRWIDTH-1:0]),
.rise_lead_right (rise_lead_right[TAPCNTRWIDTH-1:0]),
.rise_trail_center (rise_trail_center[TAPCNTRWIDTH-1:0]),
.rise_trail_left (rise_trail_left[TAPCNTRWIDTH-1:0]),
.rise_trail_right (rise_trail_right[TAPCNTRWIDTH-1:0]),
.rst (rst),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.use_noise_window (use_noise_window));
/*mig_7series_v2_3_poc_edge_store AUTO_TEMPLATE "edge_\(.*\)$" (
.\(.*\)lead (\1lead_@@"vl-bits"),
.\(.*\)trail (\1trail_@@"vl-bits"),
.select0 (ktap_at_@_edge),
.select1 (1'b1),)*/
mig_7series_v2_3_poc_edge_store #
(/*AUTOINSTPARAM*/
// Parameters
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_edge_right
(/*AUTOINST*/
// Outputs
.fall_lead (fall_lead_right[TAPCNTRWIDTH-1:0]), // Templated
.fall_trail (fall_trail_right[TAPCNTRWIDTH-1:0]), // Templated
.rise_lead (rise_lead_right[TAPCNTRWIDTH-1:0]), // Templated
.rise_trail (rise_trail_right[TAPCNTRWIDTH-1:0]), // Templated
// Inputs
.clk (clk),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.select0 (ktap_at_right_edge), // Templated
.select1 (1'b1), // Templated
.tap (tap[TAPCNTRWIDTH-1:0]));
mig_7series_v2_3_poc_edge_store #
(/*AUTOINSTPARAM*/
// Parameters
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_edge_left
(/*AUTOINST*/
// Outputs
.fall_lead (fall_lead_left[TAPCNTRWIDTH-1:0]), // Templated
.fall_trail (fall_trail_left[TAPCNTRWIDTH-1:0]), // Templated
.rise_lead (rise_lead_left[TAPCNTRWIDTH-1:0]), // Templated
.rise_trail (rise_trail_left[TAPCNTRWIDTH-1:0]), // Templated
// Inputs
.clk (clk),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.select0 (ktap_at_left_edge), // Templated
.select1 (1'b1), // Templated
.tap (tap[TAPCNTRWIDTH-1:0]));
wire not_ktap_at_right_edge = ~ktap_at_right_edge;
wire not_ktap_at_left_edge = ~ktap_at_left_edge;
/*mig_7series_v2_3_poc_edge_store AUTO_TEMPLATE "edge_\(.*\)$" (
.\(.*\)lead (\1lead_@@"vl-bits"),
.\(.*\)trail (\1trail_@@"vl-bits"),
.select0 (not_ktap_at_right_edge),
.select1 (not_ktap_at_left_edge),)*/
mig_7series_v2_3_poc_edge_store #
(/*AUTOINSTPARAM*/
// Parameters
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_edge_center
(/*AUTOINST*/
// Outputs
.fall_lead (fall_lead_center[TAPCNTRWIDTH-1:0]), // Templated
.fall_trail (fall_trail_center[TAPCNTRWIDTH-1:0]), // Templated
.rise_lead (rise_lead_center[TAPCNTRWIDTH-1:0]), // Templated
.rise_trail (rise_trail_center[TAPCNTRWIDTH-1:0]), // Templated
// Inputs
.clk (clk),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.select0 (not_ktap_at_right_edge), // Templated
.select1 (not_ktap_at_left_edge), // Templated
.tap (tap[TAPCNTRWIDTH-1:0]));
mig_7series_v2_3_poc_cc #
(/*AUTOINSTPARAM*/
// Parameters
.CCENABLE (CCENABLE),
.PCT_SAMPS_SOLID (PCT_SAMPS_SOLID),
.SAMPCNTRWIDTH (SAMPCNTRWIDTH),
.SAMPLES (SAMPLES),
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TCQ (TCQ))
u_poc_cc
(/*AUTOINST*/
// Outputs
.poc_error (poc_error),
.samples (samples[SAMPCNTRWIDTH:0]),
.samps_solid_thresh (samps_solid_thresh[SAMPCNTRWIDTH:0]),
// Inputs
.clk (clk),
.fall_lead_center (fall_lead_center[TAPCNTRWIDTH-1:0]),
.fall_lead_left (fall_lead_left[TAPCNTRWIDTH-1:0]),
.fall_lead_right (fall_lead_right[TAPCNTRWIDTH-1:0]),
.fall_trail_center (fall_trail_center[TAPCNTRWIDTH-1:0]),
.fall_trail_left (fall_trail_left[TAPCNTRWIDTH-1:0]),
.fall_trail_right (fall_trail_right[TAPCNTRWIDTH-1:0]),
.ktap_at_left_edge (ktap_at_left_edge),
.ktap_at_right_edge (ktap_at_right_edge),
.mmcm_edge_detect_done (mmcm_edge_detect_done),
.mmcm_lbclk_edge_aligned (mmcm_lbclk_edge_aligned),
.psen (psen),
.rise_lead_center (rise_lead_center[TAPCNTRWIDTH-1:0]),
.rise_lead_left (rise_lead_left[TAPCNTRWIDTH-1:0]),
.rise_lead_right (rise_lead_right[TAPCNTRWIDTH-1:0]),
.rise_trail_center (rise_trail_center[TAPCNTRWIDTH-1:0]),
.rise_trail_left (rise_trail_left[TAPCNTRWIDTH-1:0]),
.rise_trail_right (rise_trail_right[TAPCNTRWIDTH-1:0]),
.rst (rst),
.samps_hi_held (samps_hi_held[SAMPCNTRWIDTH:0]),
.tap (tap[TAPCNTRWIDTH-1:0]));
endmodule
|
module outputs)
wire [TAPCNTRWIDTH-1:0] fall_lead_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_lead_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_lead_right; // From u_edge_right of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_trail_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_trail_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] fall_trail_right; // From u_edge_right of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_lead_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_lead_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_trail_center; // From u_edge_center of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] rise_trail_left; // From u_edge_left of mig_7series_v2_3_poc_edge_store.v
wire [TAPCNTRWIDTH-1:0] run; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire run_end; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire run_polarity; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire [SAMPCNTRWIDTH:0] samples; // From u_poc_cc of mig_7series_v2_3_poc_cc.v
wire [SAMPCNTRWIDTH:0] samps_hi_held; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
wire [SAMPCNTRWIDTH:0] samps_solid_thresh; // From u_poc_cc of mig_7series_v2_3_poc_cc.v
wire [TAPCNTRWIDTH-1:0] tap; // From u_poc_tap_base of mig_7series_v2_3_poc_tap_base.v
// End of automatics
output psen;
output [TAPCNTRWIDTH-1:0] rise_lead_right;
output [TAPCNTRWIDTH-1:0] rise_trail_right;
output mmcm_edge_detect_done;
output mmcm_lbclk_edge_aligned;
mig_7series_v2_3_poc_tap_base #
(/*AUTOINSTPARAM*/
// Parameters
.MMCM_SAMP_WAIT (MMCM_SAMP_WAIT),
.POC_USE_METASTABLE_SAMP (POC_USE_METASTABLE_SAMP),
.SAMPCNTRWIDTH (SAMPCNTRWIDTH),
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_poc_tap_base
(/*AUTOINST*/
// Outputs
.psen (psen),
.psincdec (psincdec),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.samps_hi_held (samps_hi_held[SAMPCNTRWIDTH:0]),
.tap (tap[TAPCNTRWIDTH-1:0]),
// Inputs
.clk (clk),
.pd_out (pd_out),
.poc_sample_pd (poc_sample_pd),
.psdone (psdone),
.rst (rst),
.samples (samples[SAMPCNTRWIDTH:0]),
.samps_solid_thresh (samps_solid_thresh[SAMPCNTRWIDTH:0]));
mig_7series_v2_3_poc_meta #
(/*AUTOINSTPARAM*/
// Parameters
.SCANFROMRIGHT (SCANFROMRIGHT),
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_poc_meta
(/*AUTOINST*/
// Outputs
.mmcm_edge_detect_done (mmcm_edge_detect_done),
.mmcm_lbclk_edge_aligned (mmcm_lbclk_edge_aligned),
.poc_backup (poc_backup),
// Inputs
.clk (clk),
.ktap_at_left_edge (ktap_at_left_edge),
.ktap_at_right_edge (ktap_at_right_edge),
.mmcm_edge_detect_rdy (mmcm_edge_detect_rdy),
.ninety_offsets (ninety_offsets[1:0]),
.rise_lead_center (rise_lead_center[TAPCNTRWIDTH-1:0]),
.rise_lead_left (rise_lead_left[TAPCNTRWIDTH-1:0]),
.rise_lead_right (rise_lead_right[TAPCNTRWIDTH-1:0]),
.rise_trail_center (rise_trail_center[TAPCNTRWIDTH-1:0]),
.rise_trail_left (rise_trail_left[TAPCNTRWIDTH-1:0]),
.rise_trail_right (rise_trail_right[TAPCNTRWIDTH-1:0]),
.rst (rst),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.use_noise_window (use_noise_window));
/*mig_7series_v2_3_poc_edge_store AUTO_TEMPLATE "edge_\(.*\)$" (
.\(.*\)lead (\1lead_@@"vl-bits"),
.\(.*\)trail (\1trail_@@"vl-bits"),
.select0 (ktap_at_@_edge),
.select1 (1'b1),)*/
mig_7series_v2_3_poc_edge_store #
(/*AUTOINSTPARAM*/
// Parameters
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_edge_right
(/*AUTOINST*/
// Outputs
.fall_lead (fall_lead_right[TAPCNTRWIDTH-1:0]), // Templated
.fall_trail (fall_trail_right[TAPCNTRWIDTH-1:0]), // Templated
.rise_lead (rise_lead_right[TAPCNTRWIDTH-1:0]), // Templated
.rise_trail (rise_trail_right[TAPCNTRWIDTH-1:0]), // Templated
// Inputs
.clk (clk),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.select0 (ktap_at_right_edge), // Templated
.select1 (1'b1), // Templated
.tap (tap[TAPCNTRWIDTH-1:0]));
mig_7series_v2_3_poc_edge_store #
(/*AUTOINSTPARAM*/
// Parameters
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_edge_left
(/*AUTOINST*/
// Outputs
.fall_lead (fall_lead_left[TAPCNTRWIDTH-1:0]), // Templated
.fall_trail (fall_trail_left[TAPCNTRWIDTH-1:0]), // Templated
.rise_lead (rise_lead_left[TAPCNTRWIDTH-1:0]), // Templated
.rise_trail (rise_trail_left[TAPCNTRWIDTH-1:0]), // Templated
// Inputs
.clk (clk),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.select0 (ktap_at_left_edge), // Templated
.select1 (1'b1), // Templated
.tap (tap[TAPCNTRWIDTH-1:0]));
wire not_ktap_at_right_edge = ~ktap_at_right_edge;
wire not_ktap_at_left_edge = ~ktap_at_left_edge;
/*mig_7series_v2_3_poc_edge_store AUTO_TEMPLATE "edge_\(.*\)$" (
.\(.*\)lead (\1lead_@@"vl-bits"),
.\(.*\)trail (\1trail_@@"vl-bits"),
.select0 (not_ktap_at_right_edge),
.select1 (not_ktap_at_left_edge),)*/
mig_7series_v2_3_poc_edge_store #
(/*AUTOINSTPARAM*/
// Parameters
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TAPSPERKCLK (TAPSPERKCLK),
.TCQ (TCQ))
u_edge_center
(/*AUTOINST*/
// Outputs
.fall_lead (fall_lead_center[TAPCNTRWIDTH-1:0]), // Templated
.fall_trail (fall_trail_center[TAPCNTRWIDTH-1:0]), // Templated
.rise_lead (rise_lead_center[TAPCNTRWIDTH-1:0]), // Templated
.rise_trail (rise_trail_center[TAPCNTRWIDTH-1:0]), // Templated
// Inputs
.clk (clk),
.run (run[TAPCNTRWIDTH-1:0]),
.run_end (run_end),
.run_polarity (run_polarity),
.select0 (not_ktap_at_right_edge), // Templated
.select1 (not_ktap_at_left_edge), // Templated
.tap (tap[TAPCNTRWIDTH-1:0]));
mig_7series_v2_3_poc_cc #
(/*AUTOINSTPARAM*/
// Parameters
.CCENABLE (CCENABLE),
.PCT_SAMPS_SOLID (PCT_SAMPS_SOLID),
.SAMPCNTRWIDTH (SAMPCNTRWIDTH),
.SAMPLES (SAMPLES),
.TAPCNTRWIDTH (TAPCNTRWIDTH),
.TCQ (TCQ))
u_poc_cc
(/*AUTOINST*/
// Outputs
.poc_error (poc_error),
.samples (samples[SAMPCNTRWIDTH:0]),
.samps_solid_thresh (samps_solid_thresh[SAMPCNTRWIDTH:0]),
// Inputs
.clk (clk),
.fall_lead_center (fall_lead_center[TAPCNTRWIDTH-1:0]),
.fall_lead_left (fall_lead_left[TAPCNTRWIDTH-1:0]),
.fall_lead_right (fall_lead_right[TAPCNTRWIDTH-1:0]),
.fall_trail_center (fall_trail_center[TAPCNTRWIDTH-1:0]),
.fall_trail_left (fall_trail_left[TAPCNTRWIDTH-1:0]),
.fall_trail_right (fall_trail_right[TAPCNTRWIDTH-1:0]),
.ktap_at_left_edge (ktap_at_left_edge),
.ktap_at_right_edge (ktap_at_right_edge),
.mmcm_edge_detect_done (mmcm_edge_detect_done),
.mmcm_lbclk_edge_aligned (mmcm_lbclk_edge_aligned),
.psen (psen),
.rise_lead_center (rise_lead_center[TAPCNTRWIDTH-1:0]),
.rise_lead_left (rise_lead_left[TAPCNTRWIDTH-1:0]),
.rise_lead_right (rise_lead_right[TAPCNTRWIDTH-1:0]),
.rise_trail_center (rise_trail_center[TAPCNTRWIDTH-1:0]),
.rise_trail_left (rise_trail_left[TAPCNTRWIDTH-1:0]),
.rise_trail_right (rise_trail_right[TAPCNTRWIDTH-1:0]),
.rst (rst),
.samps_hi_held (samps_hi_held[SAMPCNTRWIDTH:0]),
.tap (tap[TAPCNTRWIDTH-1:0]));
endmodule
|
          // De-assert done on the rising edge of oclkdelay_calib_done so that
          // limit detection reruns for complex oclkdelay calib
else if (oclkdelay_calib_done && !oclkdelay_calib_done_r && (BYPASS_COMPLEX_OCAL == "FALSE")) begin
done = 1'b0;
end
end
INIT: begin
ktap_right = 1'b1;
// Initial stage 2 increment to 63 for left limit
if (wait_cnt_done)
lim_nxt_state = STAGE2_TAP_CHK;
end
// Wait for DQS to toggle before asserting poc_ready
WAIT_WR_REQ: begin
write_request = 1'b1;
if (wait_cnt_done) begin
poc_ready = 1'b1;
lim_nxt_state = WAIT_POC_DONE;
end
end
// Wait for POC detect done signal
WAIT_POC_DONE: begin
if (poc2lim_detect_done) begin
write_request = 1'b0;
poc_ready = 1'b0;
lim_nxt_state = WAIT_STG3;
end
end
// Wait for DQS to stop toggling before stage3 inc/dec
WAIT_STG3: begin
if (wait_cnt_done) begin
if (stg3_dec_r) begin
// Check for Stage 3 underflow and MMCM tap limit
if ((stg3_tap_cnt > 'd0) && (mmcm_sub_dec < TDQSS_LIM_MMCM_TAPS))
lim_nxt_state = STAGE3_DEC;
else begin
stg3_dec = 1'b0;
stg3_inc2init_val = 1'b1;
lim_nxt_state = STAGE3_INC;
end
end else begin // Stage 3 being incremented
// Check for Stage 3 overflow and MMCM tap limit
if ((stg3_tap_cnt < 'd63) && (mmcm_sub_inc < TDQSS_LIM_MMCM_TAPS))
lim_nxt_state = STAGE3_INC;
else begin
stg3_dec2init_val = 1'b1;
lim_nxt_state = STAGE3_DEC;
end
end
end
end
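        // Net effect of WAIT_STG3: stage 3 is walked in its current direction
        // until it hits a tap rail (0 when decrementing, 63 when incrementing)
        // or the TDQSS_LIM_MMCM_TAPS budget, then walked back (toward
        // stg3_inc_val or stg3_init_val) before stage 2 is re-adjusted in
        // STAGE2_TAP_CHK.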
STAGE3_INC: begin
stg3_inc_req = 1'b1;
lim_nxt_state = STG3_INCDEC_WAIT;
end
STAGE3_DEC: begin
stg3_dec_req = 1'b1;
lim_nxt_state = STG3_INCDEC_WAIT;
end
// Wait for stage3 inc/dec to complete (po_rdy)
STG3_INCDEC_WAIT: begin
stg3_dec_req = 1'b0;
stg3_inc_req = 1'b0;
if (!stg3_dec_req_r && !stg3_inc_req_r && po_rdy) begin
if (stg3_init_dec_r) begin
// Initial decrement of stage 3
if (stg3_tap_cnt > stg3_dec_val)
lim_nxt_state = STAGE3_DEC;
else begin
lim_nxt_state = WAIT_WR_REQ;
stg3_init_dec = 1'b0;
end
end else if (stg3_dec2init_val_r) begin
if (stg3_tap_cnt > stg3_init_val)
lim_nxt_state = STAGE3_DEC;
else
lim_nxt_state = STAGE2_TAP_CHK;
end else if (stg3_inc2init_val_r) begin
if (stg3_tap_cnt < stg3_inc_val)
lim_nxt_state = STAGE3_INC;
else
lim_nxt_state = STAGE2_TAP_CHK;
end else begin
lim_nxt_state = WAIT_WR_REQ;
end
end
end
// Check for overflow and underflow of stage2 taps
STAGE2_TAP_CHK: begin
if (stg3_dec2init_val_r) begin
            // Increment stage 2 to the write-leveling tap value at the end of limit detection
if (stg2_tap_cnt < wl_po_fine_cnt)
lim_nxt_state = STAGE2_INC;
else begin
lim_nxt_state = PRECH_REQUEST;
end
end else if (stg3_inc2init_val_r) begin
// Decrement stage 2 to '0' to determine right limit
if (stg2_tap_cnt > 'd0)
lim_nxt_state = STAGE2_DEC;
else begin
lim_nxt_state = PRECH_REQUEST;
stg3_inc2init_val = 1'b0;
end
end else if (stg2_inc_r && (stg2_tap_cnt < 'd63)) begin
// Initial increment to 63
lim_nxt_state = STAGE2_INC;
end else begin
lim_nxt_state = STG3_INCDEC_WAIT;
stg2_inc = 1'b0;
end
end
STAGE2_INC: begin
stg2_inc_req = 1'b1;
lim_nxt_state = STG2_INCDEC_WAIT;
end
STAGE2_DEC: begin
stg2_dec_req = 1'b1;
lim_nxt_state = STG2_INCDEC_WAIT;
end
        // Wait for stage2 inc/dec to complete (po_rdy)
STG2_INCDEC_WAIT: begin
stg2_inc_req = 1'b0;
stg2_dec_req = 1'b0;
if (!stg2_inc_req_r && !stg2_dec_req_r && po_rdy)
lim_nxt_state = STAGE2_TAP_CHK;
end
PRECH_REQUEST: begin
prech_req = 1'b1;
if (prech_done) begin
prech_req = 1'b0;
if (stg3_dec2init_val_r)
lim_nxt_state = LIMIT_DONE;
else
lim_nxt_state = WAIT_WR_REQ;
end
end
LIMIT_DONE: begin
done = 1'b1;
ktap_right = 1'b0;
stg3_dec2init_val = 1'b0;
lim_nxt_state = IDLE;
end
default: begin
lim_nxt_state = IDLE;
end
endcase
end
endmodule
|
module dram (
// Inouts
inout [63:0] ddr3_dq,
inout [7:0] ddr3_dqs_n,
inout [7:0] ddr3_dqs_p,
// Outputs
output [15:0] ddr3_addr,
output [2:0] ddr3_ba,
output ddr3_ras_n,
output ddr3_cas_n,
output ddr3_we_n,
output ddr3_reset_n,
output [0:0] ddr3_ck_p,
output [0:0] ddr3_ck_n,
output [0:0] ddr3_cke,
output [0:0] ddr3_cs_n,
output [7:0] ddr3_dm,
output [0:0] ddr3_odt,
// Inputs
// Differential system clocks
input sys_clk_p,
input sys_clk_n,
// user interface signals
input [29:0] app_addr,
input [2:0] app_cmd,
input app_en,
input [511:0] app_wdf_data,
input app_wdf_end,
input [63:0] app_wdf_mask,
input app_wdf_wren,
output [511:0] app_rd_data,
output app_rd_data_end,
output app_rd_data_valid,
output app_rdy,
output app_wdf_rdy,
input app_sr_req,
input app_ref_req,
input app_zq_req,
output app_sr_active,
output app_ref_ack,
output app_zq_ack,
output ui_clk,
output ui_clk_sync_rst,
output init_calib_complete,
input sys_rst
);
// Start of IP top instance
dram_mig u_dram_mig (
// Memory interface ports
.ddr3_addr (ddr3_addr),
.ddr3_ba (ddr3_ba),
.ddr3_cas_n (ddr3_cas_n),
.ddr3_ck_n (ddr3_ck_n),
.ddr3_ck_p (ddr3_ck_p),
.ddr3_cke (ddr3_cke),
.ddr3_ras_n (ddr3_ras_n),
.ddr3_reset_n (ddr3_reset_n),
.ddr3_we_n (ddr3_we_n),
.ddr3_dq (ddr3_dq),
.ddr3_dqs_n (ddr3_dqs_n),
.ddr3_dqs_p (ddr3_dqs_p),
.init_calib_complete (init_calib_complete),
.ddr3_cs_n (ddr3_cs_n),
.ddr3_dm (ddr3_dm),
.ddr3_odt (ddr3_odt),
// Application interface ports
.app_addr (app_addr),
.app_cmd (app_cmd),
.app_en (app_en),
.app_wdf_data (app_wdf_data),
.app_wdf_end (app_wdf_end),
.app_wdf_wren (app_wdf_wren),
.app_rd_data (app_rd_data),
.app_rd_data_end (app_rd_data_end),
.app_rd_data_valid (app_rd_data_valid),
.app_rdy (app_rdy),
.app_wdf_rdy (app_wdf_rdy),
.app_sr_req (app_sr_req),
.app_ref_req (app_ref_req),
.app_zq_req (app_zq_req),
.app_sr_active (app_sr_active),
.app_ref_ack (app_ref_ack),
.app_zq_ack (app_zq_ack),
.ui_clk (ui_clk),
.ui_clk_sync_rst (ui_clk_sync_rst),
.app_wdf_mask (app_wdf_mask),
// System Clock Ports
.sys_clk_p (sys_clk_p),
.sys_clk_n (sys_clk_n),
.sys_rst (sys_rst)
);
// End of IP top instance
endmodule
|
module mig_7series_v2_3_ddr_phy_ck_addr_cmd_delay #
(
parameter TCQ = 100,
parameter tCK = 3636,
parameter DQS_CNT_WIDTH = 3,
parameter N_CTL_LANES = 3,
parameter SIM_CAL_OPTION = "NONE"
)
(
input clk,
input rst,
// Start only after PO_CIRC_BUF_DELAY decremented
input cmd_delay_start,
// Control lane being shifted using Phaser_Out fine delay taps
output reg [N_CTL_LANES-1:0] ctl_lane_cnt,
// Inc/dec Phaser_Out fine delay line
output reg po_stg2_f_incdec,
output reg po_en_stg2_f,
output reg po_stg2_c_incdec,
output reg po_en_stg2_c,
// Completed delaying CK/Address/Commands/Controls
output po_ck_addr_cmd_delay_done
);
localparam TAP_CNT_LIMIT = 63;
//Calculate the tap resolution of the PHASER based on the clock period
localparam FREQ_REF_DIV = (tCK > 5000 ? 4 :
tCK > 2500 ? 2 : 1);
localparam integer PHASER_TAP_RES = ((tCK/2)/64);
// Determine whether 300 ps or 350 ps delay required
localparam CALC_TAP_CNT = (tCK >= 1250) ? 350 : 300;
// Determine the number of Phaser_Out taps required to delay by 300 ps
// 300 ps is the PCB trace uncertainty between CK and DQS byte groups
// Increment control byte lanes
localparam TAP_CNT = 0;
//localparam TAP_CNT = (CALC_TAP_CNT + PHASER_TAP_RES - 1)/PHASER_TAP_RES;
//Decrement control byte lanes
localparam TAP_DEC = (SIM_CAL_OPTION == "FAST_CAL") ? 0 : 29;
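  // Example with the default tCK = 3636 ps: PHASER_TAP_RES = (3636/2)/64 = 28 ps
  // per fine tap and CALC_TAP_CNT = 350 ps, so the commented-out formula would
  // give TAP_CNT = (350 + 28 - 1)/28 = 13 taps; as coded, TAP_CNT is forced to 0
  // and only the TAP_DEC (29 fine-tap) decrement is performed.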
reg delay_dec_done;
reg delay_done_r1;
reg delay_done_r2;
reg delay_done_r3;
reg delay_done_r4 /* synthesis syn_maxfan = 10 */;
reg [5:0] delay_cnt_r;
reg [5:0] delaydec_cnt_r;
reg po_cnt_inc;
reg po_cnt_dec;
reg [3:0] wait_cnt_r;
assign po_ck_addr_cmd_delay_done = ((TAP_CNT == 0) && (TAP_DEC == 0)) ? 1'b1 : delay_done_r4;
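  // With the parameters above, TAP_CNT is 0 and TAP_DEC is 29 unless
  // SIM_CAL_OPTION is "FAST_CAL", so normally the done flag comes from the
  // delay_done_r4 pipeline; in FAST_CAL simulation both constants are 0 and
  // po_ck_addr_cmd_delay_done is tied high.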
always @(posedge clk) begin
if (rst || po_cnt_dec || po_cnt_inc)
wait_cnt_r <= #TCQ 'd8;
else if (cmd_delay_start && (wait_cnt_r > 'd0))
wait_cnt_r <= #TCQ wait_cnt_r - 1;
end
always @(posedge clk) begin
if (rst || (delaydec_cnt_r > 6'd0) || (delay_cnt_r == 'd0) || (TAP_DEC == 0))
po_cnt_inc <= #TCQ 1'b0;
else if ((delay_cnt_r > 'd0) && (wait_cnt_r == 'd1))
po_cnt_inc <= #TCQ 1'b1;
else
po_cnt_inc <= #TCQ 1'b0;
end
//Tap decrement
always @(posedge clk) begin
if (rst || (delaydec_cnt_r == 'd0))
po_cnt_dec <= #TCQ 1'b0;
else if (cmd_delay_start && (delaydec_cnt_r > 'd0) && (wait_cnt_r == 'd1))
po_cnt_dec <= #TCQ 1'b1;
else
po_cnt_dec <= #TCQ 1'b0;
end
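  // wait_cnt_r paces the tap adjustments: it reloads to 8 on every po_cnt_inc or
  // po_cnt_dec pulse (and on reset) and must count down to 1, with
  // cmd_delay_start held high, before the next increment or decrement pulse is
  // generated.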
  //po_stg2_f_incdec and po_en_stg2_f stay asserted HIGH for TAP_CNT cycles for every control byte lane
  //the alignment is started once cmd_delay_start is asserted
always @(posedge clk) begin
if (rst) begin
po_stg2_f_incdec <= #TCQ 1'b0;
po_en_stg2_f <= #TCQ 1'b0;
po_stg2_c_incdec <= #TCQ 1'b0;
po_en_stg2_c <= #TCQ 1'b0;
end else begin
if (po_cnt_dec) begin
po_stg2_f_incdec <= #TCQ 1'b0;
po_en_stg2_f <= #TCQ 1'b1;
end else begin
po_stg2_f_incdec <= #TCQ 1'b0;
po_en_stg2_f <= #TCQ 1'b0;
end
if (po_cnt_inc) begin
po_stg2_c_incdec <= #TCQ 1'b1;
po_en_stg2_c <= #TCQ 1'b1;
end else begin
po_stg2_c_incdec <= #TCQ 1'b0;
po_en_stg2_c <= #TCQ 1'b0;
end
end
end
// delay counter to count 2 cycles
// Increment coarse taps by 2 for all control byte lanes
// to mitigate late writes
always @(posedge clk) begin
// load delay counter with init value
if (rst || (tCK > 2500) || (SIM_CAL_OPTION == "FAST_CAL"))
delay_cnt_r <= #TCQ 'd0;
else if ((delaydec_cnt_r > 6'd0) ||((delay_cnt_r == 6'd0) && (ctl_lane_cnt != N_CTL_LANES-1)))
delay_cnt_r <= #TCQ 'd1;
else if (po_cnt_inc && (delay_cnt_r > 6'd0))
delay_cnt_r <= #TCQ delay_cnt_r - 1;
end
// delay counter to count TAP_DEC cycles
always @(posedge clk) begin
// load delay counter with init value of TAP_DEC
if (rst || ~cmd_delay_start ||((delaydec_cnt_r == 6'd0) && (delay_cnt_r == 6'd0) && (ctl_lane_cnt != N_CTL_LANES-1)))
delaydec_cnt_r <= #TCQ TAP_DEC;
else if (po_cnt_dec && (delaydec_cnt_r > 6'd0))
delaydec_cnt_r <= #TCQ delaydec_cnt_r - 1;
end
//ctl_lane_cnt is used to count the number of CTL_LANES or byte lanes that have the address/command phase shifted by 1/4 mem. cycle
//This ensures all ctrl byte lanes have had their output phase shifted.
always @(posedge clk) begin
if (rst || ~cmd_delay_start )
ctl_lane_cnt <= #TCQ 6'b0;
else if (~delay_dec_done && (ctl_lane_cnt == N_CTL_LANES-1) && (delaydec_cnt_r == 6'd1))
ctl_lane_cnt <= #TCQ ctl_lane_cnt;
else if ((ctl_lane_cnt != N_CTL_LANES-1) && (delaydec_cnt_r == 6'd0) && (delay_cnt_r == 'd0))
ctl_lane_cnt <= #TCQ ctl_lane_cnt + 1;
end
// All control lanes have decremented to 31 fine taps from 46
always @(posedge clk) begin
if (rst || ~cmd_delay_start) begin
delay_dec_done <= #TCQ 1'b0;
end else if (((TAP_CNT == 0) && (TAP_DEC == 0)) ||
((delaydec_cnt_r == 6'd0) && (delay_cnt_r == 'd0) && (ctl_lane_cnt == N_CTL_LANES-1))) begin
delay_dec_done <= #TCQ 1'b1;
end
end
always @(posedge clk) begin
delay_done_r1 <= #TCQ delay_dec_done;
delay_done_r2 <= #TCQ delay_done_r1;
delay_done_r3 <= #TCQ delay_done_r2;
delay_done_r4 <= #TCQ delay_done_r3;
end
endmodule
|
module mig_7series_v2_3_ecc_merge_enc
#(
parameter TCQ = 100,
parameter PAYLOAD_WIDTH = 64,
parameter CODE_WIDTH = 72,
parameter DATA_BUF_ADDR_WIDTH = 4,
parameter DATA_BUF_OFFSET_WIDTH = 1,
parameter DATA_WIDTH = 64,
parameter DQ_WIDTH = 72,
parameter ECC_WIDTH = 8,
parameter nCK_PER_CLK = 4
)
(
/*AUTOARG*/
// Outputs
mc_wrdata, mc_wrdata_mask,
// Inputs
clk, rst, wr_data, wr_data_mask, rd_merge_data, h_rows, raw_not_ecc
);
input clk;
input rst;
input [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] wr_data;
input [2*nCK_PER_CLK*DATA_WIDTH/8-1:0] wr_data_mask;
input [2*nCK_PER_CLK*DATA_WIDTH-1:0] rd_merge_data;
reg [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] wr_data_r;
reg [2*nCK_PER_CLK*DATA_WIDTH/8-1:0] wr_data_mask_r;
reg [2*nCK_PER_CLK*DATA_WIDTH-1:0] rd_merge_data_r;
always @(posedge clk) wr_data_r <= #TCQ wr_data;
always @(posedge clk) wr_data_mask_r <= #TCQ wr_data_mask;
always @(posedge clk) rd_merge_data_r <= #TCQ rd_merge_data;
// Merge new data with memory read data.
wire [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] merged_data;
genvar h;
genvar i;
generate
for (h=0; h<2*nCK_PER_CLK; h=h+1) begin : merge_data_outer
for (i=0; i<DATA_WIDTH/8; i=i+1) begin : merge_data_inner
assign merged_data[h*PAYLOAD_WIDTH+i*8+:8] =
wr_data_mask[h*DATA_WIDTH/8+i]
? rd_merge_data[h*DATA_WIDTH+i*8+:8]
: wr_data[h*PAYLOAD_WIDTH+i*8+:8];
end
if (PAYLOAD_WIDTH > DATA_WIDTH)
assign merged_data[(h+1)*PAYLOAD_WIDTH-1-:PAYLOAD_WIDTH-DATA_WIDTH]=
wr_data[(h+1)*PAYLOAD_WIDTH-1-:PAYLOAD_WIDTH-DATA_WIDTH];
end
endgenerate
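  // wr_data_mask acts as a byte select in the merge above: a set mask bit keeps
  // the read-back byte from rd_merge_data, a clear bit takes the new byte from
  // wr_data, turning masked writes into read-modify-write merges.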
// Generate ECC and overlay onto mc_wrdata.
input [CODE_WIDTH*ECC_WIDTH-1:0] h_rows;
input [2*nCK_PER_CLK-1:0] raw_not_ecc;
reg [2*nCK_PER_CLK-1:0] raw_not_ecc_r;
always @(posedge clk) raw_not_ecc_r <= #TCQ raw_not_ecc;
output reg [2*nCK_PER_CLK*DQ_WIDTH-1:0] mc_wrdata;
reg [2*nCK_PER_CLK*DQ_WIDTH-1:0] mc_wrdata_c;
genvar j;
integer k;
generate
for (j=0; j<2*nCK_PER_CLK; j=j+1) begin : ecc_word
always @(/*AS*/h_rows or merged_data or raw_not_ecc_r) begin
mc_wrdata_c[j*DQ_WIDTH+:DQ_WIDTH] =
{{DQ_WIDTH-PAYLOAD_WIDTH{1'b0}},
merged_data[j*PAYLOAD_WIDTH+:PAYLOAD_WIDTH]};
for (k=0; k<ECC_WIDTH; k=k+1)
if (~raw_not_ecc_r[j])
mc_wrdata_c[j*DQ_WIDTH+CODE_WIDTH-k-1] =
^(merged_data[j*PAYLOAD_WIDTH+:DATA_WIDTH] &
h_rows[k*CODE_WIDTH+:DATA_WIDTH]);
end
end
endgenerate
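  // Each check bit k above is the XOR (parity) of the merged data word ANDed
  // with row k of the H matrix, overlaid onto the top ECC_WIDTH bits of the
  // CODE_WIDTH-wide code word; when raw_not_ecc_r[j] is set the overlay is
  // skipped and the word goes out unmodified.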
always @(posedge clk) mc_wrdata <= mc_wrdata_c;
// Set all DRAM masks to zero.
output wire[2*nCK_PER_CLK*DQ_WIDTH/8-1:0] mc_wrdata_mask;
assign mc_wrdata_mask = {2*nCK_PER_CLK*DQ_WIDTH/8{1'b0}};
endmodule
|
module mig_7series_v2_3_bank_queue #
(
parameter TCQ = 100,
parameter BM_CNT_WIDTH = 2,
parameter nBANK_MACHS = 4,
parameter ORDERING = "NORM",
parameter ID = 0
)
(/*AUTOARG*/
// Outputs
head_r, tail_r, idle_ns, idle_r, pass_open_bank_ns,
pass_open_bank_r, auto_pre_r, bm_end, passing_open_bank,
ordered_issued, ordered_r, order_q_zero, rcv_open_bank,
rb_hit_busies_r, q_has_rd, q_has_priority, wait_for_maint_r,
// Inputs
clk, rst, accept_internal_r, use_addr, periodic_rd_ack_r, bm_end_in,
idle_cnt, rb_hit_busy_cnt, accept_req, rb_hit_busy_r, maint_idle,
maint_hit, row_hit_r, pre_wait_r, allow_auto_pre, sending_col,
bank_wait_in_progress, precharge_bm_end, req_wr_r, rd_wr_r,
adv_order_q, order_cnt, rb_hit_busy_ns_in, passing_open_bank_in,
was_wr, maint_req_r, was_priority
);
localparam ZERO = 0;
localparam ONE = 1;
localparam [BM_CNT_WIDTH-1:0] BM_CNT_ZERO = ZERO[0+:BM_CNT_WIDTH];
localparam [BM_CNT_WIDTH-1:0] BM_CNT_ONE = ONE[0+:BM_CNT_WIDTH];
input clk;
input rst;
// Decide if this bank machine should accept a new request.
reg idle_r_lcl;
reg head_r_lcl;
input accept_internal_r;
wire bm_ready = idle_r_lcl && head_r_lcl && accept_internal_r;
// Accept request in this bank machine. Could be maintenance or
// regular request.
input use_addr;
input periodic_rd_ack_r;
wire accept_this_bm = bm_ready && (use_addr || periodic_rd_ack_r);
// Multiple machines may enter the idle queue in a single state.
// Based on bank machine instance number, compute how many
// bank machines with lower instance numbers are entering
// the idle queue.
input [(nBANK_MACHS*2)-1:0] bm_end_in;
reg [BM_CNT_WIDTH-1:0] idlers_below;
integer i;
always @(/*AS*/bm_end_in) begin
idlers_below = BM_CNT_ZERO;
for (i=0; i<ID; i=i+1)
idlers_below = idlers_below + bm_end_in[i];
end
reg idlers_above;
always @(/*AS*/bm_end_in) begin
idlers_above = 1'b0;
for (i=ID+1; i<ID+nBANK_MACHS; i=i+1)
idlers_above = idlers_above || bm_end_in[i];
end
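  // Example: with nBANK_MACHS = 4 and ID = 2, idlers_below sums bm_end_in[1:0]
  // (machines below this one entering the idle queue) while idlers_above ORs
  // bm_end_in[5:3].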
`ifdef MC_SVA
bm_end_and_idlers_above: cover property (@(posedge clk)
(~rst && bm_end && idlers_above));
bm_end_and_idlers_below: cover property (@(posedge clk)
(~rst && bm_end && |idlers_below));
`endif
// Compute the q_entry number.
input [BM_CNT_WIDTH-1:0] idle_cnt;
input [BM_CNT_WIDTH-1:0] rb_hit_busy_cnt;
input accept_req;
wire bm_end_lcl;
reg adv_queue = 1'b0;
reg [BM_CNT_WIDTH-1:0] q_entry_r;
reg [BM_CNT_WIDTH-1:0] q_entry_ns;
wire [BM_CNT_WIDTH-1:0] temp;
// always @(/*AS*/accept_req or accept_this_bm or adv_queue
// or bm_end_lcl or idle_cnt or idle_r_lcl or idlers_below
// or q_entry_r or rb_hit_busy_cnt /*or rst*/) begin
//// if (rst) q_entry_ns = ID[BM_CNT_WIDTH-1:0];
//// else begin
// q_entry_ns = q_entry_r;
// if ((~idle_r_lcl && adv_queue) ||
// (idle_r_lcl && accept_req && ~accept_this_bm))
// q_entry_ns = q_entry_r - BM_CNT_ONE;
// if (accept_this_bm)
//// q_entry_ns = rb_hit_busy_cnt - (adv_queue ? BM_CNT_ONE : BM_CNT_ZERO);
// q_entry_ns = adv_queue ? (rb_hit_busy_cnt - BM_CNT_ONE) : (rb_hit_busy_cnt -BM_CNT_ZERO);
// if (bm_end_lcl) begin
// q_entry_ns = idle_cnt + idlers_below;
// if (accept_req) q_entry_ns = q_entry_ns - BM_CNT_ONE;
//// end
// end
// end
assign temp = idle_cnt + idlers_below;
always @ (*)
begin
if (accept_req & bm_end_lcl)
q_entry_ns = temp - BM_CNT_ONE;
else if (bm_end_lcl)
q_entry_ns = temp;
else if (accept_this_bm)
q_entry_ns = adv_queue ? (rb_hit_busy_cnt - BM_CNT_ONE) : (rb_hit_busy_cnt -BM_CNT_ZERO);
else if ((!idle_r_lcl & adv_queue) |
(idle_r_lcl & accept_req & !accept_this_bm))
q_entry_ns = q_entry_r - BM_CNT_ONE;
else
q_entry_ns = q_entry_r;
end
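  // Priority of the terms above: a retiring bank machine (bm_end_lcl) reloads
  // q_entry_ns from idle_cnt + idlers_below (minus one if a request is accepted
  // in the same cycle); a newly accepted request loads it from rb_hit_busy_cnt
  // (less one if the queue is advancing); otherwise the entry decrements by one
  // whenever its queue advances or, while idle, when another machine accepts
  // the incoming request.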
always @(posedge clk)
if (rst)
q_entry_r <= #TCQ ID[BM_CNT_WIDTH-1:0];
else
q_entry_r <= #TCQ q_entry_ns;
// Determine if this entry is the head of its queue.
reg head_ns;
always @(/*AS*/accept_req or accept_this_bm or adv_queue
or bm_end_lcl or head_r_lcl or idle_cnt or idle_r_lcl
or idlers_below or q_entry_r or rb_hit_busy_cnt or rst) begin
if (rst) head_ns = ~|ID[BM_CNT_WIDTH-1:0];
else begin
head_ns = head_r_lcl;
if (accept_this_bm)
head_ns = ~|(rb_hit_busy_cnt - (adv_queue ? BM_CNT_ONE : BM_CNT_ZERO));
if ((~idle_r_lcl && adv_queue) ||
(idle_r_lcl && accept_req && ~accept_this_bm))
head_ns = ~|(q_entry_r - BM_CNT_ONE);
if (bm_end_lcl) begin
head_ns = ~|(idle_cnt - (accept_req ? BM_CNT_ONE : BM_CNT_ZERO)) &&
~|idlers_below;
end
end
end
always @(posedge clk) head_r_lcl <= #TCQ head_ns;
output wire head_r;
assign head_r = head_r_lcl;
// Determine if this entry is the tail of its queue. Note that
// an entry can be both head and tail.
input rb_hit_busy_r;
reg tail_r_lcl = 1'b1;
generate
if (nBANK_MACHS > 1) begin : compute_tail
reg tail_ns;
always @(accept_req or accept_this_bm
or bm_end_in or bm_end_lcl or idle_r_lcl
or idlers_above or rb_hit_busy_r or rst or tail_r_lcl) begin
if (rst) tail_ns = (ID == nBANK_MACHS);
// The order of the statements below is important in the case where
// another bank machine is retiring and this bank machine is accepting.
else begin
tail_ns = tail_r_lcl;
if ((accept_req && rb_hit_busy_r) ||
(|bm_end_in[`BM_SHARED_BV] && idle_r_lcl))
tail_ns = 1'b0;
if (accept_this_bm || (bm_end_lcl && ~idlers_above)) tail_ns = 1'b1;
end
end
always @(posedge clk) tail_r_lcl <= #TCQ tail_ns;
end // if (nBANK_MACHS > 1)
endgenerate
output wire tail_r;
assign tail_r = tail_r_lcl;
wire clear_req = bm_end_lcl || rst;
// Is this entry in the idle queue?
reg idle_ns_lcl;
always @(/*AS*/accept_this_bm or clear_req or idle_r_lcl) begin
idle_ns_lcl = idle_r_lcl;
if (accept_this_bm) idle_ns_lcl = 1'b0;
if (clear_req) idle_ns_lcl = 1'b1;
end
always @(posedge clk) idle_r_lcl <= #TCQ idle_ns_lcl;
output wire idle_ns;
assign idle_ns = idle_ns_lcl;
output wire idle_r;
assign idle_r = idle_r_lcl;
// Maintenance hitting on this active bank machine is in progress.
input maint_idle;
input maint_hit;
wire maint_hit_this_bm = ~maint_idle && maint_hit;
// Does new request hit on this bank machine while it is able to pass the
// open bank?
input row_hit_r;
input pre_wait_r;
wire pass_open_bank_eligible =
tail_r_lcl && rb_hit_busy_r && row_hit_r && ~pre_wait_r;
// Set pass open bank bit, but not if request preceded active maintenance.
reg wait_for_maint_r_lcl;
reg pass_open_bank_r_lcl;
wire pass_open_bank_ns_lcl = ~clear_req &&
(pass_open_bank_r_lcl ||
(accept_req && pass_open_bank_eligible &&
(~maint_hit_this_bm || wait_for_maint_r_lcl)));
always @(posedge clk) pass_open_bank_r_lcl <= #TCQ pass_open_bank_ns_lcl;
output wire pass_open_bank_ns;
assign pass_open_bank_ns = pass_open_bank_ns_lcl;
output wire pass_open_bank_r;
assign pass_open_bank_r = pass_open_bank_r_lcl;
`ifdef MC_SVA
pass_open_bank: cover property (@(posedge clk) (~rst && pass_open_bank_ns));
pass_open_bank_killed_by_maint: cover property (@(posedge clk)
(~rst && accept_req && pass_open_bank_eligible &&
maint_hit_this_bm && ~wait_for_maint_r_lcl));
pass_open_bank_following_maint: cover property (@(posedge clk)
(~rst && accept_req && pass_open_bank_eligible &&
maint_hit_this_bm && wait_for_maint_r_lcl));
`endif
  // Should the column command be sent with the auto precharge bit set? This
  // will happen when it is detected that the next request is to a different
  // row, or the next request is a refresh to this rank.
reg auto_pre_r_lcl;
reg auto_pre_ns;
input allow_auto_pre;
always @(/*AS*/accept_req or allow_auto_pre or auto_pre_r_lcl
or clear_req or maint_hit_this_bm or rb_hit_busy_r
or row_hit_r or tail_r_lcl or wait_for_maint_r_lcl) begin
auto_pre_ns = auto_pre_r_lcl;
if (clear_req) auto_pre_ns = 1'b0;
else
if (accept_req && tail_r_lcl && allow_auto_pre && rb_hit_busy_r &&
(~row_hit_r || (maint_hit_this_bm && ~wait_for_maint_r_lcl)))
auto_pre_ns = 1'b1;
end
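  // Auto precharge is requested only when allow_auto_pre is asserted, this
  // entry is the tail of its queue, a new request to the same rank-bank is
  // accepted, and either the requested row differs or maintenance that this
  // request did not precede is hitting the bank.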
always @(posedge clk) auto_pre_r_lcl <= #TCQ auto_pre_ns;
output wire auto_pre_r;
assign auto_pre_r = auto_pre_r_lcl;
`ifdef MC_SVA
auto_precharge: cover property (@(posedge clk) (~rst && auto_pre_ns));
maint_triggers_auto_precharge: cover property (@(posedge clk)
(~rst && auto_pre_ns && ~auto_pre_r && row_hit_r));
`endif
// Determine when the current request is finished.
input sending_col;
input req_wr_r;
input rd_wr_r;
wire sending_col_not_rmw_rd = sending_col && !(req_wr_r && rd_wr_r);
input bank_wait_in_progress;
input precharge_bm_end;
reg pre_bm_end_r;
wire pre_bm_end_ns = precharge_bm_end ||
(bank_wait_in_progress && pass_open_bank_ns_lcl);
always @(posedge clk) pre_bm_end_r <= #TCQ pre_bm_end_ns;
assign bm_end_lcl =
pre_bm_end_r || (sending_col_not_rmw_rd && pass_open_bank_r_lcl);
output wire bm_end;
assign bm_end = bm_end_lcl;
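  // bm_end asserts the cycle after precharge_bm_end (or after a bank wait
  // with the open bank being passed), or immediately when the column command
  // issues and the open bank is handed to a successor machine.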
// Determine that the open bank should be passed to the successor bank machine.
reg pre_passing_open_bank_r;
wire pre_passing_open_bank_ns =
bank_wait_in_progress && pass_open_bank_ns_lcl;
always @(posedge clk) pre_passing_open_bank_r <= #TCQ
pre_passing_open_bank_ns;
output wire passing_open_bank;
assign passing_open_bank =
pre_passing_open_bank_r || (sending_col_not_rmw_rd && pass_open_bank_r_lcl);
reg ordered_ns;
wire set_order_q = ((ORDERING == "STRICT") || ((ORDERING == "NORM") &&
req_wr_r)) && accept_this_bm;
wire ordered_issued_lcl =
sending_col_not_rmw_rd && !(req_wr_r && rd_wr_r) &&
((ORDERING == "STRICT") || ((ORDERING == "NORM") && req_wr_r));
output wire ordered_issued;
assign ordered_issued = ordered_issued_lcl;
reg ordered_r_lcl;
always @(/*AS*/ordered_issued_lcl or ordered_r_lcl or rst
or set_order_q) begin
if (rst) ordered_ns = 1'b0;
else begin
ordered_ns = ordered_r_lcl;
// Should never see accept_this_bm and adv_order_q at the same time.
if (set_order_q) ordered_ns = 1'b1;
if (ordered_issued_lcl) ordered_ns = 1'b0;
end
end
always @(posedge clk) ordered_r_lcl <= #TCQ ordered_ns;
output wire ordered_r;
assign ordered_r = ordered_r_lcl;
// Figure out when to advance the ordering queue.
input adv_order_q;
input [BM_CNT_WIDTH-1:0] order_cnt;
reg [BM_CNT_WIDTH-1:0] order_q_r;
reg [BM_CNT_WIDTH-1:0] order_q_ns;
always @(/*AS*/adv_order_q or order_cnt or order_q_r or rst
or set_order_q) begin
order_q_ns = order_q_r;
if (rst) order_q_ns = BM_CNT_ZERO;
if (set_order_q)
if (adv_order_q) order_q_ns = order_cnt - BM_CNT_ONE;
else order_q_ns = order_cnt;
if (adv_order_q && |order_q_r) order_q_ns = order_q_r - BM_CNT_ONE;
end
always @(posedge clk) order_q_r <= #TCQ order_q_ns;
output wire order_q_zero;
assign order_q_zero = ~|order_q_r ||
(adv_order_q && (order_q_r == BM_CNT_ONE)) ||
((ORDERING == "NORM") && rd_wr_r);
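  // order_q_zero indicates this machine is unconstrained by ordering: its
  // count is already zero, reaches zero on this advance, or ORDERING is
  // "NORM" and rd_wr_r indicates a read (only writes are ordered in NORM).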
  // Keep track of which other bank machines are ahead of this one in a
// rank-bank queue. This is necessary to know when to advance this bank
// machine in the queue, and when to update bank state machine counter upon
// passing a bank.
input [(nBANK_MACHS*2)-1:0] rb_hit_busy_ns_in;
reg [(nBANK_MACHS*2)-1:0] rb_hit_busies_r_lcl = {nBANK_MACHS*2{1'b0}};
input [(nBANK_MACHS*2)-1:0] passing_open_bank_in;
output reg rcv_open_bank = 1'b0;
generate
if (nBANK_MACHS > 1) begin : rb_hit_busies
// The clear_vector resets bits in the rb_hit_busies vector as bank machines
  // complete requests. rst also resets all the bits.
wire [nBANK_MACHS-2:0] clear_vector =
({nBANK_MACHS-1{rst}} | bm_end_in[`BM_SHARED_BV]);
// As this bank machine takes on a new request, capture the vector of
// which other bank machines are in the same queue.
wire [`BM_SHARED_BV] rb_hit_busies_ns =
~clear_vector &
(idle_ns_lcl
? rb_hit_busy_ns_in[`BM_SHARED_BV]
: rb_hit_busies_r_lcl[`BM_SHARED_BV]);
always @(posedge clk) rb_hit_busies_r_lcl[`BM_SHARED_BV] <=
#TCQ rb_hit_busies_ns;
// Compute when to advance this queue entry based on seeing other bank machines
// in the same queue finish.
always @(bm_end_in or rb_hit_busies_r_lcl)
adv_queue =
|(bm_end_in[`BM_SHARED_BV] & rb_hit_busies_r_lcl[`BM_SHARED_BV]);
// Decide when to receive an open bank based on knowing this bank machine is
// one entry from the head, and a passing_open_bank hits on the
// rb_hit_busies vector.
always @(idle_r_lcl
or passing_open_bank_in or q_entry_r
or rb_hit_busies_r_lcl) rcv_open_bank =
|(rb_hit_busies_r_lcl[`BM_SHARED_BV] & passing_open_bank_in[`BM_SHARED_BV])
&& (q_entry_r == BM_CNT_ONE) && ~idle_r_lcl;
end
endgenerate
output wire [nBANK_MACHS*2-1:0] rb_hit_busies_r;
assign rb_hit_busies_r = rb_hit_busies_r_lcl;
  // Keep track of whether the queue this entry is in has priority content.
input was_wr;
input maint_req_r;
reg q_has_rd_r;
wire q_has_rd_ns = ~clear_req &&
(q_has_rd_r || (accept_req && rb_hit_busy_r && ~was_wr) ||
(maint_req_r && maint_hit && ~idle_r_lcl));
always @(posedge clk) q_has_rd_r <= #TCQ q_has_rd_ns;
output wire q_has_rd;
assign q_has_rd = q_has_rd_r;
input was_priority;
reg q_has_priority_r;
wire q_has_priority_ns = ~clear_req &&
(q_has_priority_r || (accept_req && rb_hit_busy_r && was_priority));
always @(posedge clk) q_has_priority_r <= #TCQ q_has_priority_ns;
output wire q_has_priority;
assign q_has_priority = q_has_priority_r;
// Figure out if this entry should wait for maintenance to end.
wire wait_for_maint_ns = ~rst && ~maint_idle &&
(wait_for_maint_r_lcl || (maint_hit && accept_this_bm));
always @(posedge clk) wait_for_maint_r_lcl <= #TCQ wait_for_maint_ns;
output wire wait_for_maint_r;
assign wait_for_maint_r = wait_for_maint_r_lcl;
endmodule
|
// module, since this is a DQ/DQS bus-level requirement,
// not a per-rank requirement.
localparam CASRD2CASWR = CL + (BURST_MODE == "4" ? 2 : 4) + DQRD2DQWR_DLY - CWL;
localparam CASRD2CASWR_CLKS = (nCK_PER_CLK == 1)
? CASRD2CASWR :
(nCK_PER_CLK == 2)
? ((CASRD2CASWR / 2) + (CASRD2CASWR % 2)) :
                          ((CASRD2CASWR / 4) + ((CASRD2CASWR % 4) ? 1 : 0));
localparam RTW_CNT_WIDTH = clogb2(CASRD2CASWR_CLKS);
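  // Worked example with illustrative parameter values (CL = 11, CWL = 8,
  // BURST_MODE = "8", DQRD2DQWR_DLY = 2, nCK_PER_CLK = 4):
  // CASRD2CASWR = 11 + 4 + 2 - 8 = 9 memory clocks, CASRD2CASWR_CLKS =
  // 9/4 rounded up = 3 fabric clocks, and RTW_CNT_WIDTH = clogb2(3) = 2.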
generate
begin : rtw_timer
reg read_this_rank;
always @(/*AS*/sending_col or rd_this_rank_r) begin
read_this_rank = 1'b0;
for (i = 0; i < nBANK_MACHS; i = i + 1)
read_this_rank =
read_this_rank || (sending_col[i] && rd_this_rank_r[(i*RANKS)+ID]);
end
reg [RTW_CNT_WIDTH-1:0] rtw_cnt_r;
reg [RTW_CNT_WIDTH-1:0] rtw_cnt_ns;
always @(/*AS*/rst or col_rd_wr or sending_col or rtw_cnt_r)
if (rst) rtw_cnt_ns = {RTW_CNT_WIDTH{1'b0}};
else begin
rtw_cnt_ns = rtw_cnt_r;
if (col_rd_wr && |sending_col) rtw_cnt_ns =
CASRD2CASWR_CLKS[RTW_CNT_WIDTH-1:0] - ONE[RTW_CNT_WIDTH-1:0];
else if (|rtw_cnt_r) rtw_cnt_ns = rtw_cnt_r - ONE[RTW_CNT_WIDTH-1:0];
end
wire inhbt_wr_ns = |rtw_cnt_ns;
always @(posedge clk) rtw_cnt_r <= #TCQ rtw_cnt_ns;
always @(inhbt_wr_ns) inhbt_wr = inhbt_wr_ns;
end
endgenerate
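  // The timer above loads CASRD2CASWR_CLKS - 1 whenever a read column command
  // issues (col_rd_wr with any sending_col bit set), counts down to zero, and
  // inhibits write arbitration via inhbt_wr while the next-state count is
  // nonzero.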
  // Refresh request generation. Implement a "refresh bank". Referred
  // to as pulled-in refresh in the JEDEC spec.
  // The refresh_bank_r counter increments when a refresh to this
  // rank has been decoded. In the up direction, the count saturates
  // at nREFRESH_BANK. As specified in the JEDEC spec, nREFRESH_BANK
  // is normally eight. The counter decrements with each refresh_tick,
  // saturating at zero. A refresh will be requested when the rank is
  // not busy and refresh_bank_r != nREFRESH_BANK, or when refresh_bank_r
  // equals zero.
localparam REFRESH_BANK_WIDTH = clogb2(nREFRESH_BANK + 1);
generate begin : refresh_generation
reg my_rank_busy;
always @(/*AS*/rank_busy_r) begin
my_rank_busy = 1'b0;
for (i=0; i < nBANK_MACHS; i=i+1)
my_rank_busy = my_rank_busy || rank_busy_r[(i*RANKS)+ID];
end
wire my_refresh =
insert_maint_r1 && ~maint_zq_r && ~maint_sre_r && ~maint_srx_r &&
(maint_rank_r == ID[RANK_WIDTH-1:0]);
reg [REFRESH_BANK_WIDTH-1:0] refresh_bank_r;
reg [REFRESH_BANK_WIDTH-1:0] refresh_bank_ns;
always @(/*AS*/app_ref_req or init_calib_complete or my_refresh
or refresh_bank_r or refresh_tick)
if (~init_calib_complete)
if (REFRESH_TIMER_DIV == 0)
refresh_bank_ns = nREFRESH_BANK[0+:REFRESH_BANK_WIDTH];
else refresh_bank_ns = {REFRESH_BANK_WIDTH{1'b0}};
else
case ({my_refresh, refresh_tick, app_ref_req})
3'b000, 3'b110, 3'b101, 3'b111 : refresh_bank_ns = refresh_bank_r;
3'b010, 3'b001, 3'b011 : refresh_bank_ns =
(|refresh_bank_r)?
refresh_bank_r - ONE[0+:REFRESH_BANK_WIDTH]:
refresh_bank_r;
3'b100 : refresh_bank_ns =
refresh_bank_r + ONE[0+:REFRESH_BANK_WIDTH];
endcase // case ({my_refresh, refresh_tick})
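  // The selector is {my_refresh, refresh_tick, app_ref_req}: the bank count
  // increments only when my_refresh occurs alone, decrements (saturating at
  // zero) on a refresh tick or application refresh request without
  // my_refresh, and otherwise holds.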
always @(posedge clk) refresh_bank_r <= #TCQ refresh_bank_ns;
`ifdef MC_SVA
refresh_bank_overflow: assert property (@(posedge clk)
(rst || (refresh_bank_r <= nREFRESH_BANK)));
refresh_bank_underflow: assert property (@(posedge clk)
(rst || ~(~|refresh_bank_r && ~my_refresh && refresh_tick)));
refresh_hi_priority: cover property (@(posedge clk)
(rst && ~|refresh_bank_ns && (refresh_bank_r ==
ONE[0+:REFRESH_BANK_WIDTH])));
refresh_bank_full: cover property (@(posedge clk)
(rst && (refresh_bank_r ==
nREFRESH_BANK[0+:REFRESH_BANK_WIDTH])));
`endif
assign refresh_request = init_calib_complete &&
(~|refresh_bank_r ||
((refresh_bank_r != nREFRESH_BANK[0+:REFRESH_BANK_WIDTH]) && ~my_rank_busy));
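  // After calibration, refresh is demanded when the refresh bank is empty,
  // and requested opportunistically when the bank is not full and no bank
  // machine is currently busy with this rank.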
end
endgenerate
// Periodic read request generation.
localparam PERIODIC_RD_TIMER_WIDTH = clogb2(PERIODIC_RD_TIMER_DIV + /*idle state*/ 1);
generate begin : periodic_rd_generation
if ( PERIODIC_RD_TIMER_DIV != 0 ) begin // enable periodic reads
reg read_this_rank;
always @(/*AS*/rd_this_rank_r or sending_col) begin
read_this_rank = 1'b0;
for (i = 0; i < nBANK_MACHS; i = i + 1)
read_this_rank =
read_this_rank || (sending_col[i] && rd_this_rank_r[(i*RANKS)+ID]);
end
reg read_this_rank_r;
reg read_this_rank_r1;
always @(posedge clk) read_this_rank_r <= #TCQ read_this_rank;
always @(posedge clk) read_this_rank_r1 <= #TCQ read_this_rank_r;
wire int_read_this_rank = read_this_rank &&
(((nCK_PER_CLK == 4) && read_this_rank_r) ||
((nCK_PER_CLK != 4) && read_this_rank_r1));
reg periodic_rd_cntr1_ns;
reg periodic_rd_cntr1_r;
always @(/*AS*/clear_periodic_rd_request or periodic_rd_cntr1_r) begin
periodic_rd_cntr1_ns = periodic_rd_cntr1_r;
if (clear_periodic_rd_request)
periodic_rd_cntr1_ns = periodic_rd_cntr1_r + 1'b1;
end
always @(posedge clk) begin
if (rst) periodic_rd_cntr1_r <= #TCQ 1'b0;
else periodic_rd_cntr1_r <= #TCQ periodic_rd_cntr1_ns;
end
reg [PERIODIC_RD_TIMER_WIDTH-1:0] periodic_rd_timer_r;
reg [PERIODIC_RD_TIMER_WIDTH-1:0] periodic_rd_timer_ns;
always @(/*AS*/init_calib_complete or maint_prescaler_tick_r
or periodic_rd_timer_r or int_read_this_rank) begin
periodic_rd_timer_ns = periodic_rd_timer_r;
if (~init_calib_complete)
periodic_rd_timer_ns = {PERIODIC_RD_TIMER_WIDTH{1'b0}};
else if (int_read_this_rank)
periodic_rd_timer_ns =
PERIODIC_RD_TIMER_DIV[0+:PERIODIC_RD_TIMER_WIDTH];
else if (|periodic_rd_timer_r && maint_prescaler_tick_r)
periodic_rd_timer_ns =
periodic_rd_timer_r - ONE[0+:PERIODIC_RD_TIMER_WIDTH];
end
always @(posedge clk) periodic_rd_timer_r <= #TCQ periodic_rd_timer_ns;
wire periodic_rd_timer_one = maint_prescaler_tick_r &&
(periodic_rd_timer_r == ONE[0+:PERIODIC_RD_TIMER_WIDTH]);
reg periodic_rd_request_r;
wire periodic_rd_request_ns = ~rst &&
((app_periodic_rd_req && init_calib_complete) ||
((PERIODIC_RD_TIMER_DIV != 0) && ~init_calib_complete) ||
// (~(read_this_rank || clear_periodic_rd_request) &&
(~((int_read_this_rank) || (clear_periodic_rd_request && periodic_rd_cntr1_r)) &&
(periodic_rd_request_r || periodic_rd_timer_one)));
always @(posedge clk) periodic_rd_request_r <=
#TCQ periodic_rd_request_ns;
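  // The request above is forced on before calibration completes, sets after
  // calibration on an application request or when the periodic read timer
  // reaches one, and clears on a qualified read to this rank or on every
  // other clear_periodic_rd_request pulse (tracked by the periodic_rd_cntr1_r
  // toggle).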
`ifdef MC_SVA
read_clears_periodic_rd_request: cover property (@(posedge clk)
(rst && (periodic_rd_request_r && read_this_rank)));
`endif
assign periodic_rd_request = init_calib_complete && periodic_rd_request_r;
end else
assign periodic_rd_request = 1'b0; //to disable periodic reads
end
endgenerate
endmodule
|
module mig_7series_v2_3_ddr_phy_rdlvl #
(
parameter TCQ = 100, // clk->out delay (sim only)
parameter nCK_PER_CLK = 2, // # of memory clocks per CLK
parameter CLK_PERIOD = 3333, // Internal clock period (in ps)
parameter DQ_WIDTH = 64, // # of DQ (data)
parameter DQS_CNT_WIDTH = 3, // = ceil(log2(DQS_WIDTH))
parameter DQS_WIDTH = 8, // # of DQS (strobe)
parameter DRAM_WIDTH = 8, // # of DQ per DQS
parameter RANKS = 1, // # of DRAM ranks
parameter PER_BIT_DESKEW = "ON", // Enable per-bit DQ deskew
parameter SIM_CAL_OPTION = "NONE", // Skip various calibration steps
parameter DEBUG_PORT = "OFF", // Enable debug port
parameter DRAM_TYPE = "DDR3", // Memory I/F type: "DDR3", "DDR2"
parameter OCAL_EN = "ON",
parameter IDELAY_ADJ = "ON"
)
(
input clk,
input rst,
// Calibration status, control signals
input mpr_rdlvl_start,
output mpr_rdlvl_done,
output reg mpr_last_byte_done,
output mpr_rnk_done,
input rdlvl_stg1_start,
output reg rdlvl_stg1_done /* synthesis syn_maxfan = 30 */,
output rdlvl_stg1_rnk_done,
output reg rdlvl_stg1_err,
output mpr_rdlvl_err,
output rdlvl_err,
output reg rdlvl_prech_req,
output reg rdlvl_last_byte_done,
output reg rdlvl_assrt_common,
input prech_done,
input phy_if_empty,
input [4:0] idelaye2_init_val,
// Captured data in fabric clock domain
input [2*nCK_PER_CLK*DQ_WIDTH-1:0] rd_data,
// Decrement initial Phaser_IN Fine tap delay
input dqs_po_dec_done,
input [5:0] pi_counter_read_val,
// Stage 1 calibration outputs
output reg pi_fine_dly_dec_done,
output reg pi_en_stg2_f,
output reg pi_stg2_f_incdec,
output reg pi_stg2_load,
output reg [5:0] pi_stg2_reg_l,
output [DQS_CNT_WIDTH:0] pi_stg2_rdlvl_cnt,
// To DQ IDELAY required to find left edge of
// valid window
output idelay_ce,
output idelay_inc,
input idelay_ld,
input [DQS_CNT_WIDTH:0] wrcal_cnt,
// Only output if Per-bit de-skew enabled
output reg [5*RANKS*DQ_WIDTH-1:0] dlyval_dq,
// Debug Port
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_first_edge_cnt,
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_second_edge_cnt,
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_tap_cnt,
output [5*DQS_WIDTH*RANKS-1:0] dbg_dq_idelay_tap_cnt,
input dbg_idel_up_all,
input dbg_idel_down_all,
input dbg_idel_up_cpt,
input dbg_idel_down_cpt,
input [DQS_CNT_WIDTH-1:0] dbg_sel_idel_cpt,
input dbg_sel_all_idel_cpt,
output [255:0] dbg_phy_rdlvl
);
  // Minimum time (in IDELAY taps) for which captured data must be stable for
  // the algorithm to consider a valid data eye to have been found. The read
  // leveling logic will ignore any window smaller than this value. How small
  // this number can be is limited by: (1) the algorithmic limit on how many
  // taps wide the data eye can be (3 taps), and (2) how wide the regions of
  // "instability" around the edges of the read valid window can be (i.e. the
  // need to filter out "false" windows that occur for a short # of taps
  // around the edges of the true data window, although with multi-sampling
  // during read leveling this is not as much of a concern). The larger the
  // value, the more protection against "false" windows.
localparam MIN_EYE_SIZE = 16;
// Length of calibration sequence (in # of words)
localparam CAL_PAT_LEN = 8;
// Read data shift register length
localparam RD_SHIFT_LEN = CAL_PAT_LEN / (2*nCK_PER_CLK);
// # of cycles required to perform read data shift register compare
// This is defined as from the cycle the new data is loaded until
// signal found_edge_r is valid
localparam RD_SHIFT_COMP_DELAY = 5;
// worst-case # of cycles to wait to ensure that both the SR and
// PREV_SR shift registers have valid data, and that the comparison
// of the two shift register values is valid. The "+1" at the end of
// this equation is a fudge factor, I freely admit that
localparam SR_VALID_DELAY = (2 * RD_SHIFT_LEN) + RD_SHIFT_COMP_DELAY + 1;
// # of clock cycles to wait after changing tap value or read data MUX
// to allow: (1) tap chain to settle, (2) for delayed input to propagate
// thru ISERDES, (3) for the read data comparison logic to have time to
// output the comparison of two consecutive samples of the settled read data
// The minimum delay is 16 cycles, which should be good enough to handle all
// three of the above conditions for the simulation-only case with a short
// training pattern. For H/W (or for simulation with longer training
// pattern), it will take longer to store and compare two consecutive
// samples, and the value of this parameter will reflect that
localparam PIPE_WAIT_CNT = (SR_VALID_DELAY < 8) ? 16 : (SR_VALID_DELAY + 8);
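  // Worked example using the default parameters at the top of this module
  // (CAL_PAT_LEN = 8, nCK_PER_CLK = 2, RD_SHIFT_COMP_DELAY = 5):
  // RD_SHIFT_LEN = 8/(2*2) = 2, SR_VALID_DELAY = (2*2) + 5 + 1 = 10, and
  // PIPE_WAIT_CNT = 10 + 8 = 18. For nCK_PER_CLK = 4: RD_SHIFT_LEN = 1,
  // SR_VALID_DELAY = 8, PIPE_WAIT_CNT = 16.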
// # of read data samples to examine when detecting whether an edge has
  // occurred during stage 1 calibration. Width of local param must be
// changed as appropriate. Note that there are two counters used, each
// counter can be changed independently of the other - they are used in
// cascade to create a larger counter
localparam [11:0] DETECT_EDGE_SAMPLE_CNT0 = 12'h001; //12'hFFF;
localparam [11:0] DETECT_EDGE_SAMPLE_CNT1 = 12'h001; // 12'h1FF Must be > 0
localparam [5:0] CAL1_IDLE = 6'h00;
localparam [5:0] CAL1_NEW_DQS_WAIT = 6'h01;
localparam [5:0] CAL1_STORE_FIRST_WAIT = 6'h02;
localparam [5:0] CAL1_PAT_DETECT = 6'h03;
localparam [5:0] CAL1_DQ_IDEL_TAP_INC = 6'h04;
localparam [5:0] CAL1_DQ_IDEL_TAP_INC_WAIT = 6'h05;
localparam [5:0] CAL1_DQ_IDEL_TAP_DEC = 6'h06;
localparam [5:0] CAL1_DQ_IDEL_TAP_DEC_WAIT = 6'h07;
localparam [5:0] CAL1_DETECT_EDGE = 6'h08;
localparam [5:0] CAL1_IDEL_INC_CPT = 6'h09;
localparam [5:0] CAL1_IDEL_INC_CPT_WAIT = 6'h0A;
localparam [5:0] CAL1_CALC_IDEL = 6'h0B;
localparam [5:0] CAL1_IDEL_DEC_CPT = 6'h0C;
localparam [5:0] CAL1_IDEL_DEC_CPT_WAIT = 6'h0D;
localparam [5:0] CAL1_NEXT_DQS = 6'h0E;
localparam [5:0] CAL1_DONE = 6'h0F;
localparam [5:0] CAL1_PB_STORE_FIRST_WAIT = 6'h10;
localparam [5:0] CAL1_PB_DETECT_EDGE = 6'h11;
localparam [5:0] CAL1_PB_INC_CPT = 6'h12;
localparam [5:0] CAL1_PB_INC_CPT_WAIT = 6'h13;
localparam [5:0] CAL1_PB_DEC_CPT_LEFT = 6'h14;
localparam [5:0] CAL1_PB_DEC_CPT_LEFT_WAIT = 6'h15;
localparam [5:0] CAL1_PB_DETECT_EDGE_DQ = 6'h16;
localparam [5:0] CAL1_PB_INC_DQ = 6'h17;
localparam [5:0] CAL1_PB_INC_DQ_WAIT = 6'h18;
localparam [5:0] CAL1_PB_DEC_CPT = 6'h19;
localparam [5:0] CAL1_PB_DEC_CPT_WAIT = 6'h1A;
localparam [5:0] CAL1_REGL_LOAD = 6'h1B;
localparam [5:0] CAL1_RDLVL_ERR = 6'h1C;
localparam [5:0] CAL1_MPR_NEW_DQS_WAIT = 6'h1D;
localparam [5:0] CAL1_VALID_WAIT = 6'h1E;
localparam [5:0] CAL1_MPR_PAT_DETECT = 6'h1F;
localparam [5:0] CAL1_NEW_DQS_PREWAIT = 6'h20;
integer a;
integer b;
integer d;
integer e;
integer f;
integer h;
integer g;
integer i;
integer j;
integer k;
integer l;
integer m;
integer n;
integer r;
integer p;
integer q;
integer s;
integer t;
integer u;
integer w;
integer ce_i;
integer ce_rnk_i;
integer aa;
integer bb;
integer cc;
integer dd;
genvar x;
genvar z;
reg [DQS_CNT_WIDTH:0] cal1_cnt_cpt_r;
wire [DQS_CNT_WIDTH+2:0]cal1_cnt_cpt_timing;
reg [DQS_CNT_WIDTH:0] cal1_cnt_cpt_timing_r;
reg cal1_dq_idel_ce;
reg cal1_dq_idel_inc;
reg cal1_dlyce_cpt_r;
reg cal1_dlyinc_cpt_r;
reg cal1_dlyce_dq_r;
reg cal1_dlyinc_dq_r;
reg cal1_wait_cnt_en_r;
reg [4:0] cal1_wait_cnt_r;
reg cal1_wait_r;
reg [DQ_WIDTH-1:0] dlyce_dq_r;
reg dlyinc_dq_r;
reg [4:0] dlyval_dq_reg_r [0:RANKS-1][0:DQ_WIDTH-1];
reg cal1_prech_req_r;
reg [5:0] cal1_state_r;
reg [5:0] cal1_state_r1;
reg [5:0] cnt_idel_dec_cpt_r;
reg [3:0] cnt_shift_r;
reg detect_edge_done_r;
reg [5:0] right_edge_taps_r;
reg [5:0] first_edge_taps_r;
reg found_edge_r;
reg found_first_edge_r;
reg found_second_edge_r;
reg found_stable_eye_r;
reg found_stable_eye_last_r;
reg found_edge_all_r;
reg [5:0] tap_cnt_cpt_r;
reg tap_limit_cpt_r;
reg [4:0] idel_tap_cnt_dq_pb_r;
reg idel_tap_limit_dq_pb_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall0_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall1_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise0_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise1_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall2_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall3_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise2_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise3_r;
reg mux_rd_valid_r;
reg new_cnt_cpt_r;
reg [RD_SHIFT_LEN-1:0] old_sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise3_r [DRAM_WIDTH-1:0];
reg [DRAM_WIDTH-1:0] old_sr_match_fall0_r;
reg [DRAM_WIDTH-1:0] old_sr_match_fall1_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise0_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise1_r;
reg [DRAM_WIDTH-1:0] old_sr_match_fall2_r;
reg [DRAM_WIDTH-1:0] old_sr_match_fall3_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise2_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise3_r;
reg [4:0] pb_cnt_eye_size_r [DRAM_WIDTH-1:0];
reg [DRAM_WIDTH-1:0] pb_detect_edge_done_r;
reg [DRAM_WIDTH-1:0] pb_found_edge_last_r;
reg [DRAM_WIDTH-1:0] pb_found_edge_r;
reg [DRAM_WIDTH-1:0] pb_found_first_edge_r;
reg [DRAM_WIDTH-1:0] pb_found_stable_eye_r;
reg [DRAM_WIDTH-1:0] pb_last_tap_jitter_r;
reg pi_en_stg2_f_timing;
reg pi_stg2_f_incdec_timing;
reg pi_stg2_load_timing;
reg [5:0] pi_stg2_reg_l_timing;
reg [DRAM_WIDTH-1:0] prev_sr_diff_r;
reg [RD_SHIFT_LEN-1:0] prev_sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise3_r [DRAM_WIDTH-1:0];
reg [DRAM_WIDTH-1:0] prev_sr_match_cyc2_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall0_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall1_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise0_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise1_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall2_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall3_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise2_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise3_r;
wire [DQ_WIDTH-1:0] rd_data_rise0;
wire [DQ_WIDTH-1:0] rd_data_fall0;
wire [DQ_WIDTH-1:0] rd_data_rise1;
wire [DQ_WIDTH-1:0] rd_data_fall1;
wire [DQ_WIDTH-1:0] rd_data_rise2;
wire [DQ_WIDTH-1:0] rd_data_fall2;
wire [DQ_WIDTH-1:0] rd_data_rise3;
wire [DQ_WIDTH-1:0] rd_data_fall3;
reg samp_cnt_done_r;
reg samp_edge_cnt0_en_r;
reg [11:0] samp_edge_cnt0_r;
reg samp_edge_cnt1_en_r;
reg [11:0] samp_edge_cnt1_r;
reg [DQS_CNT_WIDTH:0] rd_mux_sel_r;
reg [5:0] second_edge_taps_r;
reg [RD_SHIFT_LEN-1:0] sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise3_r [DRAM_WIDTH-1:0];
reg store_sr_r;
reg store_sr_req_pulsed_r;
reg store_sr_req_r;
reg sr_valid_r;
reg sr_valid_r1;
reg sr_valid_r2;
reg [DRAM_WIDTH-1:0] old_sr_diff_r;
reg [DRAM_WIDTH-1:0] old_sr_match_cyc2_r;
reg pat0_data_match_r;
reg pat1_data_match_r;
wire pat_data_match_r;
wire [RD_SHIFT_LEN-1:0] pat0_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall3 [3:0];
reg [DRAM_WIDTH-1:0] pat0_match_fall0_r;
reg pat0_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_fall1_r;
reg pat0_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_fall2_r;
reg pat0_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_fall3_r;
reg pat0_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise0_r;
reg pat0_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise1_r;
reg pat0_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise2_r;
reg pat0_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise3_r;
reg pat0_match_rise3_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall0_r;
reg pat1_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall1_r;
reg pat1_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall2_r;
reg pat1_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall3_r;
reg pat1_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise0_r;
reg pat1_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise1_r;
reg pat1_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise2_r;
reg pat1_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise3_r;
reg pat1_match_rise3_and_r;
reg [4:0] idelay_tap_cnt_r [0:RANKS-1][0:DQS_WIDTH-1];
reg [5*DQS_WIDTH*RANKS-1:0] idelay_tap_cnt_w;
reg [4:0] idelay_tap_cnt_slice_r;
reg idelay_tap_limit_r;
wire [RD_SHIFT_LEN-1:0] pat0_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall3 [3:0];
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise0_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall0_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise1_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall1_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise2_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall2_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise3_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall3_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise0_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall0_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise1_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall1_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise2_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall2_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise3_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall3_r;
reg idel_pat0_match_rise0_and_r;
reg idel_pat0_match_fall0_and_r;
reg idel_pat0_match_rise1_and_r;
reg idel_pat0_match_fall1_and_r;
reg idel_pat0_match_rise2_and_r;
reg idel_pat0_match_fall2_and_r;
reg idel_pat0_match_rise3_and_r;
reg idel_pat0_match_fall3_and_r;
reg idel_pat1_match_rise0_and_r;
reg idel_pat1_match_fall0_and_r;
reg idel_pat1_match_rise1_and_r;
reg idel_pat1_match_fall1_and_r;
reg idel_pat1_match_rise2_and_r;
reg idel_pat1_match_fall2_and_r;
reg idel_pat1_match_rise3_and_r;
reg idel_pat1_match_fall3_and_r;
reg idel_pat0_data_match_r;
reg idel_pat1_data_match_r;
reg idel_pat_data_match;
reg idel_pat_data_match_r;
reg [4:0] idel_dec_cnt;
reg [5:0] rdlvl_dqs_tap_cnt_r [0:RANKS-1][0:DQS_WIDTH-1];
reg [1:0] rnk_cnt_r;
reg rdlvl_rank_done_r;
reg [3:0] done_cnt;
reg [1:0] regl_rank_cnt;
reg [DQS_CNT_WIDTH:0] regl_dqs_cnt;
reg [DQS_CNT_WIDTH:0] regl_dqs_cnt_r;
wire [DQS_CNT_WIDTH+2:0]regl_dqs_cnt_timing;
reg regl_rank_done_r;
reg rdlvl_stg1_start_r;
reg dqs_po_dec_done_r1;
reg dqs_po_dec_done_r2;
reg fine_dly_dec_done_r1;
reg fine_dly_dec_done_r2;
reg [3:0] wait_cnt_r;
reg [5:0] pi_rdval_cnt;
reg pi_cnt_dec;
reg mpr_valid_r;
reg mpr_valid_r1;
reg mpr_valid_r2;
reg mpr_rd_rise0_prev_r;
reg mpr_rd_fall0_prev_r;
reg mpr_rd_rise1_prev_r;
reg mpr_rd_fall1_prev_r;
reg mpr_rd_rise2_prev_r;
reg mpr_rd_fall2_prev_r;
reg mpr_rd_rise3_prev_r;
reg mpr_rd_fall3_prev_r;
reg mpr_rdlvl_done_r;
reg mpr_rdlvl_done_r1;
reg mpr_rdlvl_done_r2;
reg mpr_rdlvl_start_r;
reg mpr_rank_done_r;
reg [2:0] stable_idel_cnt;
reg inhibit_edge_detect_r;
reg idel_pat_detect_valid_r;
reg idel_mpr_pat_detect_r;
reg mpr_pat_detect_r;
reg mpr_dec_cpt_r;
reg idel_adj_inc; //IDELAY adjustment
wire [1:0] idelay_adj;
wire pb_detect_edge_setup;
wire pb_detect_edge;
// Debug
reg [6*DQS_WIDTH-1:0] dbg_cpt_first_edge_taps;
reg [6*DQS_WIDTH-1:0] dbg_cpt_second_edge_taps;
reg [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_tap_cnt_w;
//IDELAY adjustment setting for -1
//2'b10 : IDELAY - 1
//2'b01 : IDELAY + 1
//2'b00 : No IDELAY adjustment
assign idelay_adj = (IDELAY_ADJ == "ON") ? 2'b10: 2'b00;
//***************************************************************************
// Debug
//***************************************************************************
always @(*) begin
for (d = 0; d < RANKS; d = d + 1) begin
for (e = 0; e < DQS_WIDTH; e = e + 1) begin
idelay_tap_cnt_w[(5*e+5*DQS_WIDTH*d)+:5] = idelay_tap_cnt_r[d][e];
dbg_cpt_tap_cnt_w[(6*e+6*DQS_WIDTH*d)+:6] = rdlvl_dqs_tap_cnt_r[d][e];
end
end
end
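  // Indexing example for the flattened debug buses above (default
  // DQS_WIDTH = 8): for rank d = 1, byte e = 2, idelay_tap_cnt_r[1][2] lands
  // in idelay_tap_cnt_w[(5*2 + 5*8*1) +: 5] = bits [54:50], and
  // rdlvl_dqs_tap_cnt_r[1][2] lands in dbg_cpt_tap_cnt_w[(6*2 + 6*8*1) +: 6]
  // = bits [65:60].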
assign mpr_rdlvl_err = rdlvl_stg1_err & (!mpr_rdlvl_done);
assign rdlvl_err = rdlvl_stg1_err & (mpr_rdlvl_done);
assign dbg_phy_rdlvl[0] = rdlvl_stg1_start;
assign dbg_phy_rdlvl[1] = pat_data_match_r;
assign dbg_phy_rdlvl[2] = mux_rd_valid_r;
assign dbg_phy_rdlvl[3] = idelay_tap_limit_r;
assign dbg_phy_rdlvl[8:4] = 'b0;
assign dbg_phy_rdlvl[14:9] = cal1_state_r[5:0];
assign dbg_phy_rdlvl[20:15] = cnt_idel_dec_cpt_r;
assign dbg_phy_rdlvl[21] = found_first_edge_r;
assign dbg_phy_rdlvl[22] = found_second_edge_r;
assign dbg_phy_rdlvl[23] = found_edge_r;
assign dbg_phy_rdlvl[24] = store_sr_r;
// [40:25] previously used for sr, old_sr shift registers. If connecting
// these signals again, don't forget to parameterize based on RD_SHIFT_LEN
assign dbg_phy_rdlvl[40:25] = 'b0;
assign dbg_phy_rdlvl[41] = sr_valid_r;
assign dbg_phy_rdlvl[42] = found_stable_eye_r;
assign dbg_phy_rdlvl[48:43] = tap_cnt_cpt_r;
assign dbg_phy_rdlvl[54:49] = first_edge_taps_r;
assign dbg_phy_rdlvl[60:55] = second_edge_taps_r;
assign dbg_phy_rdlvl[64:61] = cal1_cnt_cpt_timing_r;
assign dbg_phy_rdlvl[65] = cal1_dlyce_cpt_r;
assign dbg_phy_rdlvl[66] = cal1_dlyinc_cpt_r;
assign dbg_phy_rdlvl[67] = found_edge_r;
assign dbg_phy_rdlvl[68] = found_first_edge_r;
assign dbg_phy_rdlvl[73:69] = 'b0;
assign dbg_phy_rdlvl[74] = idel_pat_data_match;
assign dbg_phy_rdlvl[75] = idel_pat0_data_match_r;
assign dbg_phy_rdlvl[76] = idel_pat1_data_match_r;
assign dbg_phy_rdlvl[77] = pat0_data_match_r;
assign dbg_phy_rdlvl[78] = pat1_data_match_r;
assign dbg_phy_rdlvl[79+:5*DQS_WIDTH*RANKS] = idelay_tap_cnt_w;
assign dbg_phy_rdlvl[170+:8] = mux_rd_rise0_r;
assign dbg_phy_rdlvl[178+:8] = mux_rd_fall0_r;
assign dbg_phy_rdlvl[186+:8] = mux_rd_rise1_r;
assign dbg_phy_rdlvl[194+:8] = mux_rd_fall1_r;
assign dbg_phy_rdlvl[202+:8] = mux_rd_rise2_r;
assign dbg_phy_rdlvl[210+:8] = mux_rd_fall2_r;
assign dbg_phy_rdlvl[218+:8] = mux_rd_rise3_r;
assign dbg_phy_rdlvl[226+:8] = mux_rd_fall3_r;
//***************************************************************************
// Debug output
//***************************************************************************
// CPT taps
assign dbg_cpt_first_edge_cnt = dbg_cpt_first_edge_taps;
assign dbg_cpt_second_edge_cnt = dbg_cpt_second_edge_taps;
assign dbg_cpt_tap_cnt = dbg_cpt_tap_cnt_w;
assign dbg_dq_idelay_tap_cnt = idelay_tap_cnt_w;
// Record first and second edges found during CPT calibration
generate
always @(posedge clk)
if (rst) begin
dbg_cpt_first_edge_taps <= #TCQ 'b0;
dbg_cpt_second_edge_taps <= #TCQ 'b0;
end else if ((SIM_CAL_OPTION == "FAST_CAL") & (cal1_state_r1 == CAL1_CALC_IDEL)) begin
//for (ce_rnk_i = 0; ce_rnk_i < RANKS; ce_rnk_i = ce_rnk_i + 1) begin: gen_dbg_cpt_rnk
for (ce_i = 0; ce_i < DQS_WIDTH; ce_i = ce_i + 1) begin: gen_dbg_cpt_edge
if (found_first_edge_r)
dbg_cpt_first_edge_taps[(6*ce_i)+:6]
<= #TCQ first_edge_taps_r;
if (found_second_edge_r)
dbg_cpt_second_edge_taps[(6*ce_i)+:6]
<= #TCQ second_edge_taps_r;
end
//end
end else if (cal1_state_r == CAL1_CALC_IDEL) begin
        // Record tap counts of the first and second edges found during
// CPT calibration for each DQS group. If neither edge has
// been found, then those taps will remain 0
if (found_first_edge_r)
dbg_cpt_first_edge_taps[((cal1_cnt_cpt_timing <<2) + (cal1_cnt_cpt_timing <<1))+:6]
<= #TCQ first_edge_taps_r;
if (found_second_edge_r)
dbg_cpt_second_edge_taps[((cal1_cnt_cpt_timing <<2) + (cal1_cnt_cpt_timing <<1))+:6]
<= #TCQ second_edge_taps_r;
end
endgenerate
assign rdlvl_stg1_rnk_done = rdlvl_rank_done_r;// || regl_rank_done_r;
assign mpr_rnk_done = mpr_rank_done_r;
assign mpr_rdlvl_done = ((DRAM_TYPE == "DDR3") && (OCAL_EN == "ON")) ? //&& (SIM_CAL_OPTION == "NONE")
mpr_rdlvl_done_r : 1'b1;
//**************************************************************************
// DQS count to hard PHY during write calibration using Phaser_OUT Stage2
// coarse delay
//**************************************************************************
assign pi_stg2_rdlvl_cnt = (cal1_state_r == CAL1_REGL_LOAD) ? regl_dqs_cnt_r : cal1_cnt_cpt_r;
assign idelay_ce = cal1_dq_idel_ce;
assign idelay_inc = cal1_dq_idel_inc;
//***************************************************************************
// Assert calib_in_common in FAST_CAL mode for IDELAY tap increments to all
// DQs simultaneously
//***************************************************************************
always @(posedge clk) begin
if (rst)
rdlvl_assrt_common <= #TCQ 1'b0;
else if ((SIM_CAL_OPTION == "FAST_CAL") & rdlvl_stg1_start &
!rdlvl_stg1_start_r)
rdlvl_assrt_common <= #TCQ 1'b1;
else if (!idel_pat_data_match_r & idel_pat_data_match)
rdlvl_assrt_common <= #TCQ 1'b0;
end
//***************************************************************************
// Data mux to route appropriate bit to calibration logic - i.e. calibration
// is done sequentially, one bit (or DQS group) at a time
//***************************************************************************
generate
if (nCK_PER_CLK == 4) begin: rd_data_div4_logic_clk
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
assign rd_data_rise2 = rd_data[5*DQ_WIDTH-1:4*DQ_WIDTH];
assign rd_data_fall2 = rd_data[6*DQ_WIDTH-1:5*DQ_WIDTH];
assign rd_data_rise3 = rd_data[7*DQ_WIDTH-1:6*DQ_WIDTH];
assign rd_data_fall3 = rd_data[8*DQ_WIDTH-1:7*DQ_WIDTH];
end else begin: rd_data_div2_logic_clk
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
end
endgenerate
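  // Slicing example: with nCK_PER_CLK = 4 and the default DQ_WIDTH = 64,
  // rd_data is 2*4*64 = 512 bits, and the burst is unpacked as
  // rise0 = rd_data[63:0], fall0 = [127:64], rise1 = [191:128],
  // fall1 = [255:192], rise2 = [319:256], fall2 = [383:320],
  // rise3 = [447:384], fall3 = [511:448].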
always @(posedge clk) begin
rd_mux_sel_r <= #TCQ cal1_cnt_cpt_r;
end
// Register outputs for improved timing.
// NOTE: Will need to change when per-bit DQ deskew is supported.
  // Currently all bits in the DQS group are checked in aggregate
generate
genvar mux_i;
for (mux_i = 0; mux_i < DRAM_WIDTH; mux_i = mux_i + 1) begin: gen_mux_rd
always @(posedge clk) begin
mux_rd_rise0_r[mux_i] <= #TCQ rd_data_rise0[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall0_r[mux_i] <= #TCQ rd_data_fall0[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_rise1_r[mux_i] <= #TCQ rd_data_rise1[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall1_r[mux_i] <= #TCQ rd_data_fall1[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_rise2_r[mux_i] <= #TCQ rd_data_rise2[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall2_r[mux_i] <= #TCQ rd_data_fall2[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_rise3_r[mux_i] <= #TCQ rd_data_rise3[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall3_r[mux_i] <= #TCQ rd_data_fall3[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
end
end
endgenerate
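  // Byte-select example (default DRAM_WIDTH = 8): with rd_mux_sel_r = 2 and
  // mux_i = 3, the mux picks rd_data_rise0[8*2 + 3] = bit 19, i.e. DQ bit 3
  // of byte (DQS group) 2.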
//***************************************************************************
// MPR Read Leveling
//***************************************************************************
  // Store the previous read data for later comparison. Only bit 0 is used,
  // since the MPR contents (01010101) are generally available on DQ[0] per
  // the JEDEC spec.
always @(posedge clk)begin
if ((cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) ||
((cal1_state_r == CAL1_MPR_PAT_DETECT) && (idel_pat_detect_valid_r)))begin
mpr_rd_rise0_prev_r <= #TCQ mux_rd_rise0_r[0];
mpr_rd_fall0_prev_r <= #TCQ mux_rd_fall0_r[0];
mpr_rd_rise1_prev_r <= #TCQ mux_rd_rise1_r[0];
mpr_rd_fall1_prev_r <= #TCQ mux_rd_fall1_r[0];
mpr_rd_rise2_prev_r <= #TCQ mux_rd_rise2_r[0];
mpr_rd_fall2_prev_r <= #TCQ mux_rd_fall2_r[0];
mpr_rd_rise3_prev_r <= #TCQ mux_rd_rise3_r[0];
mpr_rd_fall3_prev_r <= #TCQ mux_rd_fall3_r[0];
end
end
generate
if (nCK_PER_CLK == 4) begin: mpr_4to1
      // Require a stable count of 2 IDELAY taps (at 78 ps resolution)
always @(posedge clk) begin
if (rst | (cal1_state_r == CAL1_NEW_DQS_PREWAIT) |
//(cal1_state_r == CAL1_DETECT_EDGE) |
(mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) |
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) |
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) |
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]) |
(mpr_rd_rise2_prev_r != mux_rd_rise2_r[0]) |
(mpr_rd_fall2_prev_r != mux_rd_fall2_r[0]) |
(mpr_rd_rise3_prev_r != mux_rd_rise3_r[0]) |
(mpr_rd_fall3_prev_r != mux_rd_fall3_r[0]))
stable_idel_cnt <= #TCQ 3'd0;
else if ((|idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing]) &
((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idel_pat_detect_valid_r))) begin
if ((mpr_rd_rise0_prev_r == mux_rd_rise0_r[0]) &
(mpr_rd_fall0_prev_r == mux_rd_fall0_r[0]) &
(mpr_rd_rise1_prev_r == mux_rd_rise1_r[0]) &
(mpr_rd_fall1_prev_r == mux_rd_fall1_r[0]) &
(mpr_rd_rise2_prev_r == mux_rd_rise2_r[0]) &
(mpr_rd_fall2_prev_r == mux_rd_fall2_r[0]) &
(mpr_rd_rise3_prev_r == mux_rd_rise3_r[0]) &
(mpr_rd_fall3_prev_r == mux_rd_fall3_r[0]) &
(stable_idel_cnt < 3'd2))
stable_idel_cnt <= #TCQ stable_idel_cnt + 1;
end
end
always @(posedge clk) begin
if (rst |
(mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r &
mpr_rd_rise2_prev_r & ~mpr_rd_fall2_prev_r &
mpr_rd_rise3_prev_r & ~mpr_rd_fall3_prev_r))
inhibit_edge_detect_r <= 1'b1;
// Wait for settling time after idelay tap increment before
// de-asserting inhibit_edge_detect_r
else if ((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd1) &
(~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r &
~mpr_rd_rise2_prev_r & mpr_rd_fall2_prev_r &
~mpr_rd_rise3_prev_r & mpr_rd_fall3_prev_r))
inhibit_edge_detect_r <= 1'b0;
end
//checking for transition from 01010101 to 10101010
always @(posedge clk)begin
if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) |
inhibit_edge_detect_r)
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 10101010 is not the correct pattern
else if ((mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r &
mpr_rd_rise2_prev_r & ~mpr_rd_fall2_prev_r &
mpr_rd_rise3_prev_r & ~mpr_rd_fall3_prev_r) ||
((stable_idel_cnt < 3'd2) & (cal1_state_r == CAL1_MPR_PAT_DETECT)
&& (idel_pat_detect_valid_r)))
//|| (idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] < 5'd2))
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 01010101 to 10101010 is the correct transition
else if ((~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r &
~mpr_rd_rise2_prev_r & mpr_rd_fall2_prev_r &
~mpr_rd_rise3_prev_r & mpr_rd_fall3_prev_r) &
(stable_idel_cnt == 3'd2) &
((mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) ||
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) ||
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) ||
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]) ||
(mpr_rd_rise2_prev_r != mux_rd_rise2_r[0]) ||
(mpr_rd_fall2_prev_r != mux_rd_fall2_r[0]) ||
(mpr_rd_rise3_prev_r != mux_rd_rise3_r[0]) ||
(mpr_rd_fall3_prev_r != mux_rd_fall3_r[0])))
idel_mpr_pat_detect_r <= #TCQ 1'b1;
end
end else if (nCK_PER_CLK == 2) begin: mpr_2to1
      // Require a stable count of 2 IDELAY taps (at 78 ps resolution)
always @(posedge clk) begin
if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) |
(mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) |
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) |
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) |
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]))
stable_idel_cnt <= #TCQ 3'd0;
else if ((idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd0) &
((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idel_pat_detect_valid_r))) begin
if ((mpr_rd_rise0_prev_r == mux_rd_rise0_r[0]) &
(mpr_rd_fall0_prev_r == mux_rd_fall0_r[0]) &
(mpr_rd_rise1_prev_r == mux_rd_rise1_r[0]) &
(mpr_rd_fall1_prev_r == mux_rd_fall1_r[0]) &
(stable_idel_cnt < 3'd2))
stable_idel_cnt <= #TCQ stable_idel_cnt + 1;
end
end
always @(posedge clk) begin
if (rst |
(mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r))
inhibit_edge_detect_r <= 1'b1;
else if ((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd1) &
(~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r))
inhibit_edge_detect_r <= 1'b0;
end
//checking for transition from 01010101 to 10101010
always @(posedge clk)begin
if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) |
inhibit_edge_detect_r)
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 1010 is not the correct pattern
else if ((mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r) ||
((stable_idel_cnt < 3'd2) & (cal1_state_r == CAL1_MPR_PAT_DETECT)
& (idel_pat_detect_valid_r)))
// ||(idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] < 5'd2))
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 0101 to 1010 is the correct transition
else if ((~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r) &
(stable_idel_cnt == 3'd2) &
((mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) ||
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) ||
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) ||
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0])))
idel_mpr_pat_detect_r <= #TCQ 1'b1;
end
end
endgenerate
// Registered signal indicates when mux_rd_rise/fall_r is valid
always @(posedge clk)
mux_rd_valid_r <= #TCQ ~phy_if_empty;
//***************************************************************************
// Decrement initial Phaser_IN fine delay value before proceeding with
// read calibration
//***************************************************************************
always @(posedge clk) begin
dqs_po_dec_done_r1 <= #TCQ dqs_po_dec_done;
dqs_po_dec_done_r2 <= #TCQ dqs_po_dec_done_r1;
fine_dly_dec_done_r2 <= #TCQ fine_dly_dec_done_r1;
pi_fine_dly_dec_done <= #TCQ fine_dly_dec_done_r2;
end
always @(posedge clk) begin
if (rst || pi_cnt_dec)
wait_cnt_r <= #TCQ 'd8;
else if (dqs_po_dec_done_r2 && (wait_cnt_r > 'd0))
wait_cnt_r <= #TCQ wait_cnt_r - 1;
end
always @(posedge clk) begin
if (rst) begin
pi_rdval_cnt <= #TCQ 'd0;
end else if (dqs_po_dec_done_r1 && ~dqs_po_dec_done_r2) begin
pi_rdval_cnt <= #TCQ pi_counter_read_val;
end else if (pi_rdval_cnt > 'd0) begin
if (pi_cnt_dec)
pi_rdval_cnt <= #TCQ pi_rdval_cnt - 1;
else
pi_rdval_cnt <= #TCQ pi_rdval_cnt;
end else if (pi_rdval_cnt == 'd0) begin
pi_rdval_cnt <= #TCQ pi_rdval_cnt;
end
end
always @(posedge clk) begin
if (rst || (pi_rdval_cnt == 'd0))
pi_cnt_dec <= #TCQ 1'b0;
else if (dqs_po_dec_done_r2 && (pi_rdval_cnt > 'd0)
&& (wait_cnt_r == 'd1))
pi_cnt_dec <= #TCQ 1'b1;
else
pi_cnt_dec <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (rst) begin
fine_dly_dec_done_r1 <= #TCQ 1'b0;
end else if (((pi_cnt_dec == 'd1) && (pi_rdval_cnt == 'd1)) ||
(dqs_po_dec_done_r2 && (pi_rdval_cnt == 'd0))) begin
fine_dly_dec_done_r1 <= #TCQ 1'b1;
end
end
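  // Sequencing summary of the logic above: once dqs_po_dec_done propagates
  // through its two flops, pi_rdval_cnt captures the Phaser_IN fine-delay
  // readback (pi_counter_read_val); pi_cnt_dec then pulses roughly once
  // every 8 clocks (wait_cnt_r), driving a Phaser_IN fine-tap decrement via
  // pi_en_stg2_f below and decrementing pi_rdval_cnt each time, and
  // fine_dly_dec_done_r1 asserts once the count reaches zero (or immediately
  // if the readback was already zero).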
//***************************************************************************
// Demultiplexor to control Phaser_IN delay values
//***************************************************************************
// Read DQS
always @(posedge clk) begin
if (rst) begin
pi_en_stg2_f_timing <= #TCQ 'b0;
pi_stg2_f_incdec_timing <= #TCQ 'b0;
end else if (pi_cnt_dec) begin
pi_en_stg2_f_timing <= #TCQ 'b1;
pi_stg2_f_incdec_timing <= #TCQ 'b0;
end else if (cal1_dlyce_cpt_r) begin
if ((SIM_CAL_OPTION == "NONE") ||
(SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin
// Change only specified DQS
pi_en_stg2_f_timing <= #TCQ 1'b1;
pi_stg2_f_incdec_timing <= #TCQ cal1_dlyinc_cpt_r;
end else if (SIM_CAL_OPTION == "FAST_CAL") begin
// if simulating, and "shortcuts" for calibration enabled, apply
// results to all DQSs (i.e. assume same delay on all
// DQSs).
pi_en_stg2_f_timing <= #TCQ 1'b1;
pi_stg2_f_incdec_timing <= #TCQ cal1_dlyinc_cpt_r;
end
end else begin
pi_en_stg2_f_timing <= #TCQ 'b0;
pi_stg2_f_incdec_timing <= #TCQ 'b0;
end
end
// registered for timing
always @(posedge clk) begin
pi_en_stg2_f <= #TCQ pi_en_stg2_f_timing;
pi_stg2_f_incdec <= #TCQ pi_stg2_f_incdec_timing;
end
  // This counter is used to implement the settling time between
  // Phaser_IN rank register loads to different DQSs
always @(posedge clk) begin
if (rst)
done_cnt <= #TCQ 'b0;
else if (((cal1_state_r == CAL1_REGL_LOAD) &&
(cal1_state_r1 == CAL1_NEXT_DQS)) ||
((done_cnt == 4'd1) && (cal1_state_r != CAL1_DONE)))
done_cnt <= #TCQ 4'b1010;
else if (done_cnt > 'b0)
done_cnt <= #TCQ done_cnt - 1;
end
  // During rank register loading the rank count may need to be sent to
  // Phaser_IN via the phy_ctl_wd. If so, phy_init will have to
  // issue NOPs during rank register loading with the appropriate
  // rank count
always @(posedge clk) begin
if (rst || (regl_rank_done_r == 1'b1))
regl_rank_done_r <= #TCQ 1'b0;
else if ((regl_dqs_cnt == DQS_WIDTH-1) &&
(regl_rank_cnt != RANKS-1) &&
(done_cnt == 4'd1))
regl_rank_done_r <= #TCQ 1'b1;
end
  // Temp wire for timing.
  // The expression 6*regl_dqs_cnt in the always block below causes timing
  // issues due to DSP block inference. It is replaced with a shift-left-by-2
  // plus a shift-left-by-1 to avoid the DSP multiplier.
assign regl_dqs_cnt_timing = {2'd0, regl_dqs_cnt};
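  // Illustrative arithmetic for the shift-add form referenced above:
  // 6*n == (n << 2) + (n << 1); e.g. for n = 5, 20 + 10 = 30. The same form
  // is used for the debug edge-tap indexing earlier in this module
  // ((cal1_cnt_cpt_timing << 2) + (cal1_cnt_cpt_timing << 1)).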
// Load Phaser_OUT rank register with rdlvl delay value
// for each DQS per rank.
always @(posedge clk) begin
if (rst || (done_cnt == 4'd0)) begin
pi_stg2_load_timing <= #TCQ 'b0;
pi_stg2_reg_l_timing <= #TCQ 'b0;
end else if ((cal1_state_r == CAL1_REGL_LOAD) &&
(regl_dqs_cnt <= DQS_WIDTH-1) && (done_cnt == 4'd1)) begin
pi_stg2_load_timing <= #TCQ 'b1;
pi_stg2_reg_l_timing <= #TCQ
rdlvl_dqs_tap_cnt_r[rnk_cnt_r][regl_dqs_cnt];
end else begin
pi_stg2_load_timing <= #TCQ 'b0;
pi_stg2_reg_l_timing <= #TCQ 'b0;
end
end
// registered for timing
always @(posedge clk) begin
pi_stg2_load <= #TCQ pi_stg2_load_timing;
pi_stg2_reg_l <= #TCQ pi_stg2_reg_l_timing;
end
always @(posedge clk) begin
if (rst || (done_cnt == 4'd0) ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
regl_rank_cnt <= #TCQ 2'b00;
else if ((cal1_state_r == CAL1_REGL_LOAD) &&
(regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1)) begin
if (regl_rank_cnt == RANKS-1)
regl_rank_cnt <= #TCQ regl_rank_cnt;
else
regl_rank_cnt <= #TCQ regl_rank_cnt + 1;
end
end
always @(posedge clk) begin
if (rst || (done_cnt == 4'd0) ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
regl_dqs_cnt <= #TCQ {DQS_CNT_WIDTH+1{1'b0}};
else if ((cal1_state_r == CAL1_REGL_LOAD) &&
(regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1)) begin
if (regl_rank_cnt == RANKS-1)
regl_dqs_cnt <= #TCQ regl_dqs_cnt;
else
regl_dqs_cnt <= #TCQ 'b0;
end else if ((cal1_state_r == CAL1_REGL_LOAD) && (regl_dqs_cnt != DQS_WIDTH-1)
&& (done_cnt == 4'd1))
regl_dqs_cnt <= #TCQ regl_dqs_cnt + 1;
else
regl_dqs_cnt <= #TCQ regl_dqs_cnt;
end
always @(posedge clk)
regl_dqs_cnt_r <= #TCQ regl_dqs_cnt;
//*****************************************************************
// DQ Stage 1 CALIBRATION INCREMENT/DECREMENT LOGIC:
  // The actual IDELAY elements for each of the DQ bits are set via the
// DLYVAL parallel load port. However, the stage 1 calibration
// algorithm (well most of it) only needs to increment or decrement the DQ
// IDELAY value by 1 at any one time.
//*****************************************************************
// Chip-select generation for each of the individual counters tracking
// IDELAY tap values for each DQ
generate
for (z = 0; z < DQS_WIDTH; z = z + 1) begin: gen_dlyce_dq
always @(posedge clk)
if (rst)
dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0;
else
if (SIM_CAL_OPTION == "SKIP_CAL")
// If skipping calibration altogether (only for simulation), no
// need to set DQ IODELAY values - they are hardcoded
dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0;
else if (SIM_CAL_OPTION == "FAST_CAL") begin
// If fast calibration option (simulation only) selected, DQ
// IODELAYs across all bytes are updated simultaneously
// (although per-bit deskew within DQS[0] is still supported)
for (h = 0; h < DRAM_WIDTH; h = h + 1) begin
dlyce_dq_r[DRAM_WIDTH*z + h] <= #TCQ cal1_dlyce_dq_r;
end
end else if ((SIM_CAL_OPTION == "NONE") ||
(SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin
if (cal1_cnt_cpt_r == z) begin
for (g = 0; g < DRAM_WIDTH; g = g + 1) begin
dlyce_dq_r[DRAM_WIDTH*z + g]
<= #TCQ cal1_dlyce_dq_r;
end
end else
dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0;
end
end
endgenerate
// Also delay increment/decrement control to match delay on DLYCE
always @(posedge clk)
if (rst)
dlyinc_dq_r <= #TCQ 1'b0;
else
dlyinc_dq_r <= #TCQ cal1_dlyinc_dq_r;
// Each DQ has a counter associated with it to record current read-leveling
// delay value
always @(posedge clk)
// Reset or skipping calibration all together
if (rst | (SIM_CAL_OPTION == "SKIP_CAL")) begin
for (aa = 0; aa < RANKS; aa = aa + 1) begin: rst_dlyval_dq_reg_r
for (bb = 0; bb < DQ_WIDTH; bb = bb + 1)
dlyval_dq_reg_r[aa][bb] <= #TCQ 'b0;
end
end else if (SIM_CAL_OPTION == "FAST_CAL") begin
for (n = 0; n < RANKS; n = n + 1) begin: gen_dlyval_dq_reg_rnk
for (r = 0; r < DQ_WIDTH; r = r + 1) begin: gen_dlyval_dq_reg
if (dlyce_dq_r[r]) begin
if (dlyinc_dq_r)
dlyval_dq_reg_r[n][r] <= #TCQ dlyval_dq_reg_r[n][r] + 5'h01;
else
dlyval_dq_reg_r[n][r] <= #TCQ dlyval_dq_reg_r[n][r] - 5'h01;
end
end
end
end else begin
if (dlyce_dq_r[cal1_cnt_cpt_r]) begin
if (dlyinc_dq_r)
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] <= #TCQ
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] + 5'h01;
else
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] <= #TCQ
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] - 5'h01;
end
end
// Register for timing (help with logic placement)
always @(posedge clk) begin
for (cc = 0; cc < RANKS; cc = cc + 1) begin: dlyval_dq_assgn
for (dd = 0; dd < DQ_WIDTH; dd = dd + 1)
dlyval_dq[((5*dd)+(cc*DQ_WIDTH*5))+:5] <= #TCQ dlyval_dq_reg_r[cc][dd];
end
end
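  // Indexing example for the flattened dlyval_dq output (default
  // DQ_WIDTH = 64): for rank cc = 1, DQ bit dd = 2, dlyval_dq_reg_r[1][2]
  // lands in dlyval_dq[(5*2 + 1*64*5) +: 5] = bits [334:330].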
//***************************************************************************
// Generate signal used to delay calibration state machine - used when:
// (1) IDELAY value changed
// (2) RD_MUX_SEL value changed
// Use when a delay is necessary to give the change time to propagate
// through the data pipeline (through IDELAY and ISERDES, and fabric
// pipeline stages)
//***************************************************************************
// List all the stage 1 calibration wait states here.
// verilint STARC-2.7.3.3b off
always @(posedge clk)
if ((cal1_state_r == CAL1_NEW_DQS_WAIT) ||
(cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) ||
(cal1_state_r == CAL1_NEW_DQS_PREWAIT) ||
(cal1_state_r == CAL1_VALID_WAIT) ||
(cal1_state_r == CAL1_PB_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_PB_INC_CPT_WAIT) ||
(cal1_state_r == CAL1_PB_DEC_CPT_LEFT_WAIT) ||
(cal1_state_r == CAL1_PB_INC_DQ_WAIT) ||
(cal1_state_r == CAL1_PB_DEC_CPT_WAIT) ||
(cal1_state_r == CAL1_IDEL_INC_CPT_WAIT) ||
(cal1_state_r == CAL1_IDEL_DEC_CPT_WAIT) ||
(cal1_state_r == CAL1_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_DQ_IDEL_TAP_INC_WAIT) ||
(cal1_state_r == CAL1_DQ_IDEL_TAP_DEC_WAIT))
cal1_wait_cnt_en_r <= #TCQ 1'b1;
else
cal1_wait_cnt_en_r <= #TCQ 1'b0;
// verilint STARC-2.7.3.3b on
always @(posedge clk)
if (!cal1_wait_cnt_en_r) begin
cal1_wait_cnt_r <= #TCQ 5'b00000;
cal1_wait_r <= #TCQ 1'b1;
end else begin
if (cal1_wait_cnt_r != PIPE_WAIT_CNT - 1) begin
cal1_wait_cnt_r <= #TCQ cal1_wait_cnt_r + 1;
cal1_wait_r <= #TCQ 1'b1;
end else begin
// Need to reset to 0 to handle the case when there are two
// different WAIT states back-to-back
cal1_wait_cnt_r <= #TCQ 5'b00000;
cal1_wait_r <= #TCQ 1'b0;
end
end
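  // Timing note: after one of the WAIT states above is entered,
  // cal1_wait_cnt_en_r asserts a cycle later and the counter runs
  // 0..PIPE_WAIT_CNT-1, so cal1_wait_r drops for a single cycle roughly
  // PIPE_WAIT_CNT clocks after entering the wait state (about 18 clocks
  // with the nCK_PER_CLK = 2 defaults).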
//***************************************************************************
  // Generate request to PHY_INIT logic to issue a precharge. Required when
  // calibration can take a long time (during which there are only constant
  // reads present on this bus). In this case periodic precharges must be
  // issued to avoid tRAS violation. This signal must meet the following
  // requirements: (1) only transition from 0->1 when prech is first needed,
  // (2) stay at 1 and only transition 1->0 when RDLVL_PRECH_DONE is asserted
//***************************************************************************
always @(posedge clk)
if (rst)
rdlvl_prech_req <= #TCQ 1'b0;
else
rdlvl_prech_req <= #TCQ cal1_prech_req_r;
//***************************************************************************
  // Serial-to-parallel register to store last RD_SHIFT_LEN cycles of
// data from ISERDES. The value of this register is also stored, so that
// previous and current values of the ISERDES data can be compared while
// varying the IODELAY taps to see if an "edge" of the data valid window
// has been encountered since the last IODELAY tap adjustment
//***************************************************************************
//***************************************************************************
  // Shift register to store last RD_SHIFT_LEN cycles of data from ISERDES
// NOTE: Written using discrete flops, but SRL can be used if the matching
// logic does the comparison sequentially, rather than parallel
//***************************************************************************
generate
genvar rd_i;
if (nCK_PER_CLK == 4) begin: gen_sr_div4
if (RD_SHIFT_LEN == 1) begin: gen_sr_len_eq1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ mux_rd_rise0_r[rd_i];
sr_fall0_r[rd_i] <= #TCQ mux_rd_fall0_r[rd_i];
sr_rise1_r[rd_i] <= #TCQ mux_rd_rise1_r[rd_i];
sr_fall1_r[rd_i] <= #TCQ mux_rd_fall1_r[rd_i];
sr_rise2_r[rd_i] <= #TCQ mux_rd_rise2_r[rd_i];
sr_fall2_r[rd_i] <= #TCQ mux_rd_fall2_r[rd_i];
sr_rise3_r[rd_i] <= #TCQ mux_rd_rise3_r[rd_i];
sr_fall3_r[rd_i] <= #TCQ mux_rd_fall3_r[rd_i];
end
end
end
end else if (RD_SHIFT_LEN > 1) begin: gen_sr_len_gt1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ {sr_rise0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise0_r[rd_i]};
sr_fall0_r[rd_i] <= #TCQ {sr_fall0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall0_r[rd_i]};
sr_rise1_r[rd_i] <= #TCQ {sr_rise1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise1_r[rd_i]};
sr_fall1_r[rd_i] <= #TCQ {sr_fall1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall1_r[rd_i]};
sr_rise2_r[rd_i] <= #TCQ {sr_rise2_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise2_r[rd_i]};
sr_fall2_r[rd_i] <= #TCQ {sr_fall2_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall2_r[rd_i]};
sr_rise3_r[rd_i] <= #TCQ {sr_rise3_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise3_r[rd_i]};
sr_fall3_r[rd_i] <= #TCQ {sr_fall3_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall3_r[rd_i]};
end
end
end
end
end else if (nCK_PER_CLK == 2) begin: gen_sr_div2
if (RD_SHIFT_LEN == 1) begin: gen_sr_len_eq1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ {mux_rd_rise0_r[rd_i]};
sr_fall0_r[rd_i] <= #TCQ {mux_rd_fall0_r[rd_i]};
sr_rise1_r[rd_i] <= #TCQ {mux_rd_rise1_r[rd_i]};
sr_fall1_r[rd_i] <= #TCQ {mux_rd_fall1_r[rd_i]};
end
end
end
end else if (RD_SHIFT_LEN > 1) begin: gen_sr_len_gt1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ {sr_rise0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise0_r[rd_i]};
sr_fall0_r[rd_i] <= #TCQ {sr_fall0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall0_r[rd_i]};
sr_rise1_r[rd_i] <= #TCQ {sr_rise1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise1_r[rd_i]};
sr_fall1_r[rd_i] <= #TCQ {sr_fall1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall1_r[rd_i]};
end
end
end
end
end
endgenerate
//***************************************************************************
// Conversion to pattern calibration
//***************************************************************************
// Pattern for DQ IDELAY calibration
//*****************************************************************
  // Expected data pattern when DQ is shifted to the right such that
  // DQS is before the left edge of the DVW:
// Based on pattern of ({rise,fall}) =
// 0x1, 0xB, 0x4, 0x4, 0xB, 0x9
// Each nibble will look like:
// bit3: 0, 1, 0, 0, 1, 1
// bit2: 0, 0, 1, 1, 0, 0
// bit1: 0, 1, 0, 0, 1, 0
// bit0: 1, 1, 0, 0, 1, 1
// Or if the write is early it could look like:
// 0x4, 0x4, 0xB, 0x9, 0x6, 0xE
// bit3: 0, 0, 1, 1, 0, 1
// bit2: 1, 1, 0, 0, 1, 1
// bit1: 0, 0, 1, 0, 1, 1
// bit0: 0, 0, 1, 1, 0, 0
// Change the hard-coded pattern below accordingly as RD_SHIFT_LEN
// and the actual training pattern contents change
//*****************************************************************
generate
if (nCK_PER_CLK == 4) begin: gen_pat_div4
// Pattern for DQ IDELAY increment
// Target pattern for "early write"
assign {idel_pat0_rise0[3], idel_pat0_rise0[2],
idel_pat0_rise0[1], idel_pat0_rise0[0]} = 4'h1;
assign {idel_pat0_fall0[3], idel_pat0_fall0[2],
idel_pat0_fall0[1], idel_pat0_fall0[0]} = 4'h7;
assign {idel_pat0_rise1[3], idel_pat0_rise1[2],
idel_pat0_rise1[1], idel_pat0_rise1[0]} = 4'hE;
assign {idel_pat0_fall1[3], idel_pat0_fall1[2],
idel_pat0_fall1[1], idel_pat0_fall1[0]} = 4'hC;
assign {idel_pat0_rise2[3], idel_pat0_rise2[2],
idel_pat0_rise2[1], idel_pat0_rise2[0]} = 4'h9;
assign {idel_pat0_fall2[3], idel_pat0_fall2[2],
idel_pat0_fall2[1], idel_pat0_fall2[0]} = 4'h2;
assign {idel_pat0_rise3[3], idel_pat0_rise3[2],
idel_pat0_rise3[1], idel_pat0_rise3[0]} = 4'h4;
assign {idel_pat0_fall3[3], idel_pat0_fall3[2],
idel_pat0_fall3[1], idel_pat0_fall3[0]} = 4'hB;
// Target pattern for "on-time write"
assign {idel_pat1_rise0[3], idel_pat1_rise0[2],
idel_pat1_rise0[1], idel_pat1_rise0[0]} = 4'h4;
assign {idel_pat1_fall0[3], idel_pat1_fall0[2],
idel_pat1_fall0[1], idel_pat1_fall0[0]} = 4'h9;
assign {idel_pat1_rise1[3], idel_pat1_rise1[2],
idel_pat1_rise1[1], idel_pat1_rise1[0]} = 4'h3;
assign {idel_pat1_fall1[3], idel_pat1_fall1[2],
idel_pat1_fall1[1], idel_pat1_fall1[0]} = 4'h7;
assign {idel_pat1_rise2[3], idel_pat1_rise2[2],
idel_pat1_rise2[1], idel_pat1_rise2[0]} = 4'hE;
assign {idel_pat1_fall2[3], idel_pat1_fall2[2],
idel_pat1_fall2[1], idel_pat1_fall2[0]} = 4'hC;
assign {idel_pat1_rise3[3], idel_pat1_rise3[2],
idel_pat1_rise3[1], idel_pat1_rise3[0]} = 4'h9;
assign {idel_pat1_fall3[3], idel_pat1_fall3[2],
idel_pat1_fall3[1], idel_pat1_fall3[0]} = 4'h2;
// Correct data valid window for "early write"
assign {pat0_rise0[3], pat0_rise0[2],
pat0_rise0[1], pat0_rise0[0]} = 4'h7;
assign {pat0_fall0[3], pat0_fall0[2],
pat0_fall0[1], pat0_fall0[0]} = 4'hE;
assign {pat0_rise1[3], pat0_rise1[2],
pat0_rise1[1], pat0_rise1[0]} = 4'hC;
assign {pat0_fall1[3], pat0_fall1[2],
pat0_fall1[1], pat0_fall1[0]} = 4'h9;
assign {pat0_rise2[3], pat0_rise2[2],
pat0_rise2[1], pat0_rise2[0]} = 4'h2;
assign {pat0_fall2[3], pat0_fall2[2],
pat0_fall2[1], pat0_fall2[0]} = 4'h4;
assign {pat0_rise3[3], pat0_rise3[2],
pat0_rise3[1], pat0_rise3[0]} = 4'hB;
assign {pat0_fall3[3], pat0_fall3[2],
pat0_fall3[1], pat0_fall3[0]} = 4'h1;
// Correct data valid window for "on-time write"
assign {pat1_rise0[3], pat1_rise0[2],
pat1_rise0[1], pat1_rise0[0]} = 4'h9;
assign {pat1_fall0[3], pat1_fall0[2],
pat1_fall0[1], pat1_fall0[0]} = 4'h3;
assign {pat1_rise1[3], pat1_rise1[2],
pat1_rise1[1], pat1_rise1[0]} = 4'h7;
assign {pat1_fall1[3], pat1_fall1[2],
pat1_fall1[1], pat1_fall1[0]} = 4'hE;
assign {pat1_rise2[3], pat1_rise2[2],
pat1_rise2[1], pat1_rise2[0]} = 4'hC;
assign {pat1_fall2[3], pat1_fall2[2],
pat1_fall2[1], pat1_fall2[0]} = 4'h9;
assign {pat1_rise3[3], pat1_rise3[2],
pat1_rise3[1], pat1_rise3[0]} = 4'h2;
assign {pat1_fall3[3], pat1_fall3[2],
pat1_fall3[1], pat1_fall3[0]} = 4'h4;
end else if (nCK_PER_CLK == 2) begin: gen_pat_div2
// Pattern for DQ IDELAY increment
// Target pattern for "early write"
assign idel_pat0_rise0[3] = 2'b01;
assign idel_pat0_fall0[3] = 2'b00;
assign idel_pat0_rise1[3] = 2'b10;
assign idel_pat0_fall1[3] = 2'b11;
assign idel_pat0_rise0[2] = 2'b00;
assign idel_pat0_fall0[2] = 2'b10;
assign idel_pat0_rise1[2] = 2'b11;
assign idel_pat0_fall1[2] = 2'b10;
assign idel_pat0_rise0[1] = 2'b00;
assign idel_pat0_fall0[1] = 2'b11;
assign idel_pat0_rise1[1] = 2'b10;
assign idel_pat0_fall1[1] = 2'b01;
assign idel_pat0_rise0[0] = 2'b11;
assign idel_pat0_fall0[0] = 2'b10;
assign idel_pat0_rise1[0] = 2'b00;
assign idel_pat0_fall1[0] = 2'b01;
// Target pattern for "on-time write"
assign idel_pat1_rise0[3] = 2'b01;
assign idel_pat1_fall0[3] = 2'b11;
assign idel_pat1_rise1[3] = 2'b01;
assign idel_pat1_fall1[3] = 2'b00;
assign idel_pat1_rise0[2] = 2'b11;
assign idel_pat1_fall0[2] = 2'b01;
assign idel_pat1_rise1[2] = 2'b00;
assign idel_pat1_fall1[2] = 2'b10;
assign idel_pat1_rise0[1] = 2'b01;
assign idel_pat1_fall0[1] = 2'b00;
assign idel_pat1_rise1[1] = 2'b10;
assign idel_pat1_fall1[1] = 2'b11;
assign idel_pat1_rise0[0] = 2'b00;
assign idel_pat1_fall0[0] = 2'b10;
assign idel_pat1_rise1[0] = 2'b11;
assign idel_pat1_fall1[0] = 2'b10;
// Correct data valid window for "early write"
assign pat0_rise0[3] = 2'b00;
assign pat0_fall0[3] = 2'b10;
assign pat0_rise1[3] = 2'b11;
assign pat0_fall1[3] = 2'b10;
assign pat0_rise0[2] = 2'b10;
assign pat0_fall0[2] = 2'b11;
assign pat0_rise1[2] = 2'b10;
assign pat0_fall1[2] = 2'b00;
assign pat0_rise0[1] = 2'b11;
assign pat0_fall0[1] = 2'b10;
assign pat0_rise1[1] = 2'b01;
assign pat0_fall1[1] = 2'b00;
assign pat0_rise0[0] = 2'b10;
assign pat0_fall0[0] = 2'b00;
assign pat0_rise1[0] = 2'b01;
assign pat0_fall1[0] = 2'b11;
// Correct data valid window for "on-time write"
assign pat1_rise0[3] = 2'b11;
assign pat1_fall0[3] = 2'b01;
assign pat1_rise1[3] = 2'b00;
assign pat1_fall1[3] = 2'b10;
assign pat1_rise0[2] = 2'b01;
assign pat1_fall0[2] = 2'b00;
assign pat1_rise1[2] = 2'b10;
assign pat1_fall1[2] = 2'b11;
assign pat1_rise0[1] = 2'b00;
assign pat1_fall0[1] = 2'b10;
assign pat1_rise1[1] = 2'b11;
assign pat1_fall1[1] = 2'b10;
assign pat1_rise0[0] = 2'b10;
assign pat1_fall0[0] = 2'b11;
assign pat1_rise1[0] = 2'b10;
assign pat1_fall1[0] = 2'b00;
end
endgenerate
  // Each bit of each byte is compared to the expected pattern.
  // This was done to drastically decrease the chance that invalid data
  // clocked in while the DQ bus is tri-stated (along with a combination of
  // the correct data) will resemble the expected data pattern. A better fix
  // for this is to change the training pattern and/or make the pattern
  // longer.
generate
genvar pt_i;
if (nCK_PER_CLK == 4) begin: gen_pat_match_div4
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
// DQ IDELAY pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat0_rise0[pt_i%4])
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat0_fall0[pt_i%4])
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat0_rise1[pt_i%4])
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat0_fall1[pt_i%4])
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == idel_pat0_rise2[pt_i%4])
idel_pat0_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == idel_pat0_fall2[pt_i%4])
idel_pat0_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == idel_pat0_rise3[pt_i%4])
idel_pat0_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == idel_pat0_fall3[pt_i%4])
idel_pat0_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat1_rise0[pt_i%4])
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat1_fall0[pt_i%4])
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat1_rise1[pt_i%4])
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat1_fall1[pt_i%4])
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == idel_pat1_rise2[pt_i%4])
idel_pat1_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == idel_pat1_fall2[pt_i%4])
idel_pat1_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == idel_pat1_rise3[pt_i%4])
idel_pat1_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == idel_pat1_fall3[pt_i%4])
idel_pat1_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
// DQS DVW pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat0_rise0[pt_i%4])
pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat0_fall0[pt_i%4])
pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat0_rise1[pt_i%4])
pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat0_fall1[pt_i%4])
pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat0_rise2[pt_i%4])
pat0_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat0_fall2[pt_i%4])
pat0_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == pat0_rise3[pt_i%4])
pat0_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == pat0_fall3[pt_i%4])
pat0_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat1_rise0[pt_i%4])
pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat1_fall0[pt_i%4])
pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat1_rise1[pt_i%4])
pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat1_fall1[pt_i%4])
pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat1_rise2[pt_i%4])
pat1_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat1_fall2[pt_i%4])
pat1_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == pat1_rise3[pt_i%4])
pat1_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == pat1_fall3[pt_i%4])
pat1_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
end
// Combine pattern match "subterms" for DQ-IDELAY stage
always @(posedge clk) begin
idel_pat0_match_rise0_and_r <= #TCQ &idel_pat0_match_rise0_r;
idel_pat0_match_fall0_and_r <= #TCQ &idel_pat0_match_fall0_r;
idel_pat0_match_rise1_and_r <= #TCQ &idel_pat0_match_rise1_r;
idel_pat0_match_fall1_and_r <= #TCQ &idel_pat0_match_fall1_r;
idel_pat0_match_rise2_and_r <= #TCQ &idel_pat0_match_rise2_r;
idel_pat0_match_fall2_and_r <= #TCQ &idel_pat0_match_fall2_r;
idel_pat0_match_rise3_and_r <= #TCQ &idel_pat0_match_rise3_r;
idel_pat0_match_fall3_and_r <= #TCQ &idel_pat0_match_fall3_r;
idel_pat0_data_match_r <= #TCQ (idel_pat0_match_rise0_and_r &&
idel_pat0_match_fall0_and_r &&
idel_pat0_match_rise1_and_r &&
idel_pat0_match_fall1_and_r &&
idel_pat0_match_rise2_and_r &&
idel_pat0_match_fall2_and_r &&
idel_pat0_match_rise3_and_r &&
idel_pat0_match_fall3_and_r);
end
always @(posedge clk) begin
idel_pat1_match_rise0_and_r <= #TCQ &idel_pat1_match_rise0_r;
idel_pat1_match_fall0_and_r <= #TCQ &idel_pat1_match_fall0_r;
idel_pat1_match_rise1_and_r <= #TCQ &idel_pat1_match_rise1_r;
idel_pat1_match_fall1_and_r <= #TCQ &idel_pat1_match_fall1_r;
idel_pat1_match_rise2_and_r <= #TCQ &idel_pat1_match_rise2_r;
idel_pat1_match_fall2_and_r <= #TCQ &idel_pat1_match_fall2_r;
idel_pat1_match_rise3_and_r <= #TCQ &idel_pat1_match_rise3_r;
idel_pat1_match_fall3_and_r <= #TCQ &idel_pat1_match_fall3_r;
idel_pat1_data_match_r <= #TCQ (idel_pat1_match_rise0_and_r &&
idel_pat1_match_fall0_and_r &&
idel_pat1_match_rise1_and_r &&
idel_pat1_match_fall1_and_r &&
idel_pat1_match_rise2_and_r &&
idel_pat1_match_fall2_and_r &&
idel_pat1_match_rise3_and_r &&
idel_pat1_match_fall3_and_r);
end
always @(*)
idel_pat_data_match <= #TCQ idel_pat0_data_match_r |
idel_pat1_data_match_r;
always @(posedge clk)
idel_pat_data_match_r <= #TCQ idel_pat_data_match;
// Combine pattern match "subterms" for DQS-PHASER_IN stage
always @(posedge clk) begin
pat0_match_rise0_and_r <= #TCQ &pat0_match_rise0_r;
pat0_match_fall0_and_r <= #TCQ &pat0_match_fall0_r;
pat0_match_rise1_and_r <= #TCQ &pat0_match_rise1_r;
pat0_match_fall1_and_r <= #TCQ &pat0_match_fall1_r;
pat0_match_rise2_and_r <= #TCQ &pat0_match_rise2_r;
pat0_match_fall2_and_r <= #TCQ &pat0_match_fall2_r;
pat0_match_rise3_and_r <= #TCQ &pat0_match_rise3_r;
pat0_match_fall3_and_r <= #TCQ &pat0_match_fall3_r;
pat0_data_match_r <= #TCQ (pat0_match_rise0_and_r &&
pat0_match_fall0_and_r &&
pat0_match_rise1_and_r &&
pat0_match_fall1_and_r &&
pat0_match_rise2_and_r &&
pat0_match_fall2_and_r &&
pat0_match_rise3_and_r &&
pat0_match_fall3_and_r);
end
always @(posedge clk) begin
pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r;
pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r;
pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r;
pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r;
pat1_match_rise2_and_r <= #TCQ &pat1_match_rise2_r;
pat1_match_fall2_and_r <= #TCQ &pat1_match_fall2_r;
pat1_match_rise3_and_r <= #TCQ &pat1_match_rise3_r;
pat1_match_fall3_and_r <= #TCQ &pat1_match_fall3_r;
pat1_data_match_r <= #TCQ (pat1_match_rise0_and_r &&
pat1_match_fall0_and_r &&
pat1_match_rise1_and_r &&
pat1_match_fall1_and_r &&
pat1_match_rise2_and_r &&
pat1_match_fall2_and_r &&
pat1_match_rise3_and_r &&
pat1_match_fall3_and_r);
end
assign pat_data_match_r = pat0_data_match_r | pat1_data_match_r;
end else if (nCK_PER_CLK == 2) begin: gen_pat_match_div2
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
// DQ IDELAY pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat0_rise0[pt_i%4])
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat0_fall0[pt_i%4])
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat0_rise1[pt_i%4])
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat0_fall1[pt_i%4])
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat1_rise0[pt_i%4])
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat1_fall0[pt_i%4])
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat1_rise1[pt_i%4])
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat1_fall1[pt_i%4])
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
// DQS DVW pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat0_rise0[pt_i%4])
pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat0_fall0[pt_i%4])
pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat0_rise1[pt_i%4])
pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat0_fall1[pt_i%4])
pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat1_rise0[pt_i%4])
pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat1_fall0[pt_i%4])
pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat1_rise1[pt_i%4])
pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat1_fall1[pt_i%4])
pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
end
// Combine pattern match "subterms" for DQ-IDELAY stage
always @(posedge clk) begin
idel_pat0_match_rise0_and_r <= #TCQ &idel_pat0_match_rise0_r;
idel_pat0_match_fall0_and_r <= #TCQ &idel_pat0_match_fall0_r;
idel_pat0_match_rise1_and_r <= #TCQ &idel_pat0_match_rise1_r;
idel_pat0_match_fall1_and_r <= #TCQ &idel_pat0_match_fall1_r;
idel_pat0_data_match_r <= #TCQ (idel_pat0_match_rise0_and_r &&
idel_pat0_match_fall0_and_r &&
idel_pat0_match_rise1_and_r &&
idel_pat0_match_fall1_and_r);
end
always @(posedge clk) begin
idel_pat1_match_rise0_and_r <= #TCQ &idel_pat1_match_rise0_r;
idel_pat1_match_fall0_and_r <= #TCQ &idel_pat1_match_fall0_r;
idel_pat1_match_rise1_and_r <= #TCQ &idel_pat1_match_rise1_r;
idel_pat1_match_fall1_and_r <= #TCQ &idel_pat1_match_fall1_r;
idel_pat1_data_match_r <= #TCQ (idel_pat1_match_rise0_and_r &&
idel_pat1_match_fall0_and_r &&
idel_pat1_match_rise1_and_r &&
idel_pat1_match_fall1_and_r);
end
always @(posedge clk) begin
if (sr_valid_r2)
idel_pat_data_match <= #TCQ idel_pat0_data_match_r |
idel_pat1_data_match_r;
end
//assign idel_pat_data_match = idel_pat0_data_match_r |
// idel_pat1_data_match_r;
always @(posedge clk)
idel_pat_data_match_r <= #TCQ idel_pat_data_match;
// Combine pattern match "subterms" for DQS-PHASER_IN stage
always @(posedge clk) begin
pat0_match_rise0_and_r <= #TCQ &pat0_match_rise0_r;
pat0_match_fall0_and_r <= #TCQ &pat0_match_fall0_r;
pat0_match_rise1_and_r <= #TCQ &pat0_match_rise1_r;
pat0_match_fall1_and_r <= #TCQ &pat0_match_fall1_r;
pat0_data_match_r <= #TCQ (pat0_match_rise0_and_r &&
pat0_match_fall0_and_r &&
pat0_match_rise1_and_r &&
pat0_match_fall1_and_r);
end
always @(posedge clk) begin
pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r;
pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r;
pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r;
pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r;
pat1_data_match_r <= #TCQ (pat1_match_rise0_and_r &&
pat1_match_fall0_and_r &&
pat1_match_rise1_and_r &&
pat1_match_fall1_and_r);
end
assign pat_data_match_r = pat0_data_match_r | pat1_data_match_r;
end
endgenerate
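  // Note: each *_data_match_r term above is produced by a three-stage
  // pipeline (per-bit compare, AND-reduce across the byte, AND across bit
  // times), so it lags the corresponding sr_* shift register contents by
  // three clocks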
always @(posedge clk) begin
rdlvl_stg1_start_r <= #TCQ rdlvl_stg1_start;
mpr_rdlvl_done_r1 <= #TCQ mpr_rdlvl_done_r;
mpr_rdlvl_done_r2 <= #TCQ mpr_rdlvl_done_r1;
mpr_rdlvl_start_r <= #TCQ mpr_rdlvl_start;
end
//***************************************************************************
// First stage calibration: Capture clock
//***************************************************************************
//*****************************************************************
  // Keep track of how many samples have been written to the shift registers.
  // Every time RD_SHIFT_LEN samples have been written, a full read training
  // pattern has been loaded into the sr_* registers. sr_valid_r is then
  // asserted to indicate that: (1) comparison between the sr_* and the
  // old_sr_* / prev_sr_* registers can take place, and (2) the contents of
  // sr_* can be transferred into the old_sr_* and prev_sr_* registers
//*****************************************************************
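  // For example, with the usual CAL_PAT_LEN = 8 training pattern and
  // nCK_PER_CLK = 2, RD_SHIFT_LEN = 2, so sr_valid_r pulses roughly once
  // for every two valid samples presented on the mux_rd_* bus (illustrative
  // figures only - the actual values follow this module's localparams)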
// verilint STARC-2.2.3.3 off
always @(posedge clk)
if (rst || (mpr_rdlvl_done_r && ~rdlvl_stg1_start)) begin
cnt_shift_r <= #TCQ 'b1;
sr_valid_r <= #TCQ 1'b0;
mpr_valid_r <= #TCQ 1'b0;
end else begin
if (mux_rd_valid_r && mpr_rdlvl_start && ~mpr_rdlvl_done_r) begin
if (cnt_shift_r == 'b0)
mpr_valid_r <= #TCQ 1'b1;
else begin
mpr_valid_r <= #TCQ 1'b0;
cnt_shift_r <= #TCQ cnt_shift_r + 1;
end
end else
mpr_valid_r <= #TCQ 1'b0;
if (mux_rd_valid_r && rdlvl_stg1_start) begin
if (cnt_shift_r == RD_SHIFT_LEN-1) begin
sr_valid_r <= #TCQ 1'b1;
cnt_shift_r <= #TCQ 'b0;
end else begin
sr_valid_r <= #TCQ 1'b0;
cnt_shift_r <= #TCQ cnt_shift_r + 1;
end
end else
// When the current mux_rd_* contents are not valid, then
// retain the current value of cnt_shift_r, and make sure
// that sr_valid_r = 0 to prevent any downstream loads or
// comparisons
sr_valid_r <= #TCQ 1'b0;
end
// verilint STARC-2.2.3.3 on
//*****************************************************************
  // Logic to determine when either edge of the data eye is encountered.
  // The pre- and post-IDELAY-update data patterns are compared; if they
  // differ, then an edge has been encountered. Currently no attempt is
  // made to determine if the data pattern itself is "correct", only
  // whether it changes after incrementing the IDELAY (possible
  // future enhancement)
//*****************************************************************
// One-way control for ensuring that state machine request to store
// current read data into OLD SR shift register only occurs on a
// valid clock cycle. The FSM provides a one-cycle request pulse.
// It is the responsibility of the FSM to wait the worst-case time
// before relying on any downstream results of this load.
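  // Illustrative handshake timing (hypothetical cycle numbers, one valid
  // sample shown):
  //   cycle           : 1  2  3  4  5  6
  //   store_sr_req_r  : 0  1  0  0  0  0
  //   store_sr_r      : 0  0  1  1  1  0
  //   sr_valid_r      : 0  0  0  0  1  0
  //   old_sr_* load   : -  -  -  -  yes (captured at the end of cycle 5)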
always @(posedge clk)
if (rst)
store_sr_r <= #TCQ 1'b0;
else begin
if (store_sr_req_r)
store_sr_r <= #TCQ 1'b1;
else if ((sr_valid_r || mpr_valid_r) && store_sr_r)
store_sr_r <= #TCQ 1'b0;
end
// Transfer current data to old data, prior to incrementing delay
// Also store data from current sampling window - so that we can detect
// if the current delay tap yields data that is "jittery"
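  // Note the distinction between the two copies kept below: prev_sr_* is
  // refreshed on every valid sample and is compared against the next sample
  // taken at the same tap (jitter detection), while old_sr_* is refreshed
  // only when the FSM pulses store_sr_req_r - i.e. once per tap - and is
  // compared against data taken at the next tap (edge detection)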
generate
if (nCK_PER_CLK == 4) begin: gen_old_sr_div4
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_old_sr
always @(posedge clk) begin
if (sr_valid_r || mpr_valid_r) begin
// Load last sample (i.e. from current sampling interval)
prev_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
prev_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
prev_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
prev_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
prev_sr_rise2_r[z] <= #TCQ sr_rise2_r[z];
prev_sr_fall2_r[z] <= #TCQ sr_fall2_r[z];
prev_sr_rise3_r[z] <= #TCQ sr_rise3_r[z];
prev_sr_fall3_r[z] <= #TCQ sr_fall3_r[z];
end
if ((sr_valid_r || mpr_valid_r) && store_sr_r) begin
old_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
old_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
old_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
old_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
old_sr_rise2_r[z] <= #TCQ sr_rise2_r[z];
old_sr_fall2_r[z] <= #TCQ sr_fall2_r[z];
old_sr_rise3_r[z] <= #TCQ sr_rise3_r[z];
old_sr_fall3_r[z] <= #TCQ sr_fall3_r[z];
end
end
end
end else if (nCK_PER_CLK == 2) begin: gen_old_sr_div2
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_old_sr
always @(posedge clk) begin
if (sr_valid_r || mpr_valid_r) begin
prev_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
prev_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
prev_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
prev_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
end
if ((sr_valid_r || mpr_valid_r) && store_sr_r) begin
old_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
old_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
old_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
old_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
end
end
end
end
endgenerate
//*******************************************************
// Match determination occurs over 3 cycles - pipelined for better timing
//*******************************************************
// Match valid with # of cycles of pipelining in match determination
always @(posedge clk) begin
sr_valid_r1 <= #TCQ sr_valid_r;
sr_valid_r2 <= #TCQ sr_valid_r1;
mpr_valid_r1 <= #TCQ mpr_valid_r;
mpr_valid_r2 <= #TCQ mpr_valid_r1;
end
generate
if (nCK_PER_CLK == 4) begin: gen_sr_match_div4
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_sr_match
always @(posedge clk) begin
          // CYCLE1: Compare all bits in the DQS group, generating a separate
          // term for each bit over eight bit times. For example, if there are
          // 8 bits per DQS group, 64 terms are generated on cycle 1
// NOTE: Structure HDL such that X on data bus will result in a
// mismatch. This is required for memory models that can drive the
// bus with X's to model uncertainty regions (e.g. Denali)
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == old_sr_rise0_r[z]))
old_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise0_r[z] <= #TCQ old_sr_match_rise0_r[z];
else
old_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == old_sr_fall0_r[z]))
old_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall0_r[z] <= #TCQ old_sr_match_fall0_r[z];
else
old_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == old_sr_rise1_r[z]))
old_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise1_r[z] <= #TCQ old_sr_match_rise1_r[z];
else
old_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == old_sr_fall1_r[z]))
old_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall1_r[z] <= #TCQ old_sr_match_fall1_r[z];
else
old_sr_match_fall1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise2_r[z] == old_sr_rise2_r[z]))
old_sr_match_rise2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise2_r[z] <= #TCQ old_sr_match_rise2_r[z];
else
old_sr_match_rise2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall2_r[z] == old_sr_fall2_r[z]))
old_sr_match_fall2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall2_r[z] <= #TCQ old_sr_match_fall2_r[z];
else
old_sr_match_fall2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise3_r[z] == old_sr_rise3_r[z]))
old_sr_match_rise3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise3_r[z] <= #TCQ old_sr_match_rise3_r[z];
else
old_sr_match_rise3_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall3_r[z] == old_sr_fall3_r[z]))
old_sr_match_fall3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall3_r[z] <= #TCQ old_sr_match_fall3_r[z];
else
old_sr_match_fall3_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == prev_sr_rise0_r[z]))
prev_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise0_r[z] <= #TCQ prev_sr_match_rise0_r[z];
else
prev_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == prev_sr_fall0_r[z]))
prev_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall0_r[z] <= #TCQ prev_sr_match_fall0_r[z];
else
prev_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == prev_sr_rise1_r[z]))
prev_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise1_r[z] <= #TCQ prev_sr_match_rise1_r[z];
else
prev_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == prev_sr_fall1_r[z]))
prev_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall1_r[z] <= #TCQ prev_sr_match_fall1_r[z];
else
prev_sr_match_fall1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise2_r[z] == prev_sr_rise2_r[z]))
prev_sr_match_rise2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise2_r[z] <= #TCQ prev_sr_match_rise2_r[z];
else
prev_sr_match_rise2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall2_r[z] == prev_sr_fall2_r[z]))
prev_sr_match_fall2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall2_r[z] <= #TCQ prev_sr_match_fall2_r[z];
else
prev_sr_match_fall2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise3_r[z] == prev_sr_rise3_r[z]))
prev_sr_match_rise3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise3_r[z] <= #TCQ prev_sr_match_rise3_r[z];
else
prev_sr_match_rise3_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall3_r[z] == prev_sr_fall3_r[z]))
prev_sr_match_fall3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall3_r[z] <= #TCQ prev_sr_match_fall3_r[z];
else
prev_sr_match_fall3_r[z] <= #TCQ 1'b0;
          // CYCLE2: Combine all the comparisons for the 8 words (rise0-fall3)
          // in the calibration sequence. Now we're down to DRAM_WIDTH terms
old_sr_match_cyc2_r[z] <= #TCQ
old_sr_match_rise0_r[z] &
old_sr_match_fall0_r[z] &
old_sr_match_rise1_r[z] &
old_sr_match_fall1_r[z] &
old_sr_match_rise2_r[z] &
old_sr_match_fall2_r[z] &
old_sr_match_rise3_r[z] &
old_sr_match_fall3_r[z];
prev_sr_match_cyc2_r[z] <= #TCQ
prev_sr_match_rise0_r[z] &
prev_sr_match_fall0_r[z] &
prev_sr_match_rise1_r[z] &
prev_sr_match_fall1_r[z] &
prev_sr_match_rise2_r[z] &
prev_sr_match_fall2_r[z] &
prev_sr_match_rise3_r[z] &
prev_sr_match_fall3_r[z];
          // CYCLE3: Invert the value (i.e. assert when a DIFFERENCE in value
          // is seen) and qualify with the pipelined valid signal - probably
          // don't need a full cycle just to do this....
if (sr_valid_r2 || mpr_valid_r2) begin
old_sr_diff_r[z] <= #TCQ ~old_sr_match_cyc2_r[z];
prev_sr_diff_r[z] <= #TCQ ~prev_sr_match_cyc2_r[z];
end else begin
old_sr_diff_r[z] <= #TCQ 'b0;
prev_sr_diff_r[z] <= #TCQ 'b0;
end
end
end
    end else if (nCK_PER_CLK == 2) begin: gen_sr_match_div2
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_sr_match
always @(posedge clk) begin
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == old_sr_rise0_r[z]))
old_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise0_r[z] <= #TCQ old_sr_match_rise0_r[z];
else
old_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == old_sr_fall0_r[z]))
old_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall0_r[z] <= #TCQ old_sr_match_fall0_r[z];
else
old_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == old_sr_rise1_r[z]))
old_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise1_r[z] <= #TCQ old_sr_match_rise1_r[z];
else
old_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == old_sr_fall1_r[z]))
old_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall1_r[z] <= #TCQ old_sr_match_fall1_r[z];
else
old_sr_match_fall1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == prev_sr_rise0_r[z]))
prev_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise0_r[z] <= #TCQ prev_sr_match_rise0_r[z];
else
prev_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == prev_sr_fall0_r[z]))
prev_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall0_r[z] <= #TCQ prev_sr_match_fall0_r[z];
else
prev_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == prev_sr_rise1_r[z]))
prev_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise1_r[z] <= #TCQ prev_sr_match_rise1_r[z];
else
prev_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == prev_sr_fall1_r[z]))
prev_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall1_r[z] <= #TCQ prev_sr_match_fall1_r[z];
else
prev_sr_match_fall1_r[z] <= #TCQ 1'b0;
old_sr_match_cyc2_r[z] <= #TCQ
old_sr_match_rise0_r[z] &
old_sr_match_fall0_r[z] &
old_sr_match_rise1_r[z] &
old_sr_match_fall1_r[z];
prev_sr_match_cyc2_r[z] <= #TCQ
prev_sr_match_rise0_r[z] &
prev_sr_match_fall0_r[z] &
prev_sr_match_rise1_r[z] &
prev_sr_match_fall1_r[z];
          // CYCLE3: Invert the value (i.e. assert when a DIFFERENCE in value
          // is seen) and qualify with the pipelined valid signal - probably
          // don't need a full cycle just to do this....
if (sr_valid_r2 || mpr_valid_r2) begin
old_sr_diff_r[z] <= #TCQ ~old_sr_match_cyc2_r[z];
prev_sr_diff_r[z] <= #TCQ ~prev_sr_match_cyc2_r[z];
end else begin
old_sr_diff_r[z] <= #TCQ 'b0;
prev_sr_diff_r[z] <= #TCQ 'b0;
end
end
end
end
endgenerate
//***************************************************************************
// First stage calibration: DQS Capture
//***************************************************************************
//*******************************************************
// Counters for tracking # of samples compared
  // For each comparison point (i.e. to determine if an edge has
  // occurred after each IODELAY increment when read leveling),
  // multiple samples are compared in order to average out the effects
  // of jitter. If any one of these samples differs from the "old"
  // sample corresponding to the previous IODELAY value, then an edge
  // is declared to be detected.
  //*******************************************************
  // Two cascaded counters are used to keep track of the # of samples
  // compared, in order to make it easier to meet timing on these paths.
  // Once the optimal sampling interval is determined, it may be possible
  // to remove the second counter
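  // The total sampling interval per comparison point is therefore on the
  // order of DETECT_EDGE_SAMPLE_CNT0 * DETECT_EDGE_SAMPLE_CNT1 valid samples
  // (values defined elsewhere in this module); in the FAST_CAL /
  // FAST_WIN_DETECT simulation modes the interval is cut short after roughly
  // SR_VALID_DELAY-1 valid samples (see samp_cnt_done_r below)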
always @(posedge clk)
samp_edge_cnt0_en_r <= #TCQ
(cal1_state_r == CAL1_PAT_DETECT) ||
(cal1_state_r == CAL1_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE_DQ);
// First counter counts # of samples compared
always @(posedge clk)
if (rst)
samp_edge_cnt0_r <= #TCQ 'b0;
else begin
if (!samp_edge_cnt0_en_r)
// Reset sample counter when not in any of the "sampling" states
samp_edge_cnt0_r <= #TCQ 'b0;
else if (sr_valid_r2 || mpr_valid_r2)
// Otherwise, count # of samples compared
samp_edge_cnt0_r <= #TCQ samp_edge_cnt0_r + 1;
end
// Counter #2 enable generation
always @(posedge clk)
if (rst)
samp_edge_cnt1_en_r <= #TCQ 1'b0;
else begin
// Assert pulse when correct number of samples compared
if ((samp_edge_cnt0_r == DETECT_EDGE_SAMPLE_CNT0) &&
(sr_valid_r2 || mpr_valid_r2))
samp_edge_cnt1_en_r <= #TCQ 1'b1;
else
samp_edge_cnt1_en_r <= #TCQ 1'b0;
end
// Counter #2
always @(posedge clk)
if (rst)
samp_edge_cnt1_r <= #TCQ 'b0;
else
if (!samp_edge_cnt0_en_r)
samp_edge_cnt1_r <= #TCQ 'b0;
else if (samp_edge_cnt1_en_r)
samp_edge_cnt1_r <= #TCQ samp_edge_cnt1_r + 1;
always @(posedge clk)
if (rst)
samp_cnt_done_r <= #TCQ 1'b0;
else begin
if (!samp_edge_cnt0_en_r)
samp_cnt_done_r <= #TCQ 'b0;
else if ((SIM_CAL_OPTION == "FAST_CAL") ||
(SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin
if (samp_edge_cnt0_r == SR_VALID_DELAY-1)
// For simulation only, stay in edge detection mode a minimum
// amount of time - just enough for two data compares to finish
samp_cnt_done_r <= #TCQ 1'b1;
end else begin
if (samp_edge_cnt1_r == DETECT_EDGE_SAMPLE_CNT1)
samp_cnt_done_r <= #TCQ 1'b1;
end
end
//*****************************************************************
  // Logic to keep track of (on a per-bit basis):
  // 1. When a region of stability preceded by a known edge occurs
  // 2. If, for the current tap, the read data jitters
  // 3. If an edge occurred between the current and previous tap
// 4. When the current edge detection/sampling interval can end
// Essentially, these are a series of status bits - the stage 1
// calibration FSM monitors these to determine when an edge is
// found. Additional information is provided to help the FSM
// determine if a left or right edge has been found.
//****************************************************************
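  // Summary of the per-bit status flags maintained below:
  //   pb_found_edge_r[z]       - an edge (or jitter) was seen for bit z in
  //                              the current detection interval
  //   pb_found_edge_last_r[z]  - registered copy of pb_found_edge_r (holds
  //                              the result of the last detection interval);
  //                              used to decide which DQ IDELAYs still need
  //                              to be incremented during per-bit deskew
  //   pb_last_tap_jitter_r[z]  - set when the current tap's data jitters;
  //                              read at the next tap as "previous tap
  //                              jittered"
  //   pb_detect_edge_done_r[z] - sampling interval finished for bit z
  //   pb_cnt_eye_size_r[z]     - # of consecutive edge-free taps seen so far
  //   pb_found_stable_eye_r[z] - at least MIN_EYE_SIZE consecutive edge-free
  //                              taps have been observed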
assign pb_detect_edge_setup
= (cal1_state_r == CAL1_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_PB_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_PB_DEC_CPT_LEFT_WAIT);
assign pb_detect_edge
= (cal1_state_r == CAL1_PAT_DETECT) ||
(cal1_state_r == CAL1_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE_DQ);
generate
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_track_left_edge
always @(posedge clk) begin
if (pb_detect_edge_setup) begin
// Reset eye size, stable eye marker, and jitter marker before
// starting new edge detection iteration
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_detect_edge_done_r[z] <= #TCQ 1'b0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_last_tap_jitter_r[z] <= #TCQ 1'b0;
pb_found_edge_last_r[z] <= #TCQ 1'b0;
pb_found_edge_r[z] <= #TCQ 1'b0;
pb_found_first_edge_r[z] <= #TCQ 1'b0;
end else if (pb_detect_edge) begin
// Save information on which DQ bits are already out of the
// data valid window - those DQ bits will later not have their
// IDELAY tap value incremented
pb_found_edge_last_r[z] <= #TCQ pb_found_edge_r[z];
if (!pb_detect_edge_done_r[z]) begin
if (samp_cnt_done_r) begin
            // If we've reached the end of the sampling interval, then no
            // jitter has been found on the current tap (although an edge
            // could have been found between the current and previous taps).
            // Increment the stable eye counter if no edge was found, and
            // always clear the jitter flag in preparation for the next tap.
pb_last_tap_jitter_r[z] <= #TCQ 1'b0;
pb_detect_edge_done_r[z] <= #TCQ 1'b1;
if (!pb_found_edge_r[z] && !pb_last_tap_jitter_r[z]) begin
// If the data was completely stable during this tap and
// no edge was found between this and the previous tap
// then increment the stable eye counter "as appropriate"
if (pb_cnt_eye_size_r[z] != MIN_EYE_SIZE-1)
pb_cnt_eye_size_r[z] <= #TCQ pb_cnt_eye_size_r[z] + 1;
else //if (pb_found_first_edge_r[z])
// We've reached minimum stable eye width
pb_found_stable_eye_r[z] <= #TCQ 1'b1;
end else begin
              // Otherwise, an edge was found - either because of a
              // difference between this and the previous tap's read
              // data, and/or because the previous tap's data jittered
              // (but not the current tap's data). In that case just set
              // the edge-found flag and restart the stable eye counter
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_found_edge_r[z] <= #TCQ 1'b1;
pb_detect_edge_done_r[z] <= #TCQ 1'b1;
end
end else if (prev_sr_diff_r[z]) begin
// If we find that the current tap read data jitters, then
// set edge and jitter found flags, "enable" the eye size
// counter, and stop sampling interval for this bit
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_last_tap_jitter_r[z] <= #TCQ 1'b1;
pb_found_edge_r[z] <= #TCQ 1'b1;
pb_found_first_edge_r[z] <= #TCQ 1'b1;
pb_detect_edge_done_r[z] <= #TCQ 1'b1;
end else if (old_sr_diff_r[z] || pb_last_tap_jitter_r[z]) begin
// If either an edge was found (i.e. difference between
// current tap and previous tap read data), or the previous
// tap exhibited jitter (which means by definition that the
// current tap cannot match the previous tap because the
// previous tap gave unstable data), then set the edge found
// flag, and "enable" eye size counter. But do not stop
// sampling interval - we still need to check if the current
// tap exhibits jitter
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_found_edge_r[z] <= #TCQ 1'b1;
pb_found_first_edge_r[z] <= #TCQ 1'b1;
end
end
end else begin
// Before every edge detection interval, reset "intra-tap" flags
pb_found_edge_r[z] <= #TCQ 1'b0;
pb_detect_edge_done_r[z] <= #TCQ 1'b0;
end
end
end
endgenerate
// Combine the above per-bit status flags into combined terms when
// performing deskew on the aggregate data window
always @(posedge clk) begin
detect_edge_done_r <= #TCQ &pb_detect_edge_done_r;
found_edge_r <= #TCQ |pb_found_edge_r;
found_edge_all_r <= #TCQ &pb_found_edge_r;
found_stable_eye_r <= #TCQ &pb_found_stable_eye_r;
end
  // The last IODELAY "stable eye" indicator is updated only after
  // detect_edge_done_r is asserted - so that when we do find the "right edge"
  // of the data valid window, found_edge_r = 1 AND found_stable_eye_r = 1
  // when detect_edge_done_r = 1 (otherwise, if found_stable_eye_r updated
  // immediately, it would never be possible to have found_stable_eye_r = 1
  // when we detect an edge - and we'd never know whether we've found
  // a "right edge")
always @(posedge clk)
if (pb_detect_edge_setup)
found_stable_eye_last_r <= #TCQ 1'b0;
else if (detect_edge_done_r)
found_stable_eye_last_r <= #TCQ found_stable_eye_r;
//*****************************************************************
// Keep track of DQ IDELAYE2 taps used
//*****************************************************************
// Added additional register stage to improve timing
always @(posedge clk)
if (rst)
idelay_tap_cnt_slice_r <= 5'h0;
else
idelay_tap_cnt_slice_r <= idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing];
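  // Update priority for idelay_tap_cnt_r (summary of the branches below):
  //   1. rst or SKIP_CAL           - load idelaye2_init_val for every rank/byte
  //   2. FAST_CAL                  - apply a single inc/dec to all ranks/bytes
  //   3. rank 0 done (2-rank case) - copy the rank 0 results into rank 1
  //   4. cal1_dq_idel_ce           - inc/dec only the byte being calibrated
  //   5. idelay_ld                 - clear the rank 0 tap count for the byte
  //                                  selected by wrcal_cnt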
always @(posedge clk)
if (rst || (SIM_CAL_OPTION == "SKIP_CAL")) begin //|| new_cnt_cpt_r
for (s = 0; s < RANKS; s = s + 1) begin
for (t = 0; t < DQS_WIDTH; t = t + 1) begin
idelay_tap_cnt_r[s][t] <= #TCQ idelaye2_init_val;
end
end
end else if (SIM_CAL_OPTION == "FAST_CAL") begin
for (u = 0; u < RANKS; u = u + 1) begin
for (w = 0; w < DQS_WIDTH; w = w + 1) begin
if (cal1_dq_idel_ce) begin
if (cal1_dq_idel_inc)
idelay_tap_cnt_r[u][w] <= #TCQ idelay_tap_cnt_r[u][w] + 1;
else
idelay_tap_cnt_r[u][w] <= #TCQ idelay_tap_cnt_r[u][w] - 1;
end
end
end
end else if ((rnk_cnt_r == RANKS-1) && (RANKS == 2) &&
rdlvl_rank_done_r && (cal1_state_r == CAL1_IDLE)) begin
for (f = 0; f < DQS_WIDTH; f = f + 1) begin
idelay_tap_cnt_r[rnk_cnt_r][f] <= #TCQ idelay_tap_cnt_r[(rnk_cnt_r-1)][f];
end
end else if (cal1_dq_idel_ce) begin
if (cal1_dq_idel_inc)
idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] <= #TCQ idelay_tap_cnt_slice_r + 5'h1;
else
idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] <= #TCQ idelay_tap_cnt_slice_r - 5'h1;
end else if (idelay_ld)
idelay_tap_cnt_r[0][wrcal_cnt] <= #TCQ 5'b00000;
always @(posedge clk)
if (rst || new_cnt_cpt_r)
idelay_tap_limit_r <= #TCQ 1'b0;
else if (idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_r] == 'd31)
idelay_tap_limit_r <= #TCQ 1'b1;
//*****************************************************************
// keep track of edge tap counts found, and current capture clock
// tap count
//*****************************************************************
always @(posedge clk)
if (rst || new_cnt_cpt_r ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
tap_cnt_cpt_r <= #TCQ 'b0;
else if (cal1_dlyce_cpt_r) begin
if (cal1_dlyinc_cpt_r)
tap_cnt_cpt_r <= #TCQ tap_cnt_cpt_r + 1;
else if (tap_cnt_cpt_r != 'd0)
tap_cnt_cpt_r <= #TCQ tap_cnt_cpt_r - 1;
end
always @(posedge clk)
if (rst || new_cnt_cpt_r ||
(cal1_state_r1 == CAL1_DQ_IDEL_TAP_INC) ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
tap_limit_cpt_r <= #TCQ 1'b0;
else if (tap_cnt_cpt_r == 6'd63)
tap_limit_cpt_r <= #TCQ 1'b1;
always @(posedge clk)
cal1_cnt_cpt_timing_r <= #TCQ cal1_cnt_cpt_r;
assign cal1_cnt_cpt_timing = {2'b00, cal1_cnt_cpt_r};
// Storing DQS tap values at the end of each DQS read leveling
always @(posedge clk) begin
if (rst) begin
for (a = 0; a < RANKS; a = a + 1) begin: rst_rdlvl_dqs_tap_count_loop
for (b = 0; b < DQS_WIDTH; b = b + 1)
rdlvl_dqs_tap_cnt_r[a][b] <= #TCQ 'b0;
end
end else if ((SIM_CAL_OPTION == "FAST_CAL") & (cal1_state_r1 == CAL1_NEXT_DQS)) begin
for (p = 0; p < RANKS; p = p +1) begin: rdlvl_dqs_tap_rank_cnt
for(q = 0; q < DQS_WIDTH; q = q +1) begin: rdlvl_dqs_tap_cnt
rdlvl_dqs_tap_cnt_r[p][q] <= #TCQ tap_cnt_cpt_r;
end
end
end else if (SIM_CAL_OPTION == "SKIP_CAL") begin
for (j = 0; j < RANKS; j = j +1) begin: rdlvl_dqs_tap_rnk_cnt
for(i = 0; i < DQS_WIDTH; i = i +1) begin: rdlvl_dqs_cnt
rdlvl_dqs_tap_cnt_r[j][i] <= #TCQ 6'd31;
end
end
end else if (cal1_state_r1 == CAL1_NEXT_DQS) begin
rdlvl_dqs_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing_r] <= #TCQ tap_cnt_cpt_r;
end
end
// Counter to track maximum DQ IODELAY tap usage during the per-bit
// deskew portion of stage 1 calibration
always @(posedge clk)
if (rst) begin
idel_tap_cnt_dq_pb_r <= #TCQ 'b0;
idel_tap_limit_dq_pb_r <= #TCQ 1'b0;
end else
if (new_cnt_cpt_r) begin
idel_tap_cnt_dq_pb_r <= #TCQ 'b0;
idel_tap_limit_dq_pb_r <= #TCQ 1'b0;
end else if (|cal1_dlyce_dq_r) begin
if (cal1_dlyinc_dq_r)
idel_tap_cnt_dq_pb_r <= #TCQ idel_tap_cnt_dq_pb_r + 1;
else
idel_tap_cnt_dq_pb_r <= #TCQ idel_tap_cnt_dq_pb_r - 1;
if (idel_tap_cnt_dq_pb_r == 31)
idel_tap_limit_dq_pb_r <= #TCQ 1'b1;
else
idel_tap_limit_dq_pb_r <= #TCQ 1'b0;
end
//*****************************************************************
always @(posedge clk)
cal1_state_r1 <= #TCQ cal1_state_r;
always @(posedge clk)
if (rst) begin
cal1_cnt_cpt_r <= #TCQ 'b0;
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
cal1_prech_req_r <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_IDLE;
cnt_idel_dec_cpt_r <= #TCQ 6'bxxxxxx;
found_first_edge_r <= #TCQ 1'b0;
found_second_edge_r <= #TCQ 1'b0;
right_edge_taps_r <= #TCQ 6'bxxxxxx;
first_edge_taps_r <= #TCQ 6'bxxxxxx;
new_cnt_cpt_r <= #TCQ 1'b0;
rdlvl_stg1_done <= #TCQ 1'b0;
rdlvl_stg1_err <= #TCQ 1'b0;
second_edge_taps_r <= #TCQ 6'bxxxxxx;
store_sr_req_pulsed_r <= #TCQ 1'b0;
store_sr_req_r <= #TCQ 1'b0;
rnk_cnt_r <= #TCQ 2'b00;
rdlvl_rank_done_r <= #TCQ 1'b0;
idel_dec_cnt <= #TCQ 'd0;
rdlvl_last_byte_done <= #TCQ 1'b0;
idel_pat_detect_valid_r <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
idel_adj_inc <= #TCQ 1'b0;
if (OCAL_EN == "ON")
mpr_rdlvl_done_r <= #TCQ 1'b0;
else
mpr_rdlvl_done_r <= #TCQ 1'b1;
mpr_dec_cpt_r <= #TCQ 1'b0;
end else begin
// default (inactive) states for all "pulse" outputs
// verilint STARC-2.2.3.3 off
cal1_prech_req_r <= #TCQ 1'b0;
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
new_cnt_cpt_r <= #TCQ 1'b0;
store_sr_req_pulsed_r <= #TCQ 1'b0;
store_sr_req_r <= #TCQ 1'b0;
case (cal1_state_r)
CAL1_IDLE: begin
rdlvl_rank_done_r <= #TCQ 1'b0;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
if (mpr_rdlvl_start && ~mpr_rdlvl_start_r) begin
cal1_state_r <= #TCQ CAL1_MPR_NEW_DQS_WAIT;
end else
if (rdlvl_stg1_start && ~rdlvl_stg1_start_r) begin
if (SIM_CAL_OPTION == "SKIP_CAL")
cal1_state_r <= #TCQ CAL1_REGL_LOAD;
else if (SIM_CAL_OPTION == "FAST_CAL")
cal1_state_r <= #TCQ CAL1_NEXT_DQS;
else begin
new_cnt_cpt_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_NEW_DQS_WAIT;
end
end
end
CAL1_MPR_NEW_DQS_WAIT: begin
cal1_prech_req_r <= #TCQ 1'b0;
if (!cal1_wait_r && mpr_valid_r)
cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT;
end
        // Wait for the read data MUX to switch to the new DQS group;
        // this also gives time for the read data IN_FIFO to
        // output the updated data for the new DQS group
CAL1_NEW_DQS_WAIT: begin
rdlvl_rank_done_r <= #TCQ 1'b0;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
cal1_prech_req_r <= #TCQ 1'b0;
if (|pi_counter_read_val) begin //VK_REVIEW
mpr_dec_cpt_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT;
cnt_idel_dec_cpt_r <= #TCQ pi_counter_read_val;
end else if (!cal1_wait_r) begin
//if (!cal1_wait_r) begin
// Store "previous tap" read data. Technically there is no
// "previous" read data, since we are starting a new DQS
// group, so we'll never find an edge at tap 0 unless the
// data is fluctuating/jittering
store_sr_req_r <= #TCQ 1'b1;
// If per-bit deskew is disabled, then skip the first
// portion of stage 1 calibration
if (PER_BIT_DESKEW == "OFF")
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
else if (PER_BIT_DESKEW == "ON")
cal1_state_r <= #TCQ CAL1_PB_STORE_FIRST_WAIT;
end
end
//*****************************************************************
// Per-bit deskew states
//*****************************************************************
// Wait state following storage of initial read data
CAL1_PB_STORE_FIRST_WAIT:
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE;
// Look for an edge on all DQ bits in current DQS group
CAL1_PB_DETECT_EDGE:
if (detect_edge_done_r) begin
if (found_stable_eye_r) begin
// If we've found the left edge for all bits (or more precisely,
// we've found the left edge, and then part of the stable
// window thereafter), then proceed to positioning the CPT clock
// right before the left margin
cnt_idel_dec_cpt_r <= #TCQ MIN_EYE_SIZE + 1;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_LEFT;
end else begin
// If we've reached the end of the sampling time, and haven't
// yet found the left margin of all the DQ bits, then:
if (!tap_limit_cpt_r) begin
// If we still have taps left to use, then store current value
// of read data, increment the capture clock, and continue to
// look for (left) edges
store_sr_req_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_PB_INC_CPT;
end else begin
                // If we ran out of taps moving the capture clock, and we
                // haven't finished edge detection, then reset the capture
                // clock taps to 0 (gradually, one tap at a time), then exit
                // the per-bit portion of the algorithm - i.e. proceed to
                // adjust the capture clock and DQ IODELAYs as in the rest
                // of stage 1 calibration
cnt_idel_dec_cpt_r <= #TCQ 6'd63;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT;
end
end
end
// Increment delay for DQS
CAL1_PB_INC_CPT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_PB_INC_CPT_WAIT;
end
// Wait for IODELAY for both capture and internal nodes within
// ISERDES to settle, before checking again for an edge
CAL1_PB_INC_CPT_WAIT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE;
end
          // We've found the left edges of the windows for all DQ bits
          // (actually, we found them MIN_EYE_SIZE taps ago). Decrement the
          // capture clock IDELAY to position just outside the left edge of
          // the data window
CAL1_PB_DEC_CPT_LEFT:
if (cnt_idel_dec_cpt_r == 6'b000000)
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_LEFT_WAIT;
else begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1;
end
CAL1_PB_DEC_CPT_LEFT_WAIT:
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE_DQ;
// If there is skew between individual DQ bits, then after we've
// positioned the CPT clock, we will be "in the window" for some
// DQ bits ("early" DQ bits), and "out of the window" for others
// ("late" DQ bits). Increase DQ taps until we are out of the
// window for all DQ bits
CAL1_PB_DETECT_EDGE_DQ:
if (detect_edge_done_r)
if (found_edge_all_r) begin
// We're out of the window for all DQ bits in this DQS group
// We're done with per-bit deskew for this group - now decr
// capture clock IODELAY tap count back to 0, and proceed
// with the rest of stage 1 calibration for this DQS group
cnt_idel_dec_cpt_r <= #TCQ tap_cnt_cpt_r;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT;
end else
if (!idel_tap_limit_dq_pb_r)
// If we still have DQ taps available for deskew, keep
// incrementing IODELAY tap count for the appropriate DQ bits
cal1_state_r <= #TCQ CAL1_PB_INC_DQ;
else begin
// Otherwise, stop immediately (we've done the best we can)
// and proceed with rest of stage 1 calibration
cnt_idel_dec_cpt_r <= #TCQ tap_cnt_cpt_r;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT;
end
CAL1_PB_INC_DQ: begin
// Increment only those DQ for which an edge hasn't been found yet
cal1_dlyce_dq_r <= #TCQ ~pb_found_edge_last_r;
cal1_dlyinc_dq_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_PB_INC_DQ_WAIT;
end
CAL1_PB_INC_DQ_WAIT:
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE_DQ;
// Decrement capture clock taps back to initial value
CAL1_PB_DEC_CPT:
if (cnt_idel_dec_cpt_r == 6'b000000)
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_WAIT;
else begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1;
end
        // Wait for capture clock to settle, then proceed to rest of
        // stage 1 calibration for this DQS group
CAL1_PB_DEC_CPT_WAIT:
if (!cal1_wait_r) begin
store_sr_req_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
end
// When first starting calibration for a DQS group, save the
// current value of the read data shift register, and use this
// as a reference. Note that for the first iteration of the
// edge detection loop, we will in effect be checking for an edge
// at IODELAY taps = 0 - normally, we are comparing the read data
// for IODELAY taps = N, with the read data for IODELAY taps = N-1
// An edge can only be found at IODELAY taps = 0 if the read data
        // is changing during this time (possibly due to jitter)
CAL1_STORE_FIRST_WAIT: begin
mpr_dec_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PAT_DETECT;
end
CAL1_VALID_WAIT: begin
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT;
end
CAL1_MPR_PAT_DETECT: begin
// MPR read leveling for centering DQS in valid window before
// OCLKDELAYED calibration begins in order to eliminate read issues
if (idel_pat_detect_valid_r == 1'b0) begin
cal1_state_r <= #TCQ CAL1_VALID_WAIT;
idel_pat_detect_valid_r <= #TCQ 1'b1;
end else if (idel_pat_detect_valid_r && idel_mpr_pat_detect_r) begin
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
idel_dec_cnt <= #TCQ 'd0;
end else if (!idelay_tap_limit_r)
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC;
else
cal1_state_r <= #TCQ CAL1_RDLVL_ERR;
end
CAL1_PAT_DETECT: begin
          // All DQ bits associated with a DQS are pushed to the right one
          // IDELAY tap at a time until the first rising DQS is in the
          // tri-state region before the first rising-edge window.
          // The detect_edge_done_r condition is included to support averaging
          // during IDELAY tap increments
if (detect_edge_done_r) begin
if (idel_pat_data_match) begin
case (idelay_adj)
2'b01: begin
cal1_state_r <= CAL1_DQ_IDEL_TAP_INC;
idel_dec_cnt <= #TCQ 1'b0;
idel_adj_inc <= #TCQ 1'b1;
end
2'b10: begin //DEC by 1
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC ;
idel_dec_cnt <= #TCQ 1'b1;
idel_adj_inc <= #TCQ 1'b0;
end
default: begin
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
idel_dec_cnt <= #TCQ 1'b0;
idel_adj_inc <= #TCQ 1'b0;
end
endcase
end else if (!idelay_tap_limit_r) begin
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC;
end else begin
cal1_state_r <= #TCQ CAL1_RDLVL_ERR;
end
end
end
// Increment IDELAY tap by 1 for DQ bits in the byte being calibrated
// until left edge of valid window detected
CAL1_DQ_IDEL_TAP_INC: begin
cal1_dq_idel_ce <= #TCQ 1'b1;
cal1_dq_idel_inc <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC_WAIT;
idel_pat_detect_valid_r <= #TCQ 1'b0;
end
CAL1_DQ_IDEL_TAP_INC_WAIT: begin
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
if (!cal1_wait_r) begin
idel_adj_inc <= #TCQ 1'b0;
if (idel_adj_inc)
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
else if (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3"))
cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT;
else
cal1_state_r <= #TCQ CAL1_PAT_DETECT;
end
end
        // Decrement IDELAY taps by idel_dec_cnt once idel_pat_data_match
        // is detected
CAL1_DQ_IDEL_TAP_DEC: begin
cal1_dq_idel_inc <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC_WAIT;
if (idel_dec_cnt >= 'd0)
cal1_dq_idel_ce <= #TCQ 1'b1;
else
cal1_dq_idel_ce <= #TCQ 1'b0;
if (idel_dec_cnt > 'd0)
idel_dec_cnt <= #TCQ idel_dec_cnt - 1;
else
idel_dec_cnt <= #TCQ idel_dec_cnt;
end
CAL1_DQ_IDEL_TAP_DEC_WAIT: begin
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
if (!cal1_wait_r) begin
if ((idel_dec_cnt > 'd0) || (pi_rdval_cnt > 'd0))
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC;
else if (mpr_dec_cpt_r)
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
else
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
end
end
// Check for presence of data eye edge. During this state, we
// sample the read data multiple times, and look for changes
// in the read data, specifically:
// 1. A change in the read data compared with the value of
// read data from the previous delay tap. This indicates
// that the most recent tap delay increment has moved us
// into either a new window, or moved/kept us in the
// transition/jitter region between windows. Note that this
// condition only needs to be checked for once, and for
// logistical purposes, we check this soon after entering
// this state (see comment in CAL1_DETECT_EDGE below for
// why this is done)
// 2. A change in the read data while we are in this state
// (i.e. in the absence of a tap delay increment). This
// indicates that we're close enough to a window edge that
// jitter will cause the read data to change even in the
// absence of a tap delay change
CAL1_DETECT_EDGE: begin
          // Essentially wait for the first comparison to finish, then
          // store the current data into the "old" data register. This store
          // happens now, rather than later (e.g. when we have already
          // left this state), in order to avoid a situation where the data
          // stored as "old" data has not been used in an "active
          // comparison" - i.e. data stored after the last comparison
          // of this state. In this case, we can miss an edge if the
// following sequence occurs:
// 1. Comparison completes in this state - no edge found
          // 2. "Momentary jitter" occurs which "pushes" the data out by the
          //    equivalent of one delay tap
// 3. We store this jittered data as the "old" data
// 4. "Jitter" no longer present
// 5. We increment the delay tap by one
// 6. Now we compare the current with the "old" data - they're
// the same, and no edge is detected
// NOTE: Given the large # of comparisons done in this state, it's
// highly unlikely the above sequence will occur in actual H/W
// Wait for the first load of read data into the comparison
// shift register to finish, then load the current read data
          // into the "old" data register. This allows us to do one
          // initial comparison between the current read data and the
          // stored data corresponding to the previous delay tap
idel_pat_detect_valid_r <= #TCQ 1'b0;
if (!store_sr_req_pulsed_r) begin
// Pulse store_sr_req_r only once in this state
store_sr_req_r <= #TCQ 1'b1;
store_sr_req_pulsed_r <= #TCQ 1'b1;
end else begin
store_sr_req_r <= #TCQ 1'b0;
store_sr_req_pulsed_r <= #TCQ 1'b1;
end
// Continue to sample read data and look for edges until the
// appropriate time interval (shorter for simulation-only,
// much, much longer for actual h/w) has elapsed
if (detect_edge_done_r) begin
if (tap_limit_cpt_r)
              // Only one edge was detected and we ran out of taps, since only
              // one bit time's worth of taps is available for window
              // detection. This can happen if at tap 0 DQS is in the previous
              // window, which results in only the left edge being detected;
              // or if at tap 0 DQS is in the current window, resulting in
              // only the right edge being detected. Depending on the
              // frequency, this case can also happen if at tap 0 DQS is in
              // the left noise region, resulting in only the left edge
              // being detected.
cal1_state_r <= #TCQ CAL1_CALC_IDEL;
else if (found_edge_r) begin
              // Sticky bit - asserted after we encounter an edge. Although
              // the current edge may not be considered the "first edge",
              // this just means we found at least one edge
found_first_edge_r <= #TCQ 1'b1;
// Only the right edge of the data valid window is found
// Record the inner right edge tap value
if (!found_first_edge_r && found_stable_eye_last_r) begin
if (tap_cnt_cpt_r == 'd0)
right_edge_taps_r <= #TCQ 'd0;
else
right_edge_taps_r <= #TCQ tap_cnt_cpt_r;
end
              // Both edges of the data valid window found:
              // If we've found a second edge after a region of stability,
              // then we must have just passed the second ("right") edge of
              // the window. Record second_edge_taps = current tap - 1,
              // because we're one past the actual second edge tap, where
              // the edge taps represent the extremes of the data valid
              // window (i.e. the smallest & largest taps where data is
              // still valid)
if (found_first_edge_r && found_stable_eye_last_r) begin
found_second_edge_r <= #TCQ 1'b1;
second_edge_taps_r <= #TCQ tap_cnt_cpt_r - 1;
cal1_state_r <= #TCQ CAL1_CALC_IDEL;
end else begin
// Otherwise, an edge was found (just not the "second" edge)
// Assuming DQS is in the correct window at tap 0 of Phaser IN
// fine tap. The first edge found is the right edge of the valid
// window and is the beginning of the jitter region hence done!
first_edge_taps_r <= #TCQ tap_cnt_cpt_r;
cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT;
end
end else
// Otherwise, if we haven't found an edge....
// If we still have taps left to use, then keep incrementing
cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT;
end
end
// Increment Phaser_IN delay for DQS
CAL1_IDEL_INC_CPT: begin
cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT_WAIT;
if (~tap_limit_cpt_r) begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b1;
end else begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
end
end
// Wait for Phaser_In to settle, before checking again for an edge
CAL1_IDEL_INC_CPT_WAIT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
end
// Calculate final value of Phaser_IN taps. At this point, one or both
// edges of data eye have been found, and/or all taps have been
// exhausted looking for the edges
// NOTE: We're calculating the amount to decrement by, not the
// absolute setting for DQS.
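        // For illustration only (hypothetical tap values): if
        // first_edge_taps_r = 10 and second_edge_taps_r = 40, then
        // cnt_idel_dec_cpt_r = ((40 - 10) >> 1) + 1 = 16. Since
        // tap_cnt_cpt_r is one past the second edge (41), decrementing
        // by 16 leaves the capture clock at tap 25 - the center of the
        // 10-40 window.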
CAL1_CALC_IDEL: begin
// CASE1: If 2 edges found.
if (found_second_edge_r)
cnt_idel_dec_cpt_r
<= #TCQ ((second_edge_taps_r -
first_edge_taps_r)>>1) + 1;
else if (right_edge_taps_r > 6'd0)
// Only right edge detected
// right_edge_taps_r is the inner right edge tap value
// hence used for calculation
cnt_idel_dec_cpt_r
<= #TCQ (tap_cnt_cpt_r - (right_edge_taps_r>>1));
else if (found_first_edge_r)
// Only left edge detected
cnt_idel_dec_cpt_r
<= #TCQ ((tap_cnt_cpt_r - first_edge_taps_r)>>1);
else
cnt_idel_dec_cpt_r
<= #TCQ (tap_cnt_cpt_r>>1);
// Now use the value we just calculated to decrement CPT taps
// to the desired calibration point
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT;
end
        // Decrement capture clock for final adjustment - center the
        // capture clock in the middle of the data eye. This adjustment occurs
        // only when both edges are found using CPT taps. Must do this
        // incrementally to avoid clock glitching (since CPT drives the clock
        // divider within each ISERDES)
CAL1_IDEL_DEC_CPT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
// once adjustment is complete, we're done with calibration for
// this DQS, repeat for next DQS
cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1;
if (cnt_idel_dec_cpt_r == 6'b000001) begin
if (mpr_dec_cpt_r) begin
if (|idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing]) begin
idel_dec_cnt <= #TCQ idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing];
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC;
end else
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
end else
cal1_state_r <= #TCQ CAL1_NEXT_DQS;
end else
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT_WAIT;
end
CAL1_IDEL_DEC_CPT_WAIT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT;
end
// Determine whether we're done, or have more DQS's to calibrate
// Also request precharge after every byte, as appropriate
CAL1_NEXT_DQS: begin
//if (mpr_rdlvl_done_r || (DRAM_TYPE == "DDR2"))
cal1_prech_req_r <= #TCQ 1'b1;
//else
// cal1_prech_req_r <= #TCQ 1'b0;
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
// Prepare for another iteration with next DQS group
found_first_edge_r <= #TCQ 1'b0;
found_second_edge_r <= #TCQ 1'b0;
first_edge_taps_r <= #TCQ 'd0;
second_edge_taps_r <= #TCQ 'd0;
if ((SIM_CAL_OPTION == "FAST_CAL") ||
(cal1_cnt_cpt_r >= DQS_WIDTH-1)) begin
if (mpr_rdlvl_done_r) begin
rdlvl_last_byte_done <= #TCQ 1'b1;
mpr_last_byte_done <= #TCQ 1'b0;
end else begin
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b1;
end
end
// Wait until precharge that occurs in between calibration of
// DQS groups is finished
if (prech_done) begin // || (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3"))) begin
if (SIM_CAL_OPTION == "FAST_CAL") begin
//rdlvl_rank_done_r <= #TCQ 1'b1;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_DONE; //CAL1_REGL_LOAD;
end else if (cal1_cnt_cpt_r >= DQS_WIDTH-1) begin
if (~mpr_rdlvl_done_r) begin
mpr_rank_done_r <= #TCQ 1'b1;
// if (rnk_cnt_r == RANKS-1) begin
// All DQS groups in all ranks done
cal1_state_r <= #TCQ CAL1_DONE;
cal1_cnt_cpt_r <= #TCQ 'b0;
// end else begin
// // Process DQS groups in next rank
// rnk_cnt_r <= #TCQ rnk_cnt_r + 1;
// new_cnt_cpt_r <= #TCQ 1'b1;
// cal1_cnt_cpt_r <= #TCQ 'b0;
// cal1_state_r <= #TCQ CAL1_IDLE;
// end
end else begin
// All DQS groups in a rank done
rdlvl_rank_done_r <= #TCQ 1'b1;
if (rnk_cnt_r == RANKS-1) begin
// All DQS groups in all ranks done
cal1_state_r <= #TCQ CAL1_REGL_LOAD;
end else begin
// Process DQS groups in next rank
rnk_cnt_r <= #TCQ rnk_cnt_r + 1;
new_cnt_cpt_r <= #TCQ 1'b1;
cal1_cnt_cpt_r <= #TCQ 'b0;
cal1_state_r <= #TCQ CAL1_IDLE;
end
end
end else begin
// Process next DQS group
new_cnt_cpt_r <= #TCQ 1'b1;
cal1_cnt_cpt_r <= #TCQ cal1_cnt_cpt_r + 1;
cal1_state_r <= #TCQ CAL1_NEW_DQS_PREWAIT;
end
end
end
CAL1_NEW_DQS_PREWAIT: begin
if (!cal1_wait_r) begin
if (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3"))
cal1_state_r <= #TCQ CAL1_MPR_NEW_DQS_WAIT;
else
cal1_state_r <= #TCQ CAL1_NEW_DQS_WAIT;
end
end
// Load rank registers in Phaser_IN
CAL1_REGL_LOAD: begin
rdlvl_rank_done_r <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
cal1_prech_req_r <= #TCQ 1'b0;
cal1_cnt_cpt_r <= #TCQ 'b0;
rnk_cnt_r <= #TCQ 2'b00;
if ((regl_rank_cnt == RANKS-1) &&
((regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1))) begin
cal1_state_r <= #TCQ CAL1_DONE;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
end else
cal1_state_r <= #TCQ CAL1_REGL_LOAD;
end
CAL1_RDLVL_ERR: begin
rdlvl_stg1_err <= #TCQ 1'b1;
end
// Done with this stage of calibration
// if used, allow DEBUG_PORT to control taps
CAL1_DONE: begin
mpr_rdlvl_done_r <= #TCQ 1'b1;
cal1_prech_req_r <= #TCQ 1'b0;
if (~mpr_rdlvl_done_r && (OCAL_EN=="ON") && (DRAM_TYPE == "DDR3")) begin
rdlvl_stg1_done <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_IDLE;
end else
rdlvl_stg1_done <= #TCQ 1'b1;
end
endcase
end
// verilint STARC-2.2.3.3 on
endmodule
|
module mig_7series_v2_3_ddr_phy_rdlvl #
(
parameter TCQ = 100, // clk->out delay (sim only)
parameter nCK_PER_CLK = 2, // # of memory clocks per CLK
parameter CLK_PERIOD = 3333, // Internal clock period (in ps)
parameter DQ_WIDTH = 64, // # of DQ (data)
parameter DQS_CNT_WIDTH = 3, // = ceil(log2(DQS_WIDTH))
parameter DQS_WIDTH = 8, // # of DQS (strobe)
parameter DRAM_WIDTH = 8, // # of DQ per DQS
parameter RANKS = 1, // # of DRAM ranks
parameter PER_BIT_DESKEW = "ON", // Enable per-bit DQ deskew
parameter SIM_CAL_OPTION = "NONE", // Skip various calibration steps
parameter DEBUG_PORT = "OFF", // Enable debug port
parameter DRAM_TYPE = "DDR3", // Memory I/F type: "DDR3", "DDR2"
parameter OCAL_EN = "ON",
parameter IDELAY_ADJ = "ON"
)
(
input clk,
input rst,
// Calibration status, control signals
input mpr_rdlvl_start,
output mpr_rdlvl_done,
output reg mpr_last_byte_done,
output mpr_rnk_done,
input rdlvl_stg1_start,
output reg rdlvl_stg1_done /* synthesis syn_maxfan = 30 */,
output rdlvl_stg1_rnk_done,
output reg rdlvl_stg1_err,
output mpr_rdlvl_err,
output rdlvl_err,
output reg rdlvl_prech_req,
output reg rdlvl_last_byte_done,
output reg rdlvl_assrt_common,
input prech_done,
input phy_if_empty,
input [4:0] idelaye2_init_val,
// Captured data in fabric clock domain
input [2*nCK_PER_CLK*DQ_WIDTH-1:0] rd_data,
// Decrement initial Phaser_IN Fine tap delay
input dqs_po_dec_done,
input [5:0] pi_counter_read_val,
// Stage 1 calibration outputs
output reg pi_fine_dly_dec_done,
output reg pi_en_stg2_f,
output reg pi_stg2_f_incdec,
output reg pi_stg2_load,
output reg [5:0] pi_stg2_reg_l,
output [DQS_CNT_WIDTH:0] pi_stg2_rdlvl_cnt,
// To DQ IDELAY required to find left edge of
// valid window
output idelay_ce,
output idelay_inc,
input idelay_ld,
input [DQS_CNT_WIDTH:0] wrcal_cnt,
// Only output if Per-bit de-skew enabled
output reg [5*RANKS*DQ_WIDTH-1:0] dlyval_dq,
// Debug Port
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_first_edge_cnt,
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_second_edge_cnt,
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_tap_cnt,
output [5*DQS_WIDTH*RANKS-1:0] dbg_dq_idelay_tap_cnt,
input dbg_idel_up_all,
input dbg_idel_down_all,
input dbg_idel_up_cpt,
input dbg_idel_down_cpt,
input [DQS_CNT_WIDTH-1:0] dbg_sel_idel_cpt,
input dbg_sel_all_idel_cpt,
output [255:0] dbg_phy_rdlvl
);
  // minimum time (in IDELAY taps) for which captured data must be stable for
  // the algorithm to consider a valid data eye to be found. The read leveling
  // logic will ignore any window found smaller than this value. Limits on
  // how small this number can be are determined by: (1) the algorithmic
  // limitation of how many taps wide the data eye can be (3 taps), and (2)
  // how wide the regions of "instability" that occur around the edges of the
  // read valid window can be (i.e. we need to be able to filter out "false"
  // windows that occur for a short # of taps around the edges of the true
  // data window, although with multi-sampling during read leveling, this is
  // not as much of a concern) - the larger the value, the more protection
  // against "false" windows
localparam MIN_EYE_SIZE = 16;
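  // For example, with MIN_EYE_SIZE = 16 any region of matching data narrower
  // than 16 IDELAY taps is rejected as a "false" window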
// Length of calibration sequence (in # of words)
localparam CAL_PAT_LEN = 8;
// Read data shift register length
localparam RD_SHIFT_LEN = CAL_PAT_LEN / (2*nCK_PER_CLK);
// # of cycles required to perform read data shift register compare
  // This is defined as the delay from the cycle the new data is loaded until
// signal found_edge_r is valid
localparam RD_SHIFT_COMP_DELAY = 5;
// worst-case # of cycles to wait to ensure that both the SR and
// PREV_SR shift registers have valid data, and that the comparison
// of the two shift register values is valid. The "+1" at the end of
// this equation is a fudge factor, I freely admit that
localparam SR_VALID_DELAY = (2 * RD_SHIFT_LEN) + RD_SHIFT_COMP_DELAY + 1;
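  // For example, with the header defaults (CAL_PAT_LEN = 8, nCK_PER_CLK = 2,
  // RD_SHIFT_COMP_DELAY = 5) this works out to RD_SHIFT_LEN = 8/(2*2) = 2 and
  // SR_VALID_DELAY = (2*2) + 5 + 1 = 10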
// # of clock cycles to wait after changing tap value or read data MUX
// to allow: (1) tap chain to settle, (2) for delayed input to propagate
// thru ISERDES, (3) for the read data comparison logic to have time to
// output the comparison of two consecutive samples of the settled read data
// The minimum delay is 16 cycles, which should be good enough to handle all
// three of the above conditions for the simulation-only case with a short
// training pattern. For H/W (or for simulation with longer training
// pattern), it will take longer to store and compare two consecutive
// samples, and the value of this parameter will reflect that
localparam PIPE_WAIT_CNT = (SR_VALID_DELAY < 8) ? 16 : (SR_VALID_DELAY + 8);
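  // For example, with SR_VALID_DELAY = 10 (header defaults) the "< 8" branch
  // is not taken and PIPE_WAIT_CNT = 10 + 8 = 18 cycles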
// # of read data samples to examine when detecting whether an edge has
  // occurred during stage 1 calibration. Width of local param must be
// changed as appropriate. Note that there are two counters used, each
// counter can be changed independently of the other - they are used in
// cascade to create a larger counter
localparam [11:0] DETECT_EDGE_SAMPLE_CNT0 = 12'h001; //12'hFFF;
localparam [11:0] DETECT_EDGE_SAMPLE_CNT1 = 12'h001; // 12'h1FF Must be > 0
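  // Assuming the two counters cascade multiplicatively as described above,
  // restoring the commented-out values (12'hFFF, 12'h1FF) would examine on the
  // order of 4095 x 511 (~2 million) samples per edge-detection pass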
localparam [5:0] CAL1_IDLE = 6'h00;
localparam [5:0] CAL1_NEW_DQS_WAIT = 6'h01;
localparam [5:0] CAL1_STORE_FIRST_WAIT = 6'h02;
localparam [5:0] CAL1_PAT_DETECT = 6'h03;
localparam [5:0] CAL1_DQ_IDEL_TAP_INC = 6'h04;
localparam [5:0] CAL1_DQ_IDEL_TAP_INC_WAIT = 6'h05;
localparam [5:0] CAL1_DQ_IDEL_TAP_DEC = 6'h06;
localparam [5:0] CAL1_DQ_IDEL_TAP_DEC_WAIT = 6'h07;
localparam [5:0] CAL1_DETECT_EDGE = 6'h08;
localparam [5:0] CAL1_IDEL_INC_CPT = 6'h09;
localparam [5:0] CAL1_IDEL_INC_CPT_WAIT = 6'h0A;
localparam [5:0] CAL1_CALC_IDEL = 6'h0B;
localparam [5:0] CAL1_IDEL_DEC_CPT = 6'h0C;
localparam [5:0] CAL1_IDEL_DEC_CPT_WAIT = 6'h0D;
localparam [5:0] CAL1_NEXT_DQS = 6'h0E;
localparam [5:0] CAL1_DONE = 6'h0F;
localparam [5:0] CAL1_PB_STORE_FIRST_WAIT = 6'h10;
localparam [5:0] CAL1_PB_DETECT_EDGE = 6'h11;
localparam [5:0] CAL1_PB_INC_CPT = 6'h12;
localparam [5:0] CAL1_PB_INC_CPT_WAIT = 6'h13;
localparam [5:0] CAL1_PB_DEC_CPT_LEFT = 6'h14;
localparam [5:0] CAL1_PB_DEC_CPT_LEFT_WAIT = 6'h15;
localparam [5:0] CAL1_PB_DETECT_EDGE_DQ = 6'h16;
localparam [5:0] CAL1_PB_INC_DQ = 6'h17;
localparam [5:0] CAL1_PB_INC_DQ_WAIT = 6'h18;
localparam [5:0] CAL1_PB_DEC_CPT = 6'h19;
localparam [5:0] CAL1_PB_DEC_CPT_WAIT = 6'h1A;
localparam [5:0] CAL1_REGL_LOAD = 6'h1B;
localparam [5:0] CAL1_RDLVL_ERR = 6'h1C;
localparam [5:0] CAL1_MPR_NEW_DQS_WAIT = 6'h1D;
localparam [5:0] CAL1_VALID_WAIT = 6'h1E;
localparam [5:0] CAL1_MPR_PAT_DETECT = 6'h1F;
localparam [5:0] CAL1_NEW_DQS_PREWAIT = 6'h20;
integer a;
integer b;
integer d;
integer e;
integer f;
integer h;
integer g;
integer i;
integer j;
integer k;
integer l;
integer m;
integer n;
integer r;
integer p;
integer q;
integer s;
integer t;
integer u;
integer w;
integer ce_i;
integer ce_rnk_i;
integer aa;
integer bb;
integer cc;
integer dd;
genvar x;
genvar z;
reg [DQS_CNT_WIDTH:0] cal1_cnt_cpt_r;
wire [DQS_CNT_WIDTH+2:0]cal1_cnt_cpt_timing;
reg [DQS_CNT_WIDTH:0] cal1_cnt_cpt_timing_r;
reg cal1_dq_idel_ce;
reg cal1_dq_idel_inc;
reg cal1_dlyce_cpt_r;
reg cal1_dlyinc_cpt_r;
reg cal1_dlyce_dq_r;
reg cal1_dlyinc_dq_r;
reg cal1_wait_cnt_en_r;
reg [4:0] cal1_wait_cnt_r;
reg cal1_wait_r;
reg [DQ_WIDTH-1:0] dlyce_dq_r;
reg dlyinc_dq_r;
reg [4:0] dlyval_dq_reg_r [0:RANKS-1][0:DQ_WIDTH-1];
reg cal1_prech_req_r;
reg [5:0] cal1_state_r;
reg [5:0] cal1_state_r1;
reg [5:0] cnt_idel_dec_cpt_r;
reg [3:0] cnt_shift_r;
reg detect_edge_done_r;
reg [5:0] right_edge_taps_r;
reg [5:0] first_edge_taps_r;
reg found_edge_r;
reg found_first_edge_r;
reg found_second_edge_r;
reg found_stable_eye_r;
reg found_stable_eye_last_r;
reg found_edge_all_r;
reg [5:0] tap_cnt_cpt_r;
reg tap_limit_cpt_r;
reg [4:0] idel_tap_cnt_dq_pb_r;
reg idel_tap_limit_dq_pb_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall0_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall1_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise0_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise1_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall2_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall3_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise2_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise3_r;
reg mux_rd_valid_r;
reg new_cnt_cpt_r;
reg [RD_SHIFT_LEN-1:0] old_sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise3_r [DRAM_WIDTH-1:0];
reg [DRAM_WIDTH-1:0] old_sr_match_fall0_r;
reg [DRAM_WIDTH-1:0] old_sr_match_fall1_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise0_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise1_r;
reg [DRAM_WIDTH-1:0] old_sr_match_fall2_r;
reg [DRAM_WIDTH-1:0] old_sr_match_fall3_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise2_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise3_r;
reg [4:0] pb_cnt_eye_size_r [DRAM_WIDTH-1:0];
reg [DRAM_WIDTH-1:0] pb_detect_edge_done_r;
reg [DRAM_WIDTH-1:0] pb_found_edge_last_r;
reg [DRAM_WIDTH-1:0] pb_found_edge_r;
reg [DRAM_WIDTH-1:0] pb_found_first_edge_r;
reg [DRAM_WIDTH-1:0] pb_found_stable_eye_r;
reg [DRAM_WIDTH-1:0] pb_last_tap_jitter_r;
reg pi_en_stg2_f_timing;
reg pi_stg2_f_incdec_timing;
reg pi_stg2_load_timing;
reg [5:0] pi_stg2_reg_l_timing;
reg [DRAM_WIDTH-1:0] prev_sr_diff_r;
reg [RD_SHIFT_LEN-1:0] prev_sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise3_r [DRAM_WIDTH-1:0];
reg [DRAM_WIDTH-1:0] prev_sr_match_cyc2_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall0_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall1_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise0_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise1_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall2_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall3_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise2_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise3_r;
wire [DQ_WIDTH-1:0] rd_data_rise0;
wire [DQ_WIDTH-1:0] rd_data_fall0;
wire [DQ_WIDTH-1:0] rd_data_rise1;
wire [DQ_WIDTH-1:0] rd_data_fall1;
wire [DQ_WIDTH-1:0] rd_data_rise2;
wire [DQ_WIDTH-1:0] rd_data_fall2;
wire [DQ_WIDTH-1:0] rd_data_rise3;
wire [DQ_WIDTH-1:0] rd_data_fall3;
reg samp_cnt_done_r;
reg samp_edge_cnt0_en_r;
reg [11:0] samp_edge_cnt0_r;
reg samp_edge_cnt1_en_r;
reg [11:0] samp_edge_cnt1_r;
reg [DQS_CNT_WIDTH:0] rd_mux_sel_r;
reg [5:0] second_edge_taps_r;
reg [RD_SHIFT_LEN-1:0] sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise3_r [DRAM_WIDTH-1:0];
reg store_sr_r;
reg store_sr_req_pulsed_r;
reg store_sr_req_r;
reg sr_valid_r;
reg sr_valid_r1;
reg sr_valid_r2;
reg [DRAM_WIDTH-1:0] old_sr_diff_r;
reg [DRAM_WIDTH-1:0] old_sr_match_cyc2_r;
reg pat0_data_match_r;
reg pat1_data_match_r;
wire pat_data_match_r;
wire [RD_SHIFT_LEN-1:0] pat0_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall3 [3:0];
reg [DRAM_WIDTH-1:0] pat0_match_fall0_r;
reg pat0_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_fall1_r;
reg pat0_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_fall2_r;
reg pat0_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_fall3_r;
reg pat0_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise0_r;
reg pat0_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise1_r;
reg pat0_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise2_r;
reg pat0_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise3_r;
reg pat0_match_rise3_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall0_r;
reg pat1_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall1_r;
reg pat1_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall2_r;
reg pat1_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall3_r;
reg pat1_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise0_r;
reg pat1_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise1_r;
reg pat1_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise2_r;
reg pat1_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise3_r;
reg pat1_match_rise3_and_r;
reg [4:0] idelay_tap_cnt_r [0:RANKS-1][0:DQS_WIDTH-1];
reg [5*DQS_WIDTH*RANKS-1:0] idelay_tap_cnt_w;
reg [4:0] idelay_tap_cnt_slice_r;
reg idelay_tap_limit_r;
wire [RD_SHIFT_LEN-1:0] pat0_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall3 [3:0];
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise0_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall0_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise1_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall1_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise2_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall2_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise3_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall3_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise0_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall0_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise1_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall1_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise2_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall2_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise3_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall3_r;
reg idel_pat0_match_rise0_and_r;
reg idel_pat0_match_fall0_and_r;
reg idel_pat0_match_rise1_and_r;
reg idel_pat0_match_fall1_and_r;
reg idel_pat0_match_rise2_and_r;
reg idel_pat0_match_fall2_and_r;
reg idel_pat0_match_rise3_and_r;
reg idel_pat0_match_fall3_and_r;
reg idel_pat1_match_rise0_and_r;
reg idel_pat1_match_fall0_and_r;
reg idel_pat1_match_rise1_and_r;
reg idel_pat1_match_fall1_and_r;
reg idel_pat1_match_rise2_and_r;
reg idel_pat1_match_fall2_and_r;
reg idel_pat1_match_rise3_and_r;
reg idel_pat1_match_fall3_and_r;
reg idel_pat0_data_match_r;
reg idel_pat1_data_match_r;
reg idel_pat_data_match;
reg idel_pat_data_match_r;
reg [4:0] idel_dec_cnt;
reg [5:0] rdlvl_dqs_tap_cnt_r [0:RANKS-1][0:DQS_WIDTH-1];
reg [1:0] rnk_cnt_r;
reg rdlvl_rank_done_r;
reg [3:0] done_cnt;
reg [1:0] regl_rank_cnt;
reg [DQS_CNT_WIDTH:0] regl_dqs_cnt;
reg [DQS_CNT_WIDTH:0] regl_dqs_cnt_r;
wire [DQS_CNT_WIDTH+2:0]regl_dqs_cnt_timing;
reg regl_rank_done_r;
reg rdlvl_stg1_start_r;
reg dqs_po_dec_done_r1;
reg dqs_po_dec_done_r2;
reg fine_dly_dec_done_r1;
reg fine_dly_dec_done_r2;
reg [3:0] wait_cnt_r;
reg [5:0] pi_rdval_cnt;
reg pi_cnt_dec;
reg mpr_valid_r;
reg mpr_valid_r1;
reg mpr_valid_r2;
reg mpr_rd_rise0_prev_r;
reg mpr_rd_fall0_prev_r;
reg mpr_rd_rise1_prev_r;
reg mpr_rd_fall1_prev_r;
reg mpr_rd_rise2_prev_r;
reg mpr_rd_fall2_prev_r;
reg mpr_rd_rise3_prev_r;
reg mpr_rd_fall3_prev_r;
reg mpr_rdlvl_done_r;
reg mpr_rdlvl_done_r1;
reg mpr_rdlvl_done_r2;
reg mpr_rdlvl_start_r;
reg mpr_rank_done_r;
reg [2:0] stable_idel_cnt;
reg inhibit_edge_detect_r;
reg idel_pat_detect_valid_r;
reg idel_mpr_pat_detect_r;
reg mpr_pat_detect_r;
reg mpr_dec_cpt_r;
reg idel_adj_inc; //IDELAY adjustment
wire [1:0] idelay_adj;
wire pb_detect_edge_setup;
wire pb_detect_edge;
// Debug
reg [6*DQS_WIDTH-1:0] dbg_cpt_first_edge_taps;
reg [6*DQS_WIDTH-1:0] dbg_cpt_second_edge_taps;
reg [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_tap_cnt_w;
//IDELAY adjustment setting for -1
//2'b10 : IDELAY - 1
//2'b01 : IDELAY + 1
//2'b00 : No IDELAY adjustment
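  //With the default IDELAY_ADJ = "ON" the assign below selects 2'b10
  //(IDELAY - 1); any other setting gives 2'b00 (no adjustment)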
assign idelay_adj = (IDELAY_ADJ == "ON") ? 2'b10: 2'b00;
//***************************************************************************
// Debug
//***************************************************************************
always @(*) begin
for (d = 0; d < RANKS; d = d + 1) begin
for (e = 0; e < DQS_WIDTH; e = e + 1) begin
idelay_tap_cnt_w[(5*e+5*DQS_WIDTH*d)+:5] = idelay_tap_cnt_r[d][e];
dbg_cpt_tap_cnt_w[(6*e+6*DQS_WIDTH*d)+:6] = rdlvl_dqs_tap_cnt_r[d][e];
end
end
end
assign mpr_rdlvl_err = rdlvl_stg1_err & (!mpr_rdlvl_done);
assign rdlvl_err = rdlvl_stg1_err & (mpr_rdlvl_done);
assign dbg_phy_rdlvl[0] = rdlvl_stg1_start;
assign dbg_phy_rdlvl[1] = pat_data_match_r;
assign dbg_phy_rdlvl[2] = mux_rd_valid_r;
assign dbg_phy_rdlvl[3] = idelay_tap_limit_r;
assign dbg_phy_rdlvl[8:4] = 'b0;
assign dbg_phy_rdlvl[14:9] = cal1_state_r[5:0];
assign dbg_phy_rdlvl[20:15] = cnt_idel_dec_cpt_r;
assign dbg_phy_rdlvl[21] = found_first_edge_r;
assign dbg_phy_rdlvl[22] = found_second_edge_r;
assign dbg_phy_rdlvl[23] = found_edge_r;
assign dbg_phy_rdlvl[24] = store_sr_r;
// [40:25] previously used for sr, old_sr shift registers. If connecting
// these signals again, don't forget to parameterize based on RD_SHIFT_LEN
assign dbg_phy_rdlvl[40:25] = 'b0;
assign dbg_phy_rdlvl[41] = sr_valid_r;
assign dbg_phy_rdlvl[42] = found_stable_eye_r;
assign dbg_phy_rdlvl[48:43] = tap_cnt_cpt_r;
assign dbg_phy_rdlvl[54:49] = first_edge_taps_r;
assign dbg_phy_rdlvl[60:55] = second_edge_taps_r;
assign dbg_phy_rdlvl[64:61] = cal1_cnt_cpt_timing_r;
assign dbg_phy_rdlvl[65] = cal1_dlyce_cpt_r;
assign dbg_phy_rdlvl[66] = cal1_dlyinc_cpt_r;
assign dbg_phy_rdlvl[67] = found_edge_r;
assign dbg_phy_rdlvl[68] = found_first_edge_r;
assign dbg_phy_rdlvl[73:69] = 'b0;
assign dbg_phy_rdlvl[74] = idel_pat_data_match;
assign dbg_phy_rdlvl[75] = idel_pat0_data_match_r;
assign dbg_phy_rdlvl[76] = idel_pat1_data_match_r;
assign dbg_phy_rdlvl[77] = pat0_data_match_r;
assign dbg_phy_rdlvl[78] = pat1_data_match_r;
assign dbg_phy_rdlvl[79+:5*DQS_WIDTH*RANKS] = idelay_tap_cnt_w;
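  // With the header defaults (DQS_WIDTH = 8, RANKS = 1) the slice above
  // occupies dbg_phy_rdlvl[118:79]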
assign dbg_phy_rdlvl[170+:8] = mux_rd_rise0_r;
assign dbg_phy_rdlvl[178+:8] = mux_rd_fall0_r;
assign dbg_phy_rdlvl[186+:8] = mux_rd_rise1_r;
assign dbg_phy_rdlvl[194+:8] = mux_rd_fall1_r;
assign dbg_phy_rdlvl[202+:8] = mux_rd_rise2_r;
assign dbg_phy_rdlvl[210+:8] = mux_rd_fall2_r;
assign dbg_phy_rdlvl[218+:8] = mux_rd_rise3_r;
assign dbg_phy_rdlvl[226+:8] = mux_rd_fall3_r;
//***************************************************************************
// Debug output
//***************************************************************************
// CPT taps
assign dbg_cpt_first_edge_cnt = dbg_cpt_first_edge_taps;
assign dbg_cpt_second_edge_cnt = dbg_cpt_second_edge_taps;
assign dbg_cpt_tap_cnt = dbg_cpt_tap_cnt_w;
assign dbg_dq_idelay_tap_cnt = idelay_tap_cnt_w;
// Record first and second edges found during CPT calibration
generate
always @(posedge clk)
if (rst) begin
dbg_cpt_first_edge_taps <= #TCQ 'b0;
dbg_cpt_second_edge_taps <= #TCQ 'b0;
end else if ((SIM_CAL_OPTION == "FAST_CAL") & (cal1_state_r1 == CAL1_CALC_IDEL)) begin
//for (ce_rnk_i = 0; ce_rnk_i < RANKS; ce_rnk_i = ce_rnk_i + 1) begin: gen_dbg_cpt_rnk
for (ce_i = 0; ce_i < DQS_WIDTH; ce_i = ce_i + 1) begin: gen_dbg_cpt_edge
if (found_first_edge_r)
dbg_cpt_first_edge_taps[(6*ce_i)+:6]
<= #TCQ first_edge_taps_r;
if (found_second_edge_r)
dbg_cpt_second_edge_taps[(6*ce_i)+:6]
<= #TCQ second_edge_taps_r;
end
//end
end else if (cal1_state_r == CAL1_CALC_IDEL) begin
        // Record tap counts of first and second edges during
// CPT calibration for each DQS group. If neither edge has
// been found, then those taps will remain 0
if (found_first_edge_r)
dbg_cpt_first_edge_taps[((cal1_cnt_cpt_timing <<2) + (cal1_cnt_cpt_timing <<1))+:6]
<= #TCQ first_edge_taps_r;
if (found_second_edge_r)
dbg_cpt_second_edge_taps[((cal1_cnt_cpt_timing <<2) + (cal1_cnt_cpt_timing <<1))+:6]
<= #TCQ second_edge_taps_r;
end
endgenerate
assign rdlvl_stg1_rnk_done = rdlvl_rank_done_r;// || regl_rank_done_r;
assign mpr_rnk_done = mpr_rank_done_r;
assign mpr_rdlvl_done = ((DRAM_TYPE == "DDR3") && (OCAL_EN == "ON")) ? //&& (SIM_CAL_OPTION == "NONE")
mpr_rdlvl_done_r : 1'b1;
//**************************************************************************
// DQS count to hard PHY during write calibration using Phaser_OUT Stage2
// coarse delay
//**************************************************************************
assign pi_stg2_rdlvl_cnt = (cal1_state_r == CAL1_REGL_LOAD) ? regl_dqs_cnt_r : cal1_cnt_cpt_r;
assign idelay_ce = cal1_dq_idel_ce;
assign idelay_inc = cal1_dq_idel_inc;
//***************************************************************************
// Assert calib_in_common in FAST_CAL mode for IDELAY tap increments to all
// DQs simultaneously
//***************************************************************************
always @(posedge clk) begin
if (rst)
rdlvl_assrt_common <= #TCQ 1'b0;
else if ((SIM_CAL_OPTION == "FAST_CAL") & rdlvl_stg1_start &
!rdlvl_stg1_start_r)
rdlvl_assrt_common <= #TCQ 1'b1;
else if (!idel_pat_data_match_r & idel_pat_data_match)
rdlvl_assrt_common <= #TCQ 1'b0;
end
//***************************************************************************
// Data mux to route appropriate bit to calibration logic - i.e. calibration
// is done sequentially, one bit (or DQS group) at a time
//***************************************************************************
generate
if (nCK_PER_CLK == 4) begin: rd_data_div4_logic_clk
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
assign rd_data_rise2 = rd_data[5*DQ_WIDTH-1:4*DQ_WIDTH];
assign rd_data_fall2 = rd_data[6*DQ_WIDTH-1:5*DQ_WIDTH];
assign rd_data_rise3 = rd_data[7*DQ_WIDTH-1:6*DQ_WIDTH];
assign rd_data_fall3 = rd_data[8*DQ_WIDTH-1:7*DQ_WIDTH];
end else begin: rd_data_div2_logic_clk
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
end
endgenerate
always @(posedge clk) begin
rd_mux_sel_r <= #TCQ cal1_cnt_cpt_r;
end
// Register outputs for improved timing.
// NOTE: Will need to change when per-bit DQ deskew is supported.
  // Currently all bits in a DQS group are checked in aggregate
generate
genvar mux_i;
for (mux_i = 0; mux_i < DRAM_WIDTH; mux_i = mux_i + 1) begin: gen_mux_rd
always @(posedge clk) begin
mux_rd_rise0_r[mux_i] <= #TCQ rd_data_rise0[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall0_r[mux_i] <= #TCQ rd_data_fall0[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_rise1_r[mux_i] <= #TCQ rd_data_rise1[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall1_r[mux_i] <= #TCQ rd_data_fall1[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_rise2_r[mux_i] <= #TCQ rd_data_rise2[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall2_r[mux_i] <= #TCQ rd_data_fall2[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_rise3_r[mux_i] <= #TCQ rd_data_rise3[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall3_r[mux_i] <= #TCQ rd_data_fall3[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
end
end
endgenerate
//***************************************************************************
// MPR Read Leveling
//***************************************************************************
  // Store the previous read data for checking later. Only bit 0 is used
  // since the MPR contents (01010101) are generally available on DQ[0] per
  // the JEDEC spec.
always @(posedge clk)begin
if ((cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) ||
((cal1_state_r == CAL1_MPR_PAT_DETECT) && (idel_pat_detect_valid_r)))begin
mpr_rd_rise0_prev_r <= #TCQ mux_rd_rise0_r[0];
mpr_rd_fall0_prev_r <= #TCQ mux_rd_fall0_r[0];
mpr_rd_rise1_prev_r <= #TCQ mux_rd_rise1_r[0];
mpr_rd_fall1_prev_r <= #TCQ mux_rd_fall1_r[0];
mpr_rd_rise2_prev_r <= #TCQ mux_rd_rise2_r[0];
mpr_rd_fall2_prev_r <= #TCQ mux_rd_fall2_r[0];
mpr_rd_rise3_prev_r <= #TCQ mux_rd_rise3_r[0];
mpr_rd_fall3_prev_r <= #TCQ mux_rd_fall3_r[0];
end
end
generate
if (nCK_PER_CLK == 4) begin: mpr_4to1
      // Require a stable count of 2 IDELAY taps (at 78 ps resolution)
always @(posedge clk) begin
if (rst | (cal1_state_r == CAL1_NEW_DQS_PREWAIT) |
//(cal1_state_r == CAL1_DETECT_EDGE) |
(mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) |
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) |
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) |
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]) |
(mpr_rd_rise2_prev_r != mux_rd_rise2_r[0]) |
(mpr_rd_fall2_prev_r != mux_rd_fall2_r[0]) |
(mpr_rd_rise3_prev_r != mux_rd_rise3_r[0]) |
(mpr_rd_fall3_prev_r != mux_rd_fall3_r[0]))
stable_idel_cnt <= #TCQ 3'd0;
else if ((|idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing]) &
((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idel_pat_detect_valid_r))) begin
if ((mpr_rd_rise0_prev_r == mux_rd_rise0_r[0]) &
(mpr_rd_fall0_prev_r == mux_rd_fall0_r[0]) &
(mpr_rd_rise1_prev_r == mux_rd_rise1_r[0]) &
(mpr_rd_fall1_prev_r == mux_rd_fall1_r[0]) &
(mpr_rd_rise2_prev_r == mux_rd_rise2_r[0]) &
(mpr_rd_fall2_prev_r == mux_rd_fall2_r[0]) &
(mpr_rd_rise3_prev_r == mux_rd_rise3_r[0]) &
(mpr_rd_fall3_prev_r == mux_rd_fall3_r[0]) &
(stable_idel_cnt < 3'd2))
stable_idel_cnt <= #TCQ stable_idel_cnt + 1;
end
end
always @(posedge clk) begin
if (rst |
(mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r &
mpr_rd_rise2_prev_r & ~mpr_rd_fall2_prev_r &
mpr_rd_rise3_prev_r & ~mpr_rd_fall3_prev_r))
inhibit_edge_detect_r <= 1'b1;
// Wait for settling time after idelay tap increment before
// de-asserting inhibit_edge_detect_r
else if ((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd1) &
(~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r &
~mpr_rd_rise2_prev_r & mpr_rd_fall2_prev_r &
~mpr_rd_rise3_prev_r & mpr_rd_fall3_prev_r))
inhibit_edge_detect_r <= 1'b0;
end
//checking for transition from 01010101 to 10101010
always @(posedge clk)begin
if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) |
inhibit_edge_detect_r)
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 10101010 is not the correct pattern
else if ((mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r &
mpr_rd_rise2_prev_r & ~mpr_rd_fall2_prev_r &
mpr_rd_rise3_prev_r & ~mpr_rd_fall3_prev_r) ||
((stable_idel_cnt < 3'd2) & (cal1_state_r == CAL1_MPR_PAT_DETECT)
&& (idel_pat_detect_valid_r)))
//|| (idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] < 5'd2))
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 01010101 to 10101010 is the correct transition
else if ((~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r &
~mpr_rd_rise2_prev_r & mpr_rd_fall2_prev_r &
~mpr_rd_rise3_prev_r & mpr_rd_fall3_prev_r) &
(stable_idel_cnt == 3'd2) &
((mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) ||
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) ||
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) ||
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]) ||
(mpr_rd_rise2_prev_r != mux_rd_rise2_r[0]) ||
(mpr_rd_fall2_prev_r != mux_rd_fall2_r[0]) ||
(mpr_rd_rise3_prev_r != mux_rd_rise3_r[0]) ||
(mpr_rd_fall3_prev_r != mux_rd_fall3_r[0])))
idel_mpr_pat_detect_r <= #TCQ 1'b1;
end
end else if (nCK_PER_CLK == 2) begin: mpr_2to1
      // Require a stable count of 2 IDELAY taps (at 78 ps resolution)
always @(posedge clk) begin
if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) |
(mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) |
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) |
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) |
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]))
stable_idel_cnt <= #TCQ 3'd0;
else if ((idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd0) &
((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idel_pat_detect_valid_r))) begin
if ((mpr_rd_rise0_prev_r == mux_rd_rise0_r[0]) &
(mpr_rd_fall0_prev_r == mux_rd_fall0_r[0]) &
(mpr_rd_rise1_prev_r == mux_rd_rise1_r[0]) &
(mpr_rd_fall1_prev_r == mux_rd_fall1_r[0]) &
(stable_idel_cnt < 3'd2))
stable_idel_cnt <= #TCQ stable_idel_cnt + 1;
end
end
always @(posedge clk) begin
if (rst |
(mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r))
inhibit_edge_detect_r <= 1'b1;
else if ((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd1) &
(~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r))
inhibit_edge_detect_r <= 1'b0;
end
//checking for transition from 01010101 to 10101010
always @(posedge clk)begin
if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) |
inhibit_edge_detect_r)
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 1010 is not the correct pattern
else if ((mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r) ||
((stable_idel_cnt < 3'd2) & (cal1_state_r == CAL1_MPR_PAT_DETECT)
& (idel_pat_detect_valid_r)))
// ||(idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] < 5'd2))
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 0101 to 1010 is the correct transition
else if ((~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r) &
(stable_idel_cnt == 3'd2) &
((mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) ||
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) ||
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) ||
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0])))
idel_mpr_pat_detect_r <= #TCQ 1'b1;
end
end
endgenerate
// Registered signal indicates when mux_rd_rise/fall_r is valid
always @(posedge clk)
mux_rd_valid_r <= #TCQ ~phy_if_empty;
//***************************************************************************
// Decrement initial Phaser_IN fine delay value before proceeding with
// read calibration
//***************************************************************************
always @(posedge clk) begin
dqs_po_dec_done_r1 <= #TCQ dqs_po_dec_done;
dqs_po_dec_done_r2 <= #TCQ dqs_po_dec_done_r1;
fine_dly_dec_done_r2 <= #TCQ fine_dly_dec_done_r1;
pi_fine_dly_dec_done <= #TCQ fine_dly_dec_done_r2;
end
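  // Settling counter: reloaded to 8 on every Phaser_IN fine-tap decrement
  // (pi_cnt_dec) and counted down once the Phaser_OUT decrement is done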
always @(posedge clk) begin
if (rst || pi_cnt_dec)
wait_cnt_r <= #TCQ 'd8;
else if (dqs_po_dec_done_r2 && (wait_cnt_r > 'd0))
wait_cnt_r <= #TCQ wait_cnt_r - 1;
end
always @(posedge clk) begin
if (rst) begin
pi_rdval_cnt <= #TCQ 'd0;
end else if (dqs_po_dec_done_r1 && ~dqs_po_dec_done_r2) begin
pi_rdval_cnt <= #TCQ pi_counter_read_val;
end else if (pi_rdval_cnt > 'd0) begin
if (pi_cnt_dec)
pi_rdval_cnt <= #TCQ pi_rdval_cnt - 1;
else
pi_rdval_cnt <= #TCQ pi_rdval_cnt;
end else if (pi_rdval_cnt == 'd0) begin
pi_rdval_cnt <= #TCQ pi_rdval_cnt;
end
end
always @(posedge clk) begin
if (rst || (pi_rdval_cnt == 'd0))
pi_cnt_dec <= #TCQ 1'b0;
else if (dqs_po_dec_done_r2 && (pi_rdval_cnt > 'd0)
&& (wait_cnt_r == 'd1))
pi_cnt_dec <= #TCQ 1'b1;
else
pi_cnt_dec <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (rst) begin
fine_dly_dec_done_r1 <= #TCQ 1'b0;
end else if (((pi_cnt_dec == 'd1) && (pi_rdval_cnt == 'd1)) ||
(dqs_po_dec_done_r2 && (pi_rdval_cnt == 'd0))) begin
fine_dly_dec_done_r1 <= #TCQ 1'b1;
end
end
//***************************************************************************
  // Demultiplexer to control Phaser_IN delay values
//***************************************************************************
// Read DQS
always @(posedge clk) begin
if (rst) begin
pi_en_stg2_f_timing <= #TCQ 'b0;
pi_stg2_f_incdec_timing <= #TCQ 'b0;
end else if (pi_cnt_dec) begin
pi_en_stg2_f_timing <= #TCQ 'b1;
pi_stg2_f_incdec_timing <= #TCQ 'b0;
end else if (cal1_dlyce_cpt_r) begin
if ((SIM_CAL_OPTION == "NONE") ||
(SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin
// Change only specified DQS
pi_en_stg2_f_timing <= #TCQ 1'b1;
pi_stg2_f_incdec_timing <= #TCQ cal1_dlyinc_cpt_r;
end else if (SIM_CAL_OPTION == "FAST_CAL") begin
        // If simulating with calibration "shortcuts" enabled, apply the
        // results to all DQS groups (i.e. assume the same delay on all
        // DQS groups).
pi_en_stg2_f_timing <= #TCQ 1'b1;
pi_stg2_f_incdec_timing <= #TCQ cal1_dlyinc_cpt_r;
end
end else begin
pi_en_stg2_f_timing <= #TCQ 'b0;
pi_stg2_f_incdec_timing <= #TCQ 'b0;
end
end
// registered for timing
always @(posedge clk) begin
pi_en_stg2_f <= #TCQ pi_en_stg2_f_timing;
pi_stg2_f_incdec <= #TCQ pi_stg2_f_incdec_timing;
end
  // This counter is used to implement settling time between
  // Phaser_IN rank register loads to different DQSs
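  // done_cnt is (re)loaded with 10 and counts down; the value 1 acts as the
  // "load now" strobe in the rank register load logic below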
always @(posedge clk) begin
if (rst)
done_cnt <= #TCQ 'b0;
else if (((cal1_state_r == CAL1_REGL_LOAD) &&
(cal1_state_r1 == CAL1_NEXT_DQS)) ||
((done_cnt == 4'd1) && (cal1_state_r != CAL1_DONE)))
done_cnt <= #TCQ 4'b1010;
else if (done_cnt > 'b0)
done_cnt <= #TCQ done_cnt - 1;
end
  // During rank register loading, does the rank count need to be sent to
  // Phaser_IN via the phy_ctl_wd? If so, phy_init will have to
  // issue NOPs during rank register loading with the appropriate
  // rank count
always @(posedge clk) begin
if (rst || (regl_rank_done_r == 1'b1))
regl_rank_done_r <= #TCQ 1'b0;
else if ((regl_dqs_cnt == DQS_WIDTH-1) &&
(regl_rank_cnt != RANKS-1) &&
(done_cnt == 4'd1))
regl_rank_done_r <= #TCQ 1'b1;
end
  // Temp wire for timing.
  // The 6*regl_dqs_cnt term in the always block below causes timing issues
  // due to DSP block inference.
  // It is replaced with a shift-by-2 plus a shift-by-1 to avoid the
  // DSP multiplier.
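  // e.g. 6*x = (x<<2) + (x<<1); for x = 3 this gives 12 + 6 = 18, matching
  // the shift-add indexing used for dbg_cpt_first/second_edge_taps above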
assign regl_dqs_cnt_timing = {2'd0, regl_dqs_cnt};
// Load Phaser_OUT rank register with rdlvl delay value
// for each DQS per rank.
always @(posedge clk) begin
if (rst || (done_cnt == 4'd0)) begin
pi_stg2_load_timing <= #TCQ 'b0;
pi_stg2_reg_l_timing <= #TCQ 'b0;
end else if ((cal1_state_r == CAL1_REGL_LOAD) &&
(regl_dqs_cnt <= DQS_WIDTH-1) && (done_cnt == 4'd1)) begin
pi_stg2_load_timing <= #TCQ 'b1;
pi_stg2_reg_l_timing <= #TCQ
rdlvl_dqs_tap_cnt_r[rnk_cnt_r][regl_dqs_cnt];
end else begin
pi_stg2_load_timing <= #TCQ 'b0;
pi_stg2_reg_l_timing <= #TCQ 'b0;
end
end
// registered for timing
always @(posedge clk) begin
pi_stg2_load <= #TCQ pi_stg2_load_timing;
pi_stg2_reg_l <= #TCQ pi_stg2_reg_l_timing;
end
always @(posedge clk) begin
if (rst || (done_cnt == 4'd0) ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
regl_rank_cnt <= #TCQ 2'b00;
else if ((cal1_state_r == CAL1_REGL_LOAD) &&
(regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1)) begin
if (regl_rank_cnt == RANKS-1)
regl_rank_cnt <= #TCQ regl_rank_cnt;
else
regl_rank_cnt <= #TCQ regl_rank_cnt + 1;
end
end
always @(posedge clk) begin
if (rst || (done_cnt == 4'd0) ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
regl_dqs_cnt <= #TCQ {DQS_CNT_WIDTH+1{1'b0}};
else if ((cal1_state_r == CAL1_REGL_LOAD) &&
(regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1)) begin
if (regl_rank_cnt == RANKS-1)
regl_dqs_cnt <= #TCQ regl_dqs_cnt;
else
regl_dqs_cnt <= #TCQ 'b0;
end else if ((cal1_state_r == CAL1_REGL_LOAD) && (regl_dqs_cnt != DQS_WIDTH-1)
&& (done_cnt == 4'd1))
regl_dqs_cnt <= #TCQ regl_dqs_cnt + 1;
else
regl_dqs_cnt <= #TCQ regl_dqs_cnt;
end
always @(posedge clk)
regl_dqs_cnt_r <= #TCQ regl_dqs_cnt;
//*****************************************************************
// DQ Stage 1 CALIBRATION INCREMENT/DECREMENT LOGIC:
  // The actual IDELAY elements for each of the DQ bits are set via the
  // DLYVAL parallel load port. However, the stage 1 calibration
  // algorithm (well, most of it) only needs to increment or decrement the DQ
// IDELAY value by 1 at any one time.
//*****************************************************************
// Chip-select generation for each of the individual counters tracking
// IDELAY tap values for each DQ
generate
for (z = 0; z < DQS_WIDTH; z = z + 1) begin: gen_dlyce_dq
always @(posedge clk)
if (rst)
dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0;
else
if (SIM_CAL_OPTION == "SKIP_CAL")
// If skipping calibration altogether (only for simulation), no
// need to set DQ IODELAY values - they are hardcoded
dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0;
else if (SIM_CAL_OPTION == "FAST_CAL") begin
// If fast calibration option (simulation only) selected, DQ
// IODELAYs across all bytes are updated simultaneously
// (although per-bit deskew within DQS[0] is still supported)
for (h = 0; h < DRAM_WIDTH; h = h + 1) begin
dlyce_dq_r[DRAM_WIDTH*z + h] <= #TCQ cal1_dlyce_dq_r;
end
end else if ((SIM_CAL_OPTION == "NONE") ||
(SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin
if (cal1_cnt_cpt_r == z) begin
for (g = 0; g < DRAM_WIDTH; g = g + 1) begin
dlyce_dq_r[DRAM_WIDTH*z + g]
<= #TCQ cal1_dlyce_dq_r;
end
end else
dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0;
end
end
endgenerate
// Also delay increment/decrement control to match delay on DLYCE
always @(posedge clk)
if (rst)
dlyinc_dq_r <= #TCQ 1'b0;
else
dlyinc_dq_r <= #TCQ cal1_dlyinc_dq_r;
// Each DQ has a counter associated with it to record current read-leveling
// delay value
always @(posedge clk)
    // Reset, or skipping calibration altogether
if (rst | (SIM_CAL_OPTION == "SKIP_CAL")) begin
for (aa = 0; aa < RANKS; aa = aa + 1) begin: rst_dlyval_dq_reg_r
for (bb = 0; bb < DQ_WIDTH; bb = bb + 1)
dlyval_dq_reg_r[aa][bb] <= #TCQ 'b0;
end
end else if (SIM_CAL_OPTION == "FAST_CAL") begin
for (n = 0; n < RANKS; n = n + 1) begin: gen_dlyval_dq_reg_rnk
for (r = 0; r < DQ_WIDTH; r = r + 1) begin: gen_dlyval_dq_reg
if (dlyce_dq_r[r]) begin
if (dlyinc_dq_r)
dlyval_dq_reg_r[n][r] <= #TCQ dlyval_dq_reg_r[n][r] + 5'h01;
else
dlyval_dq_reg_r[n][r] <= #TCQ dlyval_dq_reg_r[n][r] - 5'h01;
end
end
end
end else begin
if (dlyce_dq_r[cal1_cnt_cpt_r]) begin
if (dlyinc_dq_r)
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] <= #TCQ
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] + 5'h01;
else
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] <= #TCQ
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] - 5'h01;
end
end
// Register for timing (help with logic placement)
always @(posedge clk) begin
for (cc = 0; cc < RANKS; cc = cc + 1) begin: dlyval_dq_assgn
for (dd = 0; dd < DQ_WIDTH; dd = dd + 1)
dlyval_dq[((5*dd)+(cc*DQ_WIDTH*5))+:5] <= #TCQ dlyval_dq_reg_r[cc][dd];
end
end
//***************************************************************************
// Generate signal used to delay calibration state machine - used when:
// (1) IDELAY value changed
// (2) RD_MUX_SEL value changed
// Use when a delay is necessary to give the change time to propagate
// through the data pipeline (through IDELAY and ISERDES, and fabric
// pipeline stages)
//***************************************************************************
// List all the stage 1 calibration wait states here.
// verilint STARC-2.7.3.3b off
always @(posedge clk)
if ((cal1_state_r == CAL1_NEW_DQS_WAIT) ||
(cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) ||
(cal1_state_r == CAL1_NEW_DQS_PREWAIT) ||
(cal1_state_r == CAL1_VALID_WAIT) ||
(cal1_state_r == CAL1_PB_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_PB_INC_CPT_WAIT) ||
(cal1_state_r == CAL1_PB_DEC_CPT_LEFT_WAIT) ||
(cal1_state_r == CAL1_PB_INC_DQ_WAIT) ||
(cal1_state_r == CAL1_PB_DEC_CPT_WAIT) ||
(cal1_state_r == CAL1_IDEL_INC_CPT_WAIT) ||
(cal1_state_r == CAL1_IDEL_DEC_CPT_WAIT) ||
(cal1_state_r == CAL1_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_DQ_IDEL_TAP_INC_WAIT) ||
(cal1_state_r == CAL1_DQ_IDEL_TAP_DEC_WAIT))
cal1_wait_cnt_en_r <= #TCQ 1'b1;
else
cal1_wait_cnt_en_r <= #TCQ 1'b0;
// verilint STARC-2.7.3.3b on
always @(posedge clk)
if (!cal1_wait_cnt_en_r) begin
cal1_wait_cnt_r <= #TCQ 5'b00000;
cal1_wait_r <= #TCQ 1'b1;
end else begin
if (cal1_wait_cnt_r != PIPE_WAIT_CNT - 1) begin
cal1_wait_cnt_r <= #TCQ cal1_wait_cnt_r + 1;
cal1_wait_r <= #TCQ 1'b1;
end else begin
// Need to reset to 0 to handle the case when there are two
// different WAIT states back-to-back
cal1_wait_cnt_r <= #TCQ 5'b00000;
cal1_wait_r <= #TCQ 1'b0;
end
end
//***************************************************************************
  // Generate request to PHY_INIT logic to issue a precharge. Required when
  // calibration can take a long time (during which there are only constant
  // reads present on this bus). In this case we need to issue periodic
  // precharges to avoid tRAS violation. This signal must meet the following
// requirements: (1) only transition from 0->1 when prech is first needed,
// (2) stay at 1 and only transition 1->0 when RDLVL_PRECH_DONE asserted
//***************************************************************************
always @(posedge clk)
if (rst)
rdlvl_prech_req <= #TCQ 1'b0;
else
rdlvl_prech_req <= #TCQ cal1_prech_req_r;
//***************************************************************************
  // Serial-to-parallel register to store last RD_SHIFT_LEN cycles of
// data from ISERDES. The value of this register is also stored, so that
// previous and current values of the ISERDES data can be compared while
// varying the IODELAY taps to see if an "edge" of the data valid window
// has been encountered since the last IODELAY tap adjustment
//***************************************************************************
//***************************************************************************
  // Shift register to store last RD_SHIFT_LEN cycles of data from ISERDES
  // NOTE: Written using discrete flops, but an SRL can be used if the matching
  // logic does the comparison sequentially, rather than in parallel
//***************************************************************************
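  // With the header defaults RD_SHIFT_LEN = 2, so each sr_*_r entry holds the
  // last two beats for its DQ bit, new data shifting in at bit 0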
generate
genvar rd_i;
if (nCK_PER_CLK == 4) begin: gen_sr_div4
if (RD_SHIFT_LEN == 1) begin: gen_sr_len_eq1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ mux_rd_rise0_r[rd_i];
sr_fall0_r[rd_i] <= #TCQ mux_rd_fall0_r[rd_i];
sr_rise1_r[rd_i] <= #TCQ mux_rd_rise1_r[rd_i];
sr_fall1_r[rd_i] <= #TCQ mux_rd_fall1_r[rd_i];
sr_rise2_r[rd_i] <= #TCQ mux_rd_rise2_r[rd_i];
sr_fall2_r[rd_i] <= #TCQ mux_rd_fall2_r[rd_i];
sr_rise3_r[rd_i] <= #TCQ mux_rd_rise3_r[rd_i];
sr_fall3_r[rd_i] <= #TCQ mux_rd_fall3_r[rd_i];
end
end
end
end else if (RD_SHIFT_LEN > 1) begin: gen_sr_len_gt1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ {sr_rise0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise0_r[rd_i]};
sr_fall0_r[rd_i] <= #TCQ {sr_fall0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall0_r[rd_i]};
sr_rise1_r[rd_i] <= #TCQ {sr_rise1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise1_r[rd_i]};
sr_fall1_r[rd_i] <= #TCQ {sr_fall1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall1_r[rd_i]};
sr_rise2_r[rd_i] <= #TCQ {sr_rise2_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise2_r[rd_i]};
sr_fall2_r[rd_i] <= #TCQ {sr_fall2_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall2_r[rd_i]};
sr_rise3_r[rd_i] <= #TCQ {sr_rise3_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise3_r[rd_i]};
sr_fall3_r[rd_i] <= #TCQ {sr_fall3_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall3_r[rd_i]};
end
end
end
end
end else if (nCK_PER_CLK == 2) begin: gen_sr_div2
if (RD_SHIFT_LEN == 1) begin: gen_sr_len_eq1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ {mux_rd_rise0_r[rd_i]};
sr_fall0_r[rd_i] <= #TCQ {mux_rd_fall0_r[rd_i]};
sr_rise1_r[rd_i] <= #TCQ {mux_rd_rise1_r[rd_i]};
sr_fall1_r[rd_i] <= #TCQ {mux_rd_fall1_r[rd_i]};
end
end
end
end else if (RD_SHIFT_LEN > 1) begin: gen_sr_len_gt1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ {sr_rise0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise0_r[rd_i]};
sr_fall0_r[rd_i] <= #TCQ {sr_fall0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall0_r[rd_i]};
sr_rise1_r[rd_i] <= #TCQ {sr_rise1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise1_r[rd_i]};
sr_fall1_r[rd_i] <= #TCQ {sr_fall1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall1_r[rd_i]};
end
end
end
end
end
endgenerate
//***************************************************************************
// Conversion to pattern calibration
//***************************************************************************
// Pattern for DQ IDELAY calibration
//*****************************************************************
  // Expected data pattern when DQ is shifted to the right such that
  // DQS is before the left edge of the DVW:
// Based on pattern of ({rise,fall}) =
// 0x1, 0xB, 0x4, 0x4, 0xB, 0x9
// Each nibble will look like:
// bit3: 0, 1, 0, 0, 1, 1
// bit2: 0, 0, 1, 1, 0, 0
// bit1: 0, 1, 0, 0, 1, 0
// bit0: 1, 1, 0, 0, 1, 1
// Or if the write is early it could look like:
// 0x4, 0x4, 0xB, 0x9, 0x6, 0xE
// bit3: 0, 0, 1, 1, 0, 1
// bit2: 1, 1, 0, 0, 1, 1
// bit1: 0, 0, 1, 0, 1, 1
// bit0: 0, 0, 1, 1, 0, 0
// Change the hard-coded pattern below accordingly as RD_SHIFT_LEN
// and the actual training pattern contents change
//*****************************************************************
generate
if (nCK_PER_CLK == 4) begin: gen_pat_div4
// Pattern for DQ IDELAY increment
// Target pattern for "early write"
assign {idel_pat0_rise0[3], idel_pat0_rise0[2],
idel_pat0_rise0[1], idel_pat0_rise0[0]} = 4'h1;
assign {idel_pat0_fall0[3], idel_pat0_fall0[2],
idel_pat0_fall0[1], idel_pat0_fall0[0]} = 4'h7;
assign {idel_pat0_rise1[3], idel_pat0_rise1[2],
idel_pat0_rise1[1], idel_pat0_rise1[0]} = 4'hE;
assign {idel_pat0_fall1[3], idel_pat0_fall1[2],
idel_pat0_fall1[1], idel_pat0_fall1[0]} = 4'hC;
assign {idel_pat0_rise2[3], idel_pat0_rise2[2],
idel_pat0_rise2[1], idel_pat0_rise2[0]} = 4'h9;
assign {idel_pat0_fall2[3], idel_pat0_fall2[2],
idel_pat0_fall2[1], idel_pat0_fall2[0]} = 4'h2;
assign {idel_pat0_rise3[3], idel_pat0_rise3[2],
idel_pat0_rise3[1], idel_pat0_rise3[0]} = 4'h4;
assign {idel_pat0_fall3[3], idel_pat0_fall3[2],
idel_pat0_fall3[1], idel_pat0_fall3[0]} = 4'hB;
// Target pattern for "on-time write"
assign {idel_pat1_rise0[3], idel_pat1_rise0[2],
idel_pat1_rise0[1], idel_pat1_rise0[0]} = 4'h4;
assign {idel_pat1_fall0[3], idel_pat1_fall0[2],
idel_pat1_fall0[1], idel_pat1_fall0[0]} = 4'h9;
assign {idel_pat1_rise1[3], idel_pat1_rise1[2],
idel_pat1_rise1[1], idel_pat1_rise1[0]} = 4'h3;
assign {idel_pat1_fall1[3], idel_pat1_fall1[2],
idel_pat1_fall1[1], idel_pat1_fall1[0]} = 4'h7;
assign {idel_pat1_rise2[3], idel_pat1_rise2[2],
idel_pat1_rise2[1], idel_pat1_rise2[0]} = 4'hE;
assign {idel_pat1_fall2[3], idel_pat1_fall2[2],
idel_pat1_fall2[1], idel_pat1_fall2[0]} = 4'hC;
assign {idel_pat1_rise3[3], idel_pat1_rise3[2],
idel_pat1_rise3[1], idel_pat1_rise3[0]} = 4'h9;
assign {idel_pat1_fall3[3], idel_pat1_fall3[2],
idel_pat1_fall3[1], idel_pat1_fall3[0]} = 4'h2;
// Correct data valid window for "early write"
assign {pat0_rise0[3], pat0_rise0[2],
pat0_rise0[1], pat0_rise0[0]} = 4'h7;
assign {pat0_fall0[3], pat0_fall0[2],
pat0_fall0[1], pat0_fall0[0]} = 4'hE;
assign {pat0_rise1[3], pat0_rise1[2],
pat0_rise1[1], pat0_rise1[0]} = 4'hC;
assign {pat0_fall1[3], pat0_fall1[2],
pat0_fall1[1], pat0_fall1[0]} = 4'h9;
assign {pat0_rise2[3], pat0_rise2[2],
pat0_rise2[1], pat0_rise2[0]} = 4'h2;
assign {pat0_fall2[3], pat0_fall2[2],
pat0_fall2[1], pat0_fall2[0]} = 4'h4;
assign {pat0_rise3[3], pat0_rise3[2],
pat0_rise3[1], pat0_rise3[0]} = 4'hB;
assign {pat0_fall3[3], pat0_fall3[2],
pat0_fall3[1], pat0_fall3[0]} = 4'h1;
// Correct data valid window for "on-time write"
assign {pat1_rise0[3], pat1_rise0[2],
pat1_rise0[1], pat1_rise0[0]} = 4'h9;
assign {pat1_fall0[3], pat1_fall0[2],
pat1_fall0[1], pat1_fall0[0]} = 4'h3;
assign {pat1_rise1[3], pat1_rise1[2],
pat1_rise1[1], pat1_rise1[0]} = 4'h7;
assign {pat1_fall1[3], pat1_fall1[2],
pat1_fall1[1], pat1_fall1[0]} = 4'hE;
assign {pat1_rise2[3], pat1_rise2[2],
pat1_rise2[1], pat1_rise2[0]} = 4'hC;
assign {pat1_fall2[3], pat1_fall2[2],
pat1_fall2[1], pat1_fall2[0]} = 4'h9;
assign {pat1_rise3[3], pat1_rise3[2],
pat1_rise3[1], pat1_rise3[0]} = 4'h2;
assign {pat1_fall3[3], pat1_fall3[2],
pat1_fall3[1], pat1_fall3[0]} = 4'h4;
end else if (nCK_PER_CLK == 2) begin: gen_pat_div2
// Pattern for DQ IDELAY increment
// Target pattern for "early write"
assign idel_pat0_rise0[3] = 2'b01;
assign idel_pat0_fall0[3] = 2'b00;
assign idel_pat0_rise1[3] = 2'b10;
assign idel_pat0_fall1[3] = 2'b11;
assign idel_pat0_rise0[2] = 2'b00;
assign idel_pat0_fall0[2] = 2'b10;
assign idel_pat0_rise1[2] = 2'b11;
assign idel_pat0_fall1[2] = 2'b10;
assign idel_pat0_rise0[1] = 2'b00;
assign idel_pat0_fall0[1] = 2'b11;
assign idel_pat0_rise1[1] = 2'b10;
assign idel_pat0_fall1[1] = 2'b01;
assign idel_pat0_rise0[0] = 2'b11;
assign idel_pat0_fall0[0] = 2'b10;
assign idel_pat0_rise1[0] = 2'b00;
assign idel_pat0_fall1[0] = 2'b01;
// Target pattern for "on-time write"
assign idel_pat1_rise0[3] = 2'b01;
assign idel_pat1_fall0[3] = 2'b11;
assign idel_pat1_rise1[3] = 2'b01;
assign idel_pat1_fall1[3] = 2'b00;
assign idel_pat1_rise0[2] = 2'b11;
assign idel_pat1_fall0[2] = 2'b01;
assign idel_pat1_rise1[2] = 2'b00;
assign idel_pat1_fall1[2] = 2'b10;
assign idel_pat1_rise0[1] = 2'b01;
assign idel_pat1_fall0[1] = 2'b00;
assign idel_pat1_rise1[1] = 2'b10;
assign idel_pat1_fall1[1] = 2'b11;
assign idel_pat1_rise0[0] = 2'b00;
assign idel_pat1_fall0[0] = 2'b10;
assign idel_pat1_rise1[0] = 2'b11;
assign idel_pat1_fall1[0] = 2'b10;
// Correct data valid window for "early write"
assign pat0_rise0[3] = 2'b00;
assign pat0_fall0[3] = 2'b10;
assign pat0_rise1[3] = 2'b11;
assign pat0_fall1[3] = 2'b10;
assign pat0_rise0[2] = 2'b10;
assign pat0_fall0[2] = 2'b11;
assign pat0_rise1[2] = 2'b10;
assign pat0_fall1[2] = 2'b00;
assign pat0_rise0[1] = 2'b11;
assign pat0_fall0[1] = 2'b10;
assign pat0_rise1[1] = 2'b01;
assign pat0_fall1[1] = 2'b00;
assign pat0_rise0[0] = 2'b10;
assign pat0_fall0[0] = 2'b00;
assign pat0_rise1[0] = 2'b01;
assign pat0_fall1[0] = 2'b11;
// Correct data valid window for "on-time write"
assign pat1_rise0[3] = 2'b11;
assign pat1_fall0[3] = 2'b01;
assign pat1_rise1[3] = 2'b00;
assign pat1_fall1[3] = 2'b10;
assign pat1_rise0[2] = 2'b01;
assign pat1_fall0[2] = 2'b00;
assign pat1_rise1[2] = 2'b10;
assign pat1_fall1[2] = 2'b11;
assign pat1_rise0[1] = 2'b00;
assign pat1_fall0[1] = 2'b10;
assign pat1_rise1[1] = 2'b11;
assign pat1_fall1[1] = 2'b10;
assign pat1_rise0[0] = 2'b10;
assign pat1_fall0[0] = 2'b11;
assign pat1_rise1[0] = 2'b10;
assign pat1_fall1[0] = 2'b00;
end
endgenerate
  // Each bit of each byte is compared to the expected pattern.
  // This is done to prevent (or at least "drastically decrease") the chance
  // that invalid data clocked in while the DQ bus is tri-stated (along with a
  // combination of the correct data) will resemble the expected data
  // pattern. A better fix for this is to change the training pattern and/or
  // make the pattern longer.
generate
genvar pt_i;
if (nCK_PER_CLK == 4) begin: gen_pat_match_div4
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
// DQ IDELAY pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat0_rise0[pt_i%4])
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat0_fall0[pt_i%4])
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat0_rise1[pt_i%4])
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat0_fall1[pt_i%4])
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == idel_pat0_rise2[pt_i%4])
idel_pat0_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == idel_pat0_fall2[pt_i%4])
idel_pat0_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == idel_pat0_rise3[pt_i%4])
idel_pat0_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == idel_pat0_fall3[pt_i%4])
idel_pat0_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat1_rise0[pt_i%4])
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat1_fall0[pt_i%4])
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat1_rise1[pt_i%4])
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat1_fall1[pt_i%4])
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == idel_pat1_rise2[pt_i%4])
idel_pat1_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == idel_pat1_fall2[pt_i%4])
idel_pat1_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == idel_pat1_rise3[pt_i%4])
idel_pat1_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == idel_pat1_fall3[pt_i%4])
idel_pat1_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
// DQS DVW pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat0_rise0[pt_i%4])
pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat0_fall0[pt_i%4])
pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat0_rise1[pt_i%4])
pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat0_fall1[pt_i%4])
pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat0_rise2[pt_i%4])
pat0_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat0_fall2[pt_i%4])
pat0_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == pat0_rise3[pt_i%4])
pat0_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == pat0_fall3[pt_i%4])
pat0_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat1_rise0[pt_i%4])
pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat1_fall0[pt_i%4])
pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat1_rise1[pt_i%4])
pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat1_fall1[pt_i%4])
pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat1_rise2[pt_i%4])
pat1_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat1_fall2[pt_i%4])
pat1_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == pat1_rise3[pt_i%4])
pat1_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == pat1_fall3[pt_i%4])
pat1_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
end
// Combine pattern match "subterms" for DQ-IDELAY stage
always @(posedge clk) begin
idel_pat0_match_rise0_and_r <= #TCQ &idel_pat0_match_rise0_r;
idel_pat0_match_fall0_and_r <= #TCQ &idel_pat0_match_fall0_r;
idel_pat0_match_rise1_and_r <= #TCQ &idel_pat0_match_rise1_r;
idel_pat0_match_fall1_and_r <= #TCQ &idel_pat0_match_fall1_r;
idel_pat0_match_rise2_and_r <= #TCQ &idel_pat0_match_rise2_r;
idel_pat0_match_fall2_and_r <= #TCQ &idel_pat0_match_fall2_r;
idel_pat0_match_rise3_and_r <= #TCQ &idel_pat0_match_rise3_r;
idel_pat0_match_fall3_and_r <= #TCQ &idel_pat0_match_fall3_r;
idel_pat0_data_match_r <= #TCQ (idel_pat0_match_rise0_and_r &&
idel_pat0_match_fall0_and_r &&
idel_pat0_match_rise1_and_r &&
idel_pat0_match_fall1_and_r &&
idel_pat0_match_rise2_and_r &&
idel_pat0_match_fall2_and_r &&
idel_pat0_match_rise3_and_r &&
idel_pat0_match_fall3_and_r);
end
always @(posedge clk) begin
idel_pat1_match_rise0_and_r <= #TCQ &idel_pat1_match_rise0_r;
idel_pat1_match_fall0_and_r <= #TCQ &idel_pat1_match_fall0_r;
idel_pat1_match_rise1_and_r <= #TCQ &idel_pat1_match_rise1_r;
idel_pat1_match_fall1_and_r <= #TCQ &idel_pat1_match_fall1_r;
idel_pat1_match_rise2_and_r <= #TCQ &idel_pat1_match_rise2_r;
idel_pat1_match_fall2_and_r <= #TCQ &idel_pat1_match_fall2_r;
idel_pat1_match_rise3_and_r <= #TCQ &idel_pat1_match_rise3_r;
idel_pat1_match_fall3_and_r <= #TCQ &idel_pat1_match_fall3_r;
idel_pat1_data_match_r <= #TCQ (idel_pat1_match_rise0_and_r &&
idel_pat1_match_fall0_and_r &&
idel_pat1_match_rise1_and_r &&
idel_pat1_match_fall1_and_r &&
idel_pat1_match_rise2_and_r &&
idel_pat1_match_fall2_and_r &&
idel_pat1_match_rise3_and_r &&
idel_pat1_match_fall3_and_r);
end
always @(*)
idel_pat_data_match <= #TCQ idel_pat0_data_match_r |
idel_pat1_data_match_r;
always @(posedge clk)
idel_pat_data_match_r <= #TCQ idel_pat_data_match;
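// idel_pat_data_match asserts when the whole DQS group matches training
// pattern 0 or training pattern 1 across all bit times; the CAL1_PAT_DETECT
// state below keeps incrementing the DQ IDELAYs until it asserts.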
// Combine pattern match "subterms" for DQS-PHASER_IN stage
always @(posedge clk) begin
pat0_match_rise0_and_r <= #TCQ &pat0_match_rise0_r;
pat0_match_fall0_and_r <= #TCQ &pat0_match_fall0_r;
pat0_match_rise1_and_r <= #TCQ &pat0_match_rise1_r;
pat0_match_fall1_and_r <= #TCQ &pat0_match_fall1_r;
pat0_match_rise2_and_r <= #TCQ &pat0_match_rise2_r;
pat0_match_fall2_and_r <= #TCQ &pat0_match_fall2_r;
pat0_match_rise3_and_r <= #TCQ &pat0_match_rise3_r;
pat0_match_fall3_and_r <= #TCQ &pat0_match_fall3_r;
pat0_data_match_r <= #TCQ (pat0_match_rise0_and_r &&
pat0_match_fall0_and_r &&
pat0_match_rise1_and_r &&
pat0_match_fall1_and_r &&
pat0_match_rise2_and_r &&
pat0_match_fall2_and_r &&
pat0_match_rise3_and_r &&
pat0_match_fall3_and_r);
end
always @(posedge clk) begin
pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r;
pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r;
pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r;
pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r;
pat1_match_rise2_and_r <= #TCQ &pat1_match_rise2_r;
pat1_match_fall2_and_r <= #TCQ &pat1_match_fall2_r;
pat1_match_rise3_and_r <= #TCQ &pat1_match_rise3_r;
pat1_match_fall3_and_r <= #TCQ &pat1_match_fall3_r;
pat1_data_match_r <= #TCQ (pat1_match_rise0_and_r &&
pat1_match_fall0_and_r &&
pat1_match_rise1_and_r &&
pat1_match_fall1_and_r &&
pat1_match_rise2_and_r &&
pat1_match_fall2_and_r &&
pat1_match_rise3_and_r &&
pat1_match_fall3_and_r);
end
assign pat_data_match_r = pat0_data_match_r | pat1_data_match_r;
end else if (nCK_PER_CLK == 2) begin: gen_pat_match_div2
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
// DQ IDELAY pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat0_rise0[pt_i%4])
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat0_fall0[pt_i%4])
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat0_rise1[pt_i%4])
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat0_fall1[pt_i%4])
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat1_rise0[pt_i%4])
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat1_fall0[pt_i%4])
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat1_rise1[pt_i%4])
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat1_fall1[pt_i%4])
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
// DQS DVW pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat0_rise0[pt_i%4])
pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat0_fall0[pt_i%4])
pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat0_rise1[pt_i%4])
pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat0_fall1[pt_i%4])
pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat1_rise0[pt_i%4])
pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat1_fall0[pt_i%4])
pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat1_rise1[pt_i%4])
pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat1_fall1[pt_i%4])
pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
end
// Combine pattern match "subterms" for DQ-IDELAY stage
always @(posedge clk) begin
idel_pat0_match_rise0_and_r <= #TCQ &idel_pat0_match_rise0_r;
idel_pat0_match_fall0_and_r <= #TCQ &idel_pat0_match_fall0_r;
idel_pat0_match_rise1_and_r <= #TCQ &idel_pat0_match_rise1_r;
idel_pat0_match_fall1_and_r <= #TCQ &idel_pat0_match_fall1_r;
idel_pat0_data_match_r <= #TCQ (idel_pat0_match_rise0_and_r &&
idel_pat0_match_fall0_and_r &&
idel_pat0_match_rise1_and_r &&
idel_pat0_match_fall1_and_r);
end
always @(posedge clk) begin
idel_pat1_match_rise0_and_r <= #TCQ &idel_pat1_match_rise0_r;
idel_pat1_match_fall0_and_r <= #TCQ &idel_pat1_match_fall0_r;
idel_pat1_match_rise1_and_r <= #TCQ &idel_pat1_match_rise1_r;
idel_pat1_match_fall1_and_r <= #TCQ &idel_pat1_match_fall1_r;
idel_pat1_data_match_r <= #TCQ (idel_pat1_match_rise0_and_r &&
idel_pat1_match_fall0_and_r &&
idel_pat1_match_rise1_and_r &&
idel_pat1_match_fall1_and_r);
end
always @(posedge clk) begin
if (sr_valid_r2)
idel_pat_data_match <= #TCQ idel_pat0_data_match_r |
idel_pat1_data_match_r;
end
//assign idel_pat_data_match = idel_pat0_data_match_r |
// idel_pat1_data_match_r;
always @(posedge clk)
idel_pat_data_match_r <= #TCQ idel_pat_data_match;
// Combine pattern match "subterms" for DQS-PHASER_IN stage
always @(posedge clk) begin
pat0_match_rise0_and_r <= #TCQ &pat0_match_rise0_r;
pat0_match_fall0_and_r <= #TCQ &pat0_match_fall0_r;
pat0_match_rise1_and_r <= #TCQ &pat0_match_rise1_r;
pat0_match_fall1_and_r <= #TCQ &pat0_match_fall1_r;
pat0_data_match_r <= #TCQ (pat0_match_rise0_and_r &&
pat0_match_fall0_and_r &&
pat0_match_rise1_and_r &&
pat0_match_fall1_and_r);
end
always @(posedge clk) begin
pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r;
pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r;
pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r;
pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r;
pat1_data_match_r <= #TCQ (pat1_match_rise0_and_r &&
pat1_match_fall0_and_r &&
pat1_match_rise1_and_r &&
pat1_match_fall1_and_r);
end
assign pat_data_match_r = pat0_data_match_r | pat1_data_match_r;
end
endgenerate
always @(posedge clk) begin
rdlvl_stg1_start_r <= #TCQ rdlvl_stg1_start;
mpr_rdlvl_done_r1 <= #TCQ mpr_rdlvl_done_r;
mpr_rdlvl_done_r2 <= #TCQ mpr_rdlvl_done_r1;
mpr_rdlvl_start_r <= #TCQ mpr_rdlvl_start;
end
//***************************************************************************
// First stage calibration: Capture clock
//***************************************************************************
//*****************************************************************
// Keep track of how many samples have been written to the shift registers.
// Every time RD_SHIFT_LEN samples have been written, we have a full read
// training pattern loaded into the sr_* registers. Then assert sr_valid_r
// to indicate that: (1) comparison between the sr_* and the old_sr_* /
// prev_sr_* registers can take place, and (2) transfer of the contents of
// sr_* to the old_sr_* and prev_sr_* registers can also take place
//*****************************************************************
// verilint STARC-2.2.3.3 off
always @(posedge clk)
if (rst || (mpr_rdlvl_done_r && ~rdlvl_stg1_start)) begin
cnt_shift_r <= #TCQ 'b1;
sr_valid_r <= #TCQ 1'b0;
mpr_valid_r <= #TCQ 1'b0;
end else begin
if (mux_rd_valid_r && mpr_rdlvl_start && ~mpr_rdlvl_done_r) begin
if (cnt_shift_r == 'b0)
mpr_valid_r <= #TCQ 1'b1;
else begin
mpr_valid_r <= #TCQ 1'b0;
cnt_shift_r <= #TCQ cnt_shift_r + 1;
end
end else
mpr_valid_r <= #TCQ 1'b0;
if (mux_rd_valid_r && rdlvl_stg1_start) begin
if (cnt_shift_r == RD_SHIFT_LEN-1) begin
sr_valid_r <= #TCQ 1'b1;
cnt_shift_r <= #TCQ 'b0;
end else begin
sr_valid_r <= #TCQ 1'b0;
cnt_shift_r <= #TCQ cnt_shift_r + 1;
end
end else
// When the current mux_rd_* contents are not valid, then
// retain the current value of cnt_shift_r, and make sure
// that sr_valid_r = 0 to prevent any downstream loads or
// comparisons
sr_valid_r <= #TCQ 1'b0;
end
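// In effect, sr_valid_r pulses for one cycle each time RD_SHIFT_LEN valid
// read beats have been shifted in (i.e. one complete training pattern sits
// in the sr_* registers); mpr_valid_r provides the equivalent qualification
// while MPR read leveling is running.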
// verilint STARC-2.2.3.3 on
//*****************************************************************
// Logic to determine when either edge of the data eye has been encountered.
// The pre- and post-IDELAY-update data patterns are compared; if they
// differ, then an edge has been encountered. Currently no attempt is
// made to determine whether the data pattern itself is "correct", only
// whether it changes after incrementing the IDELAY (a possible
// future enhancement)
//*****************************************************************
// One-way control for ensuring that state machine request to store
// current read data into OLD SR shift register only occurs on a
// valid clock cycle. The FSM provides a one-cycle request pulse.
// It is the responsibility of the FSM to wait the worst-case time
// before relying on any downstream results of this load.
always @(posedge clk)
if (rst)
store_sr_r <= #TCQ 1'b0;
else begin
if (store_sr_req_r)
store_sr_r <= #TCQ 1'b1;
else if ((sr_valid_r || mpr_valid_r) && store_sr_r)
store_sr_r <= #TCQ 1'b0;
end
// Transfer current data to old data, prior to incrementing delay
// Also store data from current sampling window - so that we can detect
// if the current delay tap yields data that is "jittery"
generate
if (nCK_PER_CLK == 4) begin: gen_old_sr_div4
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_old_sr
always @(posedge clk) begin
if (sr_valid_r || mpr_valid_r) begin
// Load last sample (i.e. from current sampling interval)
prev_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
prev_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
prev_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
prev_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
prev_sr_rise2_r[z] <= #TCQ sr_rise2_r[z];
prev_sr_fall2_r[z] <= #TCQ sr_fall2_r[z];
prev_sr_rise3_r[z] <= #TCQ sr_rise3_r[z];
prev_sr_fall3_r[z] <= #TCQ sr_fall3_r[z];
end
if ((sr_valid_r || mpr_valid_r) && store_sr_r) begin
old_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
old_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
old_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
old_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
old_sr_rise2_r[z] <= #TCQ sr_rise2_r[z];
old_sr_fall2_r[z] <= #TCQ sr_fall2_r[z];
old_sr_rise3_r[z] <= #TCQ sr_rise3_r[z];
old_sr_fall3_r[z] <= #TCQ sr_fall3_r[z];
end
end
end
end else if (nCK_PER_CLK == 2) begin: gen_old_sr_div2
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_old_sr
always @(posedge clk) begin
if (sr_valid_r || mpr_valid_r) begin
prev_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
prev_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
prev_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
prev_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
end
if ((sr_valid_r || mpr_valid_r) && store_sr_r) begin
old_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
old_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
old_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
old_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
end
end
end
end
endgenerate
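// In effect, prev_sr_* always tracks the most recent complete sample
// (reloaded on every valid compare), while old_sr_* is only captured when
// the FSM has pulsed store_sr_req_r - i.e. it holds the data seen at the
// previous delay tap setting.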
//*******************************************************
// Match determination occurs over 3 cycles - pipelined for better timing
//*******************************************************
// Match valid with # of cycles of pipelining in match determination
always @(posedge clk) begin
sr_valid_r1 <= #TCQ sr_valid_r;
sr_valid_r2 <= #TCQ sr_valid_r1;
mpr_valid_r1 <= #TCQ mpr_valid_r;
mpr_valid_r2 <= #TCQ mpr_valid_r1;
end
generate
if (nCK_PER_CLK == 4) begin: gen_sr_match_div4
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_sr_match
always @(posedge clk) begin
// CYCLE1: Compare all bits in the DQS group, generating a separate term
// for each bit over eight bit times (rise0-3/fall0-3). For example, if
// there are 8 bits per DQS group, 64 terms are generated on cycle 1 for
// each of the old_sr and prev_sr comparisons.
// NOTE: Structure the HDL such that an X on the data bus results in a
// mismatch. This is required for memory models that can drive the
// bus with X's to model uncertainty regions (e.g. Denali)
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == old_sr_rise0_r[z]))
old_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise0_r[z] <= #TCQ old_sr_match_rise0_r[z];
else
old_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == old_sr_fall0_r[z]))
old_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall0_r[z] <= #TCQ old_sr_match_fall0_r[z];
else
old_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == old_sr_rise1_r[z]))
old_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise1_r[z] <= #TCQ old_sr_match_rise1_r[z];
else
old_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == old_sr_fall1_r[z]))
old_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall1_r[z] <= #TCQ old_sr_match_fall1_r[z];
else
old_sr_match_fall1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise2_r[z] == old_sr_rise2_r[z]))
old_sr_match_rise2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise2_r[z] <= #TCQ old_sr_match_rise2_r[z];
else
old_sr_match_rise2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall2_r[z] == old_sr_fall2_r[z]))
old_sr_match_fall2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall2_r[z] <= #TCQ old_sr_match_fall2_r[z];
else
old_sr_match_fall2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise3_r[z] == old_sr_rise3_r[z]))
old_sr_match_rise3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise3_r[z] <= #TCQ old_sr_match_rise3_r[z];
else
old_sr_match_rise3_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall3_r[z] == old_sr_fall3_r[z]))
old_sr_match_fall3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall3_r[z] <= #TCQ old_sr_match_fall3_r[z];
else
old_sr_match_fall3_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == prev_sr_rise0_r[z]))
prev_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise0_r[z] <= #TCQ prev_sr_match_rise0_r[z];
else
prev_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == prev_sr_fall0_r[z]))
prev_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall0_r[z] <= #TCQ prev_sr_match_fall0_r[z];
else
prev_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == prev_sr_rise1_r[z]))
prev_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise1_r[z] <= #TCQ prev_sr_match_rise1_r[z];
else
prev_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == prev_sr_fall1_r[z]))
prev_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall1_r[z] <= #TCQ prev_sr_match_fall1_r[z];
else
prev_sr_match_fall1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise2_r[z] == prev_sr_rise2_r[z]))
prev_sr_match_rise2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise2_r[z] <= #TCQ prev_sr_match_rise2_r[z];
else
prev_sr_match_rise2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall2_r[z] == prev_sr_fall2_r[z]))
prev_sr_match_fall2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall2_r[z] <= #TCQ prev_sr_match_fall2_r[z];
else
prev_sr_match_fall2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise3_r[z] == prev_sr_rise3_r[z]))
prev_sr_match_rise3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise3_r[z] <= #TCQ prev_sr_match_rise3_r[z];
else
prev_sr_match_rise3_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall3_r[z] == prev_sr_fall3_r[z]))
prev_sr_match_fall3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall3_r[z] <= #TCQ prev_sr_match_fall3_r[z];
else
prev_sr_match_fall3_r[z] <= #TCQ 1'b0;
// CYCLE2: Combine all the comparisons for the 8 words (rise0-fall3) in
// the calibration sequence. Now we're down to DRAM_WIDTH terms
old_sr_match_cyc2_r[z] <= #TCQ
old_sr_match_rise0_r[z] &
old_sr_match_fall0_r[z] &
old_sr_match_rise1_r[z] &
old_sr_match_fall1_r[z] &
old_sr_match_rise2_r[z] &
old_sr_match_fall2_r[z] &
old_sr_match_rise3_r[z] &
old_sr_match_fall3_r[z];
prev_sr_match_cyc2_r[z] <= #TCQ
prev_sr_match_rise0_r[z] &
prev_sr_match_fall0_r[z] &
prev_sr_match_rise1_r[z] &
prev_sr_match_fall1_r[z] &
prev_sr_match_rise2_r[z] &
prev_sr_match_fall2_r[z] &
prev_sr_match_rise3_r[z] &
prev_sr_match_fall3_r[z];
// CYCLE3: Invert value (i.e. assert when a DIFFERENCE in value is seen)
// and qualify with the pipelined valid signal - probably don't need a
// full cycle just to do this
if (sr_valid_r2 || mpr_valid_r2) begin
old_sr_diff_r[z] <= #TCQ ~old_sr_match_cyc2_r[z];
prev_sr_diff_r[z] <= #TCQ ~prev_sr_match_cyc2_r[z];
end else begin
old_sr_diff_r[z] <= #TCQ 'b0;
prev_sr_diff_r[z] <= #TCQ 'b0;
end
end
end
end else if (nCK_PER_CLK == 2) begin: gen_sr_match_div2
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_sr_match
always @(posedge clk) begin
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == old_sr_rise0_r[z]))
old_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise0_r[z] <= #TCQ old_sr_match_rise0_r[z];
else
old_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == old_sr_fall0_r[z]))
old_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall0_r[z] <= #TCQ old_sr_match_fall0_r[z];
else
old_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == old_sr_rise1_r[z]))
old_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise1_r[z] <= #TCQ old_sr_match_rise1_r[z];
else
old_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == old_sr_fall1_r[z]))
old_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall1_r[z] <= #TCQ old_sr_match_fall1_r[z];
else
old_sr_match_fall1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == prev_sr_rise0_r[z]))
prev_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise0_r[z] <= #TCQ prev_sr_match_rise0_r[z];
else
prev_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == prev_sr_fall0_r[z]))
prev_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall0_r[z] <= #TCQ prev_sr_match_fall0_r[z];
else
prev_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == prev_sr_rise1_r[z]))
prev_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise1_r[z] <= #TCQ prev_sr_match_rise1_r[z];
else
prev_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == prev_sr_fall1_r[z]))
prev_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall1_r[z] <= #TCQ prev_sr_match_fall1_r[z];
else
prev_sr_match_fall1_r[z] <= #TCQ 1'b0;
old_sr_match_cyc2_r[z] <= #TCQ
old_sr_match_rise0_r[z] &
old_sr_match_fall0_r[z] &
old_sr_match_rise1_r[z] &
old_sr_match_fall1_r[z];
prev_sr_match_cyc2_r[z] <= #TCQ
prev_sr_match_rise0_r[z] &
prev_sr_match_fall0_r[z] &
prev_sr_match_rise1_r[z] &
prev_sr_match_fall1_r[z];
// CYCLE3: Invert value (i.e. assert when a DIFFERENCE in value is seen)
// and qualify with the pipelined valid signal - probably don't need a
// full cycle just to do this
if (sr_valid_r2 || mpr_valid_r2) begin
old_sr_diff_r[z] <= #TCQ ~old_sr_match_cyc2_r[z];
prev_sr_diff_r[z] <= #TCQ ~prev_sr_match_cyc2_r[z];
end else begin
old_sr_diff_r[z] <= #TCQ 'b0;
prev_sr_diff_r[z] <= #TCQ 'b0;
end
end
end
end
endgenerate
//***************************************************************************
// First stage calibration: DQS Capture
//***************************************************************************
//*******************************************************
// Counters for tracking # of samples compared
// For each comparison point (i.e. to determine if an edge has
// occurred after each IODELAY increment when read leveling),
// multiple samples are compared in order to average out the effects
// of jitter. If any one of these samples is different from the "old"
// sample corresponding to the previous IODELAY value, then an edge
// is declared to be detected.
//*******************************************************
// Two cascaded counters are used to keep track of # of samples compared,
// in order to make it easier to meet timing on these paths. Once the
// optimal sampling interval is determined, it may be possible to remove
// the second counter
always @(posedge clk)
samp_edge_cnt0_en_r <= #TCQ
(cal1_state_r == CAL1_PAT_DETECT) ||
(cal1_state_r == CAL1_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE_DQ);
// First counter counts # of samples compared
always @(posedge clk)
if (rst)
samp_edge_cnt0_r <= #TCQ 'b0;
else begin
if (!samp_edge_cnt0_en_r)
// Reset sample counter when not in any of the "sampling" states
samp_edge_cnt0_r <= #TCQ 'b0;
else if (sr_valid_r2 || mpr_valid_r2)
// Otherwise, count # of samples compared
samp_edge_cnt0_r <= #TCQ samp_edge_cnt0_r + 1;
end
// Counter #2 enable generation
always @(posedge clk)
if (rst)
samp_edge_cnt1_en_r <= #TCQ 1'b0;
else begin
// Assert pulse when correct number of samples compared
if ((samp_edge_cnt0_r == DETECT_EDGE_SAMPLE_CNT0) &&
(sr_valid_r2 || mpr_valid_r2))
samp_edge_cnt1_en_r <= #TCQ 1'b1;
else
samp_edge_cnt1_en_r <= #TCQ 1'b0;
end
// Counter #2
always @(posedge clk)
if (rst)
samp_edge_cnt1_r <= #TCQ 'b0;
else
if (!samp_edge_cnt0_en_r)
samp_edge_cnt1_r <= #TCQ 'b0;
else if (samp_edge_cnt1_en_r)
samp_edge_cnt1_r <= #TCQ samp_edge_cnt1_r + 1;
always @(posedge clk)
if (rst)
samp_cnt_done_r <= #TCQ 1'b0;
else begin
if (!samp_edge_cnt0_en_r)
samp_cnt_done_r <= #TCQ 'b0;
else if ((SIM_CAL_OPTION == "FAST_CAL") ||
(SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin
if (samp_edge_cnt0_r == SR_VALID_DELAY-1)
// For simulation only, stay in edge detection mode a minimum
// amount of time - just enough for two data compares to finish
samp_cnt_done_r <= #TCQ 1'b1;
end else begin
if (samp_edge_cnt1_r == DETECT_EDGE_SAMPLE_CNT1)
samp_cnt_done_r <= #TCQ 1'b1;
end
end
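// Taken together, samp_edge_cnt0_r/samp_edge_cnt1_r form a cascaded sample
// counter: samp_cnt_done_r asserts after SR_VALID_DELAY-1 valid compares for
// the FAST_CAL/FAST_WIN_DETECT simulation options, or once the second
// counter reaches DETECT_EDGE_SAMPLE_CNT1 for the full hardware interval.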
//*****************************************************************
// Logic to keep track of (on per-bit basis):
// 1. When a region of stability preceded by a known edge occurs
// 2. If for the current tap, the read data jitters
// 3. If an edge occurred between the current and previous tap
// 4. When the current edge detection/sampling interval can end
// Essentially, these are a series of status bits - the stage 1
// calibration FSM monitors these to determine when an edge is
// found. Additional information is provided to help the FSM
// determine if a left or right edge has been found.
//****************************************************************
assign pb_detect_edge_setup
= (cal1_state_r == CAL1_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_PB_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_PB_DEC_CPT_LEFT_WAIT);
assign pb_detect_edge
= (cal1_state_r == CAL1_PAT_DETECT) ||
(cal1_state_r == CAL1_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE_DQ);
generate
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_track_left_edge
always @(posedge clk) begin
if (pb_detect_edge_setup) begin
// Reset eye size, stable eye marker, and jitter marker before
// starting new edge detection iteration
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_detect_edge_done_r[z] <= #TCQ 1'b0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_last_tap_jitter_r[z] <= #TCQ 1'b0;
pb_found_edge_last_r[z] <= #TCQ 1'b0;
pb_found_edge_r[z] <= #TCQ 1'b0;
pb_found_first_edge_r[z] <= #TCQ 1'b0;
end else if (pb_detect_edge) begin
// Save information on which DQ bits are already out of the
// data valid window - those DQ bits will later not have their
// IDELAY tap value incremented
pb_found_edge_last_r[z] <= #TCQ pb_found_edge_r[z];
if (!pb_detect_edge_done_r[z]) begin
if (samp_cnt_done_r) begin
// We've reached the end of the sampling interval without finding
// any jitter on the current tap (although an edge could still have
// been found between the current and previous taps). Increment the
// stable eye counter if no edge was found, and always clear the
// jitter flag in preparation for the next tap.
pb_last_tap_jitter_r[z] <= #TCQ 1'b0;
pb_detect_edge_done_r[z] <= #TCQ 1'b1;
if (!pb_found_edge_r[z] && !pb_last_tap_jitter_r[z]) begin
// If the data was completely stable during this tap and
// no edge was found between this and the previous tap
// then increment the stable eye counter "as appropriate"
if (pb_cnt_eye_size_r[z] != MIN_EYE_SIZE-1)
pb_cnt_eye_size_r[z] <= #TCQ pb_cnt_eye_size_r[z] + 1;
else //if (pb_found_first_edge_r[z])
// We've reached minimum stable eye width
pb_found_stable_eye_r[z] <= #TCQ 1'b1;
end else begin
// Otherwise, an edge was found, either because of a
// difference between this and the previous tap's read
// data, and/or because the previous tap's data jittered
// (but not the current tap's data). Set the edge found
// flag and restart the stable eye counter
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_found_edge_r[z] <= #TCQ 1'b1;
pb_detect_edge_done_r[z] <= #TCQ 1'b1;
end
end else if (prev_sr_diff_r[z]) begin
// If we find that the current tap's read data jitters, then
// set the edge and jitter found flags, reset the eye size
// counter, and stop the sampling interval for this bit
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_last_tap_jitter_r[z] <= #TCQ 1'b1;
pb_found_edge_r[z] <= #TCQ 1'b1;
pb_found_first_edge_r[z] <= #TCQ 1'b1;
pb_detect_edge_done_r[z] <= #TCQ 1'b1;
end else if (old_sr_diff_r[z] || pb_last_tap_jitter_r[z]) begin
// If either an edge was found (i.e. difference between
// current tap and previous tap read data), or the previous
// tap exhibited jitter (which means by definition that the
// current tap cannot match the previous tap because the
// previous tap gave unstable data), then set the edge found
// flag, and "enable" eye size counter. But do not stop
// sampling interval - we still need to check if the current
// tap exhibits jitter
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_found_edge_r[z] <= #TCQ 1'b1;
pb_found_first_edge_r[z] <= #TCQ 1'b1;
end
end
end else begin
// Before every edge detection interval, reset "intra-tap" flags
pb_found_edge_r[z] <= #TCQ 1'b0;
pb_detect_edge_done_r[z] <= #TCQ 1'b0;
end
end
end
endgenerate
// Combine the above per-bit status flags into combined terms when
// performing deskew on the aggregate data window
always @(posedge clk) begin
detect_edge_done_r <= #TCQ &pb_detect_edge_done_r;
found_edge_r <= #TCQ |pb_found_edge_r;
found_edge_all_r <= #TCQ &pb_found_edge_r;
found_stable_eye_r <= #TCQ &pb_found_stable_eye_r;
end
// The last-IODELAY "stable eye" indicator is updated only after
// detect_edge_done_r is asserted - so that when we do find the "right edge"
// of the data valid window, found_edge_r = 1 AND found_stable_eye_r = 1
// when detect_edge_done_r = 1 (otherwise, if found_stable_eye_r updated
// immediately, it would never be possible to have found_stable_eye_r = 1
// when we detect an edge - and we'd never know whether we've found
// a "right edge")
always @(posedge clk)
if (pb_detect_edge_setup)
found_stable_eye_last_r <= #TCQ 1'b0;
else if (detect_edge_done_r)
found_stable_eye_last_r <= #TCQ found_stable_eye_r;
//*****************************************************************
// Keep track of DQ IDELAYE2 taps used
//*****************************************************************
// Added additional register stage to improve timing
always @(posedge clk)
if (rst)
idelay_tap_cnt_slice_r <= #TCQ 5'h0;
else
idelay_tap_cnt_slice_r <= #TCQ idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing];
always @(posedge clk)
if (rst || (SIM_CAL_OPTION == "SKIP_CAL")) begin //|| new_cnt_cpt_r
for (s = 0; s < RANKS; s = s + 1) begin
for (t = 0; t < DQS_WIDTH; t = t + 1) begin
idelay_tap_cnt_r[s][t] <= #TCQ idelaye2_init_val;
end
end
end else if (SIM_CAL_OPTION == "FAST_CAL") begin
for (u = 0; u < RANKS; u = u + 1) begin
for (w = 0; w < DQS_WIDTH; w = w + 1) begin
if (cal1_dq_idel_ce) begin
if (cal1_dq_idel_inc)
idelay_tap_cnt_r[u][w] <= #TCQ idelay_tap_cnt_r[u][w] + 1;
else
idelay_tap_cnt_r[u][w] <= #TCQ idelay_tap_cnt_r[u][w] - 1;
end
end
end
end else if ((rnk_cnt_r == RANKS-1) && (RANKS == 2) &&
rdlvl_rank_done_r && (cal1_state_r == CAL1_IDLE)) begin
for (f = 0; f < DQS_WIDTH; f = f + 1) begin
idelay_tap_cnt_r[rnk_cnt_r][f] <= #TCQ idelay_tap_cnt_r[(rnk_cnt_r-1)][f];
end
end else if (cal1_dq_idel_ce) begin
if (cal1_dq_idel_inc)
idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] <= #TCQ idelay_tap_cnt_slice_r + 5'h1;
else
idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] <= #TCQ idelay_tap_cnt_slice_r - 5'h1;
end else if (idelay_ld)
idelay_tap_cnt_r[0][wrcal_cnt] <= #TCQ 5'b00000;
always @(posedge clk)
if (rst || new_cnt_cpt_r)
idelay_tap_limit_r <= #TCQ 1'b0;
else if (idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_r] == 'd31)
idelay_tap_limit_r <= #TCQ 1'b1;
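// idelay_tap_limit_r flags that the DQ IDELAYE2 taps for the byte currently
// being calibrated have reached the last setting (tap 31 of 0-31).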
//*****************************************************************
// keep track of edge tap counts found, and current capture clock
// tap count
//*****************************************************************
always @(posedge clk)
if (rst || new_cnt_cpt_r ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
tap_cnt_cpt_r <= #TCQ 'b0;
else if (cal1_dlyce_cpt_r) begin
if (cal1_dlyinc_cpt_r)
tap_cnt_cpt_r <= #TCQ tap_cnt_cpt_r + 1;
else if (tap_cnt_cpt_r != 'd0)
tap_cnt_cpt_r <= #TCQ tap_cnt_cpt_r - 1;
end
always @(posedge clk)
if (rst || new_cnt_cpt_r ||
(cal1_state_r1 == CAL1_DQ_IDEL_TAP_INC) ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
tap_limit_cpt_r <= #TCQ 1'b0;
else if (tap_cnt_cpt_r == 6'd63)
tap_limit_cpt_r <= #TCQ 1'b1;
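// tap_limit_cpt_r flags that the Phaser_IN fine tap counter for the current
// DQS group has reached its last setting (tap 63 of 0-63).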
always @(posedge clk)
cal1_cnt_cpt_timing_r <= #TCQ cal1_cnt_cpt_r;
assign cal1_cnt_cpt_timing = {2'b00, cal1_cnt_cpt_r};
// Storing DQS tap values at the end of each DQS read leveling
always @(posedge clk) begin
if (rst) begin
for (a = 0; a < RANKS; a = a + 1) begin: rst_rdlvl_dqs_tap_count_loop
for (b = 0; b < DQS_WIDTH; b = b + 1)
rdlvl_dqs_tap_cnt_r[a][b] <= #TCQ 'b0;
end
end else if ((SIM_CAL_OPTION == "FAST_CAL") & (cal1_state_r1 == CAL1_NEXT_DQS)) begin
for (p = 0; p < RANKS; p = p +1) begin: rdlvl_dqs_tap_rank_cnt
for(q = 0; q < DQS_WIDTH; q = q +1) begin: rdlvl_dqs_tap_cnt
rdlvl_dqs_tap_cnt_r[p][q] <= #TCQ tap_cnt_cpt_r;
end
end
end else if (SIM_CAL_OPTION == "SKIP_CAL") begin
for (j = 0; j < RANKS; j = j +1) begin: rdlvl_dqs_tap_rnk_cnt
for(i = 0; i < DQS_WIDTH; i = i +1) begin: rdlvl_dqs_cnt
rdlvl_dqs_tap_cnt_r[j][i] <= #TCQ 6'd31;
end
end
end else if (cal1_state_r1 == CAL1_NEXT_DQS) begin
rdlvl_dqs_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing_r] <= #TCQ tap_cnt_cpt_r;
end
end
// Counter to track maximum DQ IODELAY tap usage during the per-bit
// deskew portion of stage 1 calibration
always @(posedge clk)
if (rst) begin
idel_tap_cnt_dq_pb_r <= #TCQ 'b0;
idel_tap_limit_dq_pb_r <= #TCQ 1'b0;
end else
if (new_cnt_cpt_r) begin
idel_tap_cnt_dq_pb_r <= #TCQ 'b0;
idel_tap_limit_dq_pb_r <= #TCQ 1'b0;
end else if (|cal1_dlyce_dq_r) begin
if (cal1_dlyinc_dq_r)
idel_tap_cnt_dq_pb_r <= #TCQ idel_tap_cnt_dq_pb_r + 1;
else
idel_tap_cnt_dq_pb_r <= #TCQ idel_tap_cnt_dq_pb_r - 1;
if (idel_tap_cnt_dq_pb_r == 31)
idel_tap_limit_dq_pb_r <= #TCQ 1'b1;
else
idel_tap_limit_dq_pb_r <= #TCQ 1'b0;
end
//*****************************************************************
always @(posedge clk)
cal1_state_r1 <= #TCQ cal1_state_r;
always @(posedge clk)
if (rst) begin
cal1_cnt_cpt_r <= #TCQ 'b0;
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
cal1_prech_req_r <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_IDLE;
cnt_idel_dec_cpt_r <= #TCQ 6'bxxxxxx;
found_first_edge_r <= #TCQ 1'b0;
found_second_edge_r <= #TCQ 1'b0;
right_edge_taps_r <= #TCQ 6'bxxxxxx;
first_edge_taps_r <= #TCQ 6'bxxxxxx;
new_cnt_cpt_r <= #TCQ 1'b0;
rdlvl_stg1_done <= #TCQ 1'b0;
rdlvl_stg1_err <= #TCQ 1'b0;
second_edge_taps_r <= #TCQ 6'bxxxxxx;
store_sr_req_pulsed_r <= #TCQ 1'b0;
store_sr_req_r <= #TCQ 1'b0;
rnk_cnt_r <= #TCQ 2'b00;
rdlvl_rank_done_r <= #TCQ 1'b0;
idel_dec_cnt <= #TCQ 'd0;
rdlvl_last_byte_done <= #TCQ 1'b0;
idel_pat_detect_valid_r <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
idel_adj_inc <= #TCQ 1'b0;
if (OCAL_EN == "ON")
mpr_rdlvl_done_r <= #TCQ 1'b0;
else
mpr_rdlvl_done_r <= #TCQ 1'b1;
mpr_dec_cpt_r <= #TCQ 1'b0;
end else begin
// default (inactive) states for all "pulse" outputs
// verilint STARC-2.2.3.3 off
cal1_prech_req_r <= #TCQ 1'b0;
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
new_cnt_cpt_r <= #TCQ 1'b0;
store_sr_req_pulsed_r <= #TCQ 1'b0;
store_sr_req_r <= #TCQ 1'b0;
case (cal1_state_r)
CAL1_IDLE: begin
rdlvl_rank_done_r <= #TCQ 1'b0;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
if (mpr_rdlvl_start && ~mpr_rdlvl_start_r) begin
cal1_state_r <= #TCQ CAL1_MPR_NEW_DQS_WAIT;
end else
if (rdlvl_stg1_start && ~rdlvl_stg1_start_r) begin
if (SIM_CAL_OPTION == "SKIP_CAL")
cal1_state_r <= #TCQ CAL1_REGL_LOAD;
else if (SIM_CAL_OPTION == "FAST_CAL")
cal1_state_r <= #TCQ CAL1_NEXT_DQS;
else begin
new_cnt_cpt_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_NEW_DQS_WAIT;
end
end
end
CAL1_MPR_NEW_DQS_WAIT: begin
cal1_prech_req_r <= #TCQ 1'b0;
if (!cal1_wait_r && mpr_valid_r)
cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT;
end
// Wait for the new DQS group to change
// also gives time for the read data IN_FIFO to
// output the updated data for the new DQS group
CAL1_NEW_DQS_WAIT: begin
rdlvl_rank_done_r <= #TCQ 1'b0;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
cal1_prech_req_r <= #TCQ 1'b0;
if (|pi_counter_read_val) begin //VK_REVIEW
mpr_dec_cpt_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT;
cnt_idel_dec_cpt_r <= #TCQ pi_counter_read_val;
end else if (!cal1_wait_r) begin
//if (!cal1_wait_r) begin
// Store "previous tap" read data. Technically there is no
// "previous" read data, since we are starting a new DQS
// group, so we'll never find an edge at tap 0 unless the
// data is fluctuating/jittering
store_sr_req_r <= #TCQ 1'b1;
// If per-bit deskew is disabled, then skip the first
// portion of stage 1 calibration
if (PER_BIT_DESKEW == "OFF")
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
else if (PER_BIT_DESKEW == "ON")
cal1_state_r <= #TCQ CAL1_PB_STORE_FIRST_WAIT;
end
end
//*****************************************************************
// Per-bit deskew states
//*****************************************************************
// Wait state following storage of initial read data
CAL1_PB_STORE_FIRST_WAIT:
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE;
// Look for an edge on all DQ bits in current DQS group
CAL1_PB_DETECT_EDGE:
if (detect_edge_done_r) begin
if (found_stable_eye_r) begin
// If we've found the left edge for all bits (or more precisely,
// we've found the left edge, and then part of the stable
// window thereafter), then proceed to positioning the CPT clock
// right before the left margin
cnt_idel_dec_cpt_r <= #TCQ MIN_EYE_SIZE + 1;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_LEFT;
end else begin
// If we've reached the end of the sampling time, and haven't
// yet found the left margin of all the DQ bits, then:
if (!tap_limit_cpt_r) begin
// If we still have taps left to use, then store current value
// of read data, increment the capture clock, and continue to
// look for (left) edges
store_sr_req_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_PB_INC_CPT;
end else begin
// If we ran out of taps moving the capture clock, and we
// haven't finished edge detection, then reset the capture
// clock taps to 0 (gradually, one tap at a time), and
// then exit the per-bit portion of the algorithm -
// i.e. proceed to adjust the capture clock and DQ IODELAYs
// as in the remainder of stage 1 calibration
cnt_idel_dec_cpt_r <= #TCQ 6'd63;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT;
end
end
end
// Increment delay for DQS
CAL1_PB_INC_CPT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_PB_INC_CPT_WAIT;
end
// Wait for IODELAY for both capture and internal nodes within
// ISERDES to settle, before checking again for an edge
CAL1_PB_INC_CPT_WAIT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE;
end
// We've found the left edges of the windows for all DQ bits
// (actually, we found it MIN_EYE_SIZE taps ago). Decrement capture
// clock IDELAY to position just outside left edge of data window
CAL1_PB_DEC_CPT_LEFT:
if (cnt_idel_dec_cpt_r == 6'b000000)
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_LEFT_WAIT;
else begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1;
end
CAL1_PB_DEC_CPT_LEFT_WAIT:
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE_DQ;
// If there is skew between individual DQ bits, then after we've
// positioned the CPT clock, we will be "in the window" for some
// DQ bits ("early" DQ bits), and "out of the window" for others
// ("late" DQ bits). Increase DQ taps until we are out of the
// window for all DQ bits
CAL1_PB_DETECT_EDGE_DQ:
if (detect_edge_done_r)
if (found_edge_all_r) begin
// We're out of the window for all DQ bits in this DQS group
// We're done with per-bit deskew for this group - now decr
// capture clock IODELAY tap count back to 0, and proceed
// with the rest of stage 1 calibration for this DQS group
cnt_idel_dec_cpt_r <= #TCQ tap_cnt_cpt_r;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT;
end else
if (!idel_tap_limit_dq_pb_r)
// If we still have DQ taps available for deskew, keep
// incrementing IODELAY tap count for the appropriate DQ bits
cal1_state_r <= #TCQ CAL1_PB_INC_DQ;
else begin
// Otherwise, stop immediately (we've done the best we can)
// and proceed with rest of stage 1 calibration
cnt_idel_dec_cpt_r <= #TCQ tap_cnt_cpt_r;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT;
end
CAL1_PB_INC_DQ: begin
// Increment only those DQ for which an edge hasn't been found yet
cal1_dlyce_dq_r <= #TCQ ~pb_found_edge_last_r;
cal1_dlyinc_dq_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_PB_INC_DQ_WAIT;
end
CAL1_PB_INC_DQ_WAIT:
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE_DQ;
// Decrement capture clock taps back to initial value
CAL1_PB_DEC_CPT:
if (cnt_idel_dec_cpt_r == 6'b000000)
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_WAIT;
else begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1;
end
// Wait for capture clock to settle, then proceed to rest of
// state 1 calibration for this DQS group
CAL1_PB_DEC_CPT_WAIT:
if (!cal1_wait_r) begin
store_sr_req_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
end
// When first starting calibration for a DQS group, save the
// current value of the read data shift register, and use this
// as a reference. Note that for the first iteration of the
// edge detection loop, we will in effect be checking for an edge
// at IODELAY taps = 0 - normally, we are comparing the read data
// for IODELAY taps = N, with the read data for IODELAY taps = N-1
// An edge can only be found at IODELAY taps = 0 if the read data
// is changing during this time (possible due to jitter)
CAL1_STORE_FIRST_WAIT: begin
mpr_dec_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PAT_DETECT;
end
CAL1_VALID_WAIT: begin
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT;
end
CAL1_MPR_PAT_DETECT: begin
// MPR read leveling for centering DQS in valid window before
// OCLKDELAYED calibration begins in order to eliminate read issues
if (idel_pat_detect_valid_r == 1'b0) begin
cal1_state_r <= #TCQ CAL1_VALID_WAIT;
idel_pat_detect_valid_r <= #TCQ 1'b1;
end else if (idel_pat_detect_valid_r && idel_mpr_pat_detect_r) begin
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
idel_dec_cnt <= #TCQ 'd0;
end else if (!idelay_tap_limit_r)
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC;
else
cal1_state_r <= #TCQ CAL1_RDLVL_ERR;
end
CAL1_PAT_DETECT: begin
// All DQ bits associated with a DQS are pushed to the right one IDELAY
// tap at a time until the first rising DQS edge is in the tri-state
// region before the first rising-edge data window.
// The detect_edge_done_r condition is included to support averaging
// during IDELAY tap increments
if (detect_edge_done_r) begin
if (idel_pat_data_match) begin
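// idelay_adj selects an optional final one-tap adjustment once the pattern
// matches: 2'b01 bumps the DQ IDELAY up by one tap, 2'b10 backs it off by
// one tap, and any other value leaves the taps as-is.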
case (idelay_adj)
2'b01: begin
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC;
idel_dec_cnt <= #TCQ 1'b0;
idel_adj_inc <= #TCQ 1'b1;
end
2'b10: begin //DEC by 1
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC ;
idel_dec_cnt <= #TCQ 1'b1;
idel_adj_inc <= #TCQ 1'b0;
end
default: begin
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
idel_dec_cnt <= #TCQ 1'b0;
idel_adj_inc <= #TCQ 1'b0;
end
endcase
end else if (!idelay_tap_limit_r) begin
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC;
end else begin
cal1_state_r <= #TCQ CAL1_RDLVL_ERR;
end
end
end
// Increment IDELAY tap by 1 for DQ bits in the byte being calibrated
// until left edge of valid window detected
CAL1_DQ_IDEL_TAP_INC: begin
cal1_dq_idel_ce <= #TCQ 1'b1;
cal1_dq_idel_inc <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC_WAIT;
idel_pat_detect_valid_r <= #TCQ 1'b0;
end
CAL1_DQ_IDEL_TAP_INC_WAIT: begin
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
if (!cal1_wait_r) begin
idel_adj_inc <= #TCQ 1'b0;
if (idel_adj_inc)
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
else if (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3"))
cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT;
else
cal1_state_r <= #TCQ CAL1_PAT_DETECT;
end
end
// Decrement DQ IDELAY taps once idel_pat_data_match detected
CAL1_DQ_IDEL_TAP_DEC: begin
cal1_dq_idel_inc <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC_WAIT;
if (idel_dec_cnt >= 'd0)
cal1_dq_idel_ce <= #TCQ 1'b1;
else
cal1_dq_idel_ce <= #TCQ 1'b0;
if (idel_dec_cnt > 'd0)
idel_dec_cnt <= #TCQ idel_dec_cnt - 1;
else
idel_dec_cnt <= #TCQ idel_dec_cnt;
end
CAL1_DQ_IDEL_TAP_DEC_WAIT: begin
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
if (!cal1_wait_r) begin
if ((idel_dec_cnt > 'd0) || (pi_rdval_cnt > 'd0))
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC;
else if (mpr_dec_cpt_r)
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
else
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
end
end
// Check for presence of data eye edge. During this state, we
// sample the read data multiple times, and look for changes
// in the read data, specifically:
// 1. A change in the read data compared with the value of
// read data from the previous delay tap. This indicates
// that the most recent tap delay increment has moved us
// into either a new window, or moved/kept us in the
// transition/jitter region between windows. Note that this
// condition only needs to be checked for once, and for
// logistical purposes, we check this soon after entering
// this state (see comment in CAL1_DETECT_EDGE below for
// why this is done)
// 2. A change in the read data while we are in this state
// (i.e. in the absence of a tap delay increment). This
// indicates that we're close enough to a window edge that
// jitter will cause the read data to change even in the
// absence of a tap delay change
CAL1_DETECT_EDGE: begin
// Essentially wait for the first comparison to finish, then
// store the current data into the "old" data register. This store
// happens now, rather than later (e.g. when we've already
// left this state), in order to avoid the situation where the data
// stored as "old" data has not been used in an "active
// comparison" - i.e. data stored after the last comparison
// of this state. In that case, we can miss an edge if the
// following sequence occurs:
// 1. Comparison completes in this state - no edge found
// 2. "Momentary jitter" occurs which "pushes" the data out the
// equivalent of one delay tap
// 3. We store this jittered data as the "old" data
// 4. "Jitter" no longer present
// 5. We increment the delay tap by one
// 6. Now we compare the current with the "old" data - they're
// the same, and no edge is detected
// NOTE: Given the large # of comparisons done in this state, it's
// highly unlikely the above sequence will occur in actual H/W
// Wait for the first load of read data into the comparison
// shift register to finish, then load the current read data
// into the "old" data register. This allows us to do one
// initial comparison between the current read data, and
// stored data corresponding to the previous delay tap
idel_pat_detect_valid_r <= #TCQ 1'b0;
if (!store_sr_req_pulsed_r) begin
// Pulse store_sr_req_r only once in this state
store_sr_req_r <= #TCQ 1'b1;
store_sr_req_pulsed_r <= #TCQ 1'b1;
end else begin
store_sr_req_r <= #TCQ 1'b0;
store_sr_req_pulsed_r <= #TCQ 1'b1;
end
// Continue to sample read data and look for edges until the
// appropriate time interval (shorter for simulation-only,
// much, much longer for actual h/w) has elapsed
if (detect_edge_done_r) begin
if (tap_limit_cpt_r)
// Only one edge detected and we ran out of taps, since only one
// bit time's worth of taps is available for window detection. This
// can happen if at tap 0 DQS is in the previous window, which results
// in only the left edge being detected; or if at tap 0 DQS is in the
// current window, resulting in only the right edge being detected.
// Depending on the frequency, this case can also happen if at
// tap 0 DQS is in the left noise region, resulting in only the left
// edge being detected.
cal1_state_r <= #TCQ CAL1_CALC_IDEL;
else if (found_edge_r) begin
// Sticky bit - asserted after we encounter an edge, although
// the current edge may not be considered the "first edge" - this
// just means we found at least one edge
found_first_edge_r <= #TCQ 1'b1;
// Only the right edge of the data valid window is found
// Record the inner right edge tap value
if (!found_first_edge_r && found_stable_eye_last_r) begin
if (tap_cnt_cpt_r == 'd0)
right_edge_taps_r <= #TCQ 'd0;
else
right_edge_taps_r <= #TCQ tap_cnt_cpt_r;
end
// Both edges of data valid window found:
// If we've found a second edge after a region of stability
// then we must have just passed the second ("right") edge of
// the window. Record second_edge_taps = current tap - 1,
// because we're one past the actual second edge tap, where
// the edge taps represent the extremes of the data valid
// window (i.e. smallest & largest taps where data is still valid)
if (found_first_edge_r && found_stable_eye_last_r) begin
found_second_edge_r <= #TCQ 1'b1;
second_edge_taps_r <= #TCQ tap_cnt_cpt_r - 1;
cal1_state_r <= #TCQ CAL1_CALC_IDEL;
end else begin
// Otherwise, an edge was found (just not the "second" edge)
// Assuming DQS is in the correct window at tap 0 of Phaser IN
// fine tap. The first edge found is the right edge of the valid
// window and is the beginning of the jitter region hence done!
first_edge_taps_r <= #TCQ tap_cnt_cpt_r;
cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT;
end
end else
// Otherwise, if we haven't found an edge....
// If we still have taps left to use, then keep incrementing
cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT;
end
end
// Increment Phaser_IN delay for DQS
CAL1_IDEL_INC_CPT: begin
cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT_WAIT;
if (~tap_limit_cpt_r) begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b1;
end else begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
end
end
// Wait for Phaser_In to settle, before checking again for an edge
CAL1_IDEL_INC_CPT_WAIT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
end
// Calculate final value of Phaser_IN taps. At this point, one or both
// edges of data eye have been found, and/or all taps have been
// exhausted looking for the edges
// NOTE: We're calculating the amount to decrement by, not the
// absolute setting for DQS.
CAL1_CALC_IDEL: begin
// CASE1: If 2 edges found.
if (found_second_edge_r)
cnt_idel_dec_cpt_r
<= #TCQ ((second_edge_taps_r -
first_edge_taps_r)>>1) + 1;
else if (right_edge_taps_r > 6'd0)
// Only right edge detected
// right_edge_taps_r is the inner right edge tap value
// hence used for calculation
cnt_idel_dec_cpt_r
<= #TCQ (tap_cnt_cpt_r - (right_edge_taps_r>>1));
else if (found_first_edge_r)
// Only left edge detected
cnt_idel_dec_cpt_r
<= #TCQ ((tap_cnt_cpt_r - first_edge_taps_r)>>1);
else
cnt_idel_dec_cpt_r
<= #TCQ (tap_cnt_cpt_r>>1);
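// Worked example (hypothetical tap values): with first_edge_taps_r = 10 and
// second_edge_taps_r = 40, cnt_idel_dec_cpt_r = ((40-10)>>1)+1 = 16; since
// tap_cnt_cpt_r sits one tap past the second edge (41), decrementing by 16
// lands the capture clock at tap 25, the center of the 10..40 window.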
// Now use the value we just calculated to decrement CPT taps
// to the desired calibration point
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT;
end
// decrement capture clock for final adjustment - center
// capture clock in middle of data eye. This adjustment will occur
// only when both the edges are found using CPT taps. Must do this
// incrementally to avoid clock glitching (since CPT drives clock
// divider within each ISERDES)
CAL1_IDEL_DEC_CPT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
// once adjustment is complete, we're done with calibration for
// this DQS, repeat for next DQS
cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1;
if (cnt_idel_dec_cpt_r == 6'b000001) begin
if (mpr_dec_cpt_r) begin
if (|idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing]) begin
idel_dec_cnt <= #TCQ idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing];
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC;
end else
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
end else
cal1_state_r <= #TCQ CAL1_NEXT_DQS;
end else
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT_WAIT;
end
CAL1_IDEL_DEC_CPT_WAIT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT;
end
// Determine whether we're done, or have more DQS's to calibrate
// Also request precharge after every byte, as appropriate
CAL1_NEXT_DQS: begin
//if (mpr_rdlvl_done_r || (DRAM_TYPE == "DDR2"))
cal1_prech_req_r <= #TCQ 1'b1;
//else
// cal1_prech_req_r <= #TCQ 1'b0;
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
// Prepare for another iteration with next DQS group
found_first_edge_r <= #TCQ 1'b0;
found_second_edge_r <= #TCQ 1'b0;
first_edge_taps_r <= #TCQ 'd0;
second_edge_taps_r <= #TCQ 'd0;
if ((SIM_CAL_OPTION == "FAST_CAL") ||
(cal1_cnt_cpt_r >= DQS_WIDTH-1)) begin
if (mpr_rdlvl_done_r) begin
rdlvl_last_byte_done <= #TCQ 1'b1;
mpr_last_byte_done <= #TCQ 1'b0;
end else begin
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b1;
end
end
// Wait until precharge that occurs in between calibration of
// DQS groups is finished
if (prech_done) begin // || (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3"))) begin
if (SIM_CAL_OPTION == "FAST_CAL") begin
//rdlvl_rank_done_r <= #TCQ 1'b1;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_DONE; //CAL1_REGL_LOAD;
end else if (cal1_cnt_cpt_r >= DQS_WIDTH-1) begin
if (~mpr_rdlvl_done_r) begin
mpr_rank_done_r <= #TCQ 1'b1;
// if (rnk_cnt_r == RANKS-1) begin
// All DQS groups in all ranks done
cal1_state_r <= #TCQ CAL1_DONE;
cal1_cnt_cpt_r <= #TCQ 'b0;
// end else begin
// // Process DQS groups in next rank
// rnk_cnt_r <= #TCQ rnk_cnt_r + 1;
// new_cnt_cpt_r <= #TCQ 1'b1;
// cal1_cnt_cpt_r <= #TCQ 'b0;
// cal1_state_r <= #TCQ CAL1_IDLE;
// end
end else begin
// All DQS groups in a rank done
rdlvl_rank_done_r <= #TCQ 1'b1;
if (rnk_cnt_r == RANKS-1) begin
// All DQS groups in all ranks done
cal1_state_r <= #TCQ CAL1_REGL_LOAD;
end else begin
// Process DQS groups in next rank
rnk_cnt_r <= #TCQ rnk_cnt_r + 1;
new_cnt_cpt_r <= #TCQ 1'b1;
cal1_cnt_cpt_r <= #TCQ 'b0;
cal1_state_r <= #TCQ CAL1_IDLE;
end
end
end else begin
// Process next DQS group
new_cnt_cpt_r <= #TCQ 1'b1;
cal1_cnt_cpt_r <= #TCQ cal1_cnt_cpt_r + 1;
cal1_state_r <= #TCQ CAL1_NEW_DQS_PREWAIT;
end
end
end
CAL1_NEW_DQS_PREWAIT: begin
if (!cal1_wait_r) begin
if (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3"))
cal1_state_r <= #TCQ CAL1_MPR_NEW_DQS_WAIT;
else
cal1_state_r <= #TCQ CAL1_NEW_DQS_WAIT;
end
end
// Load rank registers in Phaser_IN
CAL1_REGL_LOAD: begin
rdlvl_rank_done_r <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
cal1_prech_req_r <= #TCQ 1'b0;
cal1_cnt_cpt_r <= #TCQ 'b0;
rnk_cnt_r <= #TCQ 2'b00;
if ((regl_rank_cnt == RANKS-1) &&
((regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1))) begin
cal1_state_r <= #TCQ CAL1_DONE;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
end else
cal1_state_r <= #TCQ CAL1_REGL_LOAD;
end
CAL1_RDLVL_ERR: begin
rdlvl_stg1_err <= #TCQ 1'b1;
end
// Done with this stage of calibration
// if used, allow DEBUG_PORT to control taps
CAL1_DONE: begin
mpr_rdlvl_done_r <= #TCQ 1'b1;
cal1_prech_req_r <= #TCQ 1'b0;
if (~mpr_rdlvl_done_r && (OCAL_EN=="ON") && (DRAM_TYPE == "DDR3")) begin
rdlvl_stg1_done <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_IDLE;
end else
rdlvl_stg1_done <= #TCQ 1'b1;
end
endcase
end
// verilint STARC-2.2.3.3 on
endmodule
|
module mig_7series_v2_3_ddr_phy_rdlvl #
(
parameter TCQ = 100, // clk->out delay (sim only)
parameter nCK_PER_CLK = 2, // # of memory clocks per CLK
parameter CLK_PERIOD = 3333, // Internal clock period (in ps)
parameter DQ_WIDTH = 64, // # of DQ (data)
parameter DQS_CNT_WIDTH = 3, // = ceil(log2(DQS_WIDTH))
parameter DQS_WIDTH = 8, // # of DQS (strobe)
parameter DRAM_WIDTH = 8, // # of DQ per DQS
parameter RANKS = 1, // # of DRAM ranks
parameter PER_BIT_DESKEW = "ON", // Enable per-bit DQ deskew
parameter SIM_CAL_OPTION = "NONE", // Skip various calibration steps
parameter DEBUG_PORT = "OFF", // Enable debug port
parameter DRAM_TYPE = "DDR3", // Memory I/F type: "DDR3", "DDR2"
parameter OCAL_EN = "ON",
parameter IDELAY_ADJ = "ON"
)
(
input clk,
input rst,
// Calibration status, control signals
input mpr_rdlvl_start,
output mpr_rdlvl_done,
output reg mpr_last_byte_done,
output mpr_rnk_done,
input rdlvl_stg1_start,
output reg rdlvl_stg1_done /* synthesis syn_maxfan = 30 */,
output rdlvl_stg1_rnk_done,
output reg rdlvl_stg1_err,
output mpr_rdlvl_err,
output rdlvl_err,
output reg rdlvl_prech_req,
output reg rdlvl_last_byte_done,
output reg rdlvl_assrt_common,
input prech_done,
input phy_if_empty,
input [4:0] idelaye2_init_val,
// Captured data in fabric clock domain
input [2*nCK_PER_CLK*DQ_WIDTH-1:0] rd_data,
// Decrement initial Phaser_IN Fine tap delay
input dqs_po_dec_done,
input [5:0] pi_counter_read_val,
// Stage 1 calibration outputs
output reg pi_fine_dly_dec_done,
output reg pi_en_stg2_f,
output reg pi_stg2_f_incdec,
output reg pi_stg2_load,
output reg [5:0] pi_stg2_reg_l,
output [DQS_CNT_WIDTH:0] pi_stg2_rdlvl_cnt,
// To DQ IDELAY required to find left edge of
// valid window
output idelay_ce,
output idelay_inc,
input idelay_ld,
input [DQS_CNT_WIDTH:0] wrcal_cnt,
// Only output if Per-bit de-skew enabled
output reg [5*RANKS*DQ_WIDTH-1:0] dlyval_dq,
// Debug Port
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_first_edge_cnt,
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_second_edge_cnt,
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_tap_cnt,
output [5*DQS_WIDTH*RANKS-1:0] dbg_dq_idelay_tap_cnt,
input dbg_idel_up_all,
input dbg_idel_down_all,
input dbg_idel_up_cpt,
input dbg_idel_down_cpt,
input [DQS_CNT_WIDTH-1:0] dbg_sel_idel_cpt,
input dbg_sel_all_idel_cpt,
output [255:0] dbg_phy_rdlvl
);
// Minimum time (in IDELAY taps) for which captured data must be stable for
// the algorithm to consider a valid data eye to be found. The read leveling
// logic will ignore any window found smaller than this value. Limits
// on how small this number can be are determined by: (1) the algorithmic
// limitation of how many taps wide the data eye can be (3 taps), and (2)
// how wide the regions of "instability" around the edges of the
// read valid window can be (i.e. we need to be able to filter out "false"
// windows that occur for a short # of taps around the edges of the true
// data window, although with multi-sampling during read leveling, this is
// not as much of a concern) - the larger the value, the more protection
// against "false" windows
localparam MIN_EYE_SIZE = 16;
// Length of calibration sequence (in # of words)
localparam CAL_PAT_LEN = 8;
// Read data shift register length
localparam RD_SHIFT_LEN = CAL_PAT_LEN / (2*nCK_PER_CLK);
// # of cycles required to perform read data shift register compare
// This is defined as from the cycle the new data is loaded until
// signal found_edge_r is valid
localparam RD_SHIFT_COMP_DELAY = 5;
// worst-case # of cycles to wait to ensure that both the SR and
// PREV_SR shift registers have valid data, and that the comparison
// of the two shift register values is valid. The "+1" at the end of
// this equation is a fudge factor, I freely admit that
localparam SR_VALID_DELAY = (2 * RD_SHIFT_LEN) + RD_SHIFT_COMP_DELAY + 1;
// # of clock cycles to wait after changing tap value or read data MUX
// to allow: (1) tap chain to settle, (2) for delayed input to propagate
// thru ISERDES, (3) for the read data comparison logic to have time to
// output the comparison of two consecutive samples of the settled read data
// The minimum delay is 16 cycles, which should be good enough to handle all
// three of the above conditions for the simulation-only case with a short
// training pattern. For H/W (or for simulation with longer training
// pattern), it will take longer to store and compare two consecutive
// samples, and the value of this parameter will reflect that
localparam PIPE_WAIT_CNT = (SR_VALID_DELAY < 8) ? 16 : (SR_VALID_DELAY + 8);
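  // For reference, with the parameter defaults above (CAL_PAT_LEN = 8,
  // nCK_PER_CLK = 2): RD_SHIFT_LEN = 8/(2*2) = 2, SR_VALID_DELAY =
  // (2*2) + 5 + 1 = 10, and PIPE_WAIT_CNT = 10 + 8 = 18. For
  // nCK_PER_CLK = 4: RD_SHIFT_LEN = 1, SR_VALID_DELAY = 8, and
  // PIPE_WAIT_CNT = 8 + 8 = 16.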
// # of read data samples to examine when detecting whether an edge has
  // occurred during stage 1 calibration. The width of each localparam must be
  // changed as appropriate. Note that two counters are used; each counter
  // can be changed independently of the other - they are used in
  // cascade to create a larger counter
localparam [11:0] DETECT_EDGE_SAMPLE_CNT0 = 12'h001; //12'hFFF;
localparam [11:0] DETECT_EDGE_SAMPLE_CNT1 = 12'h001; // 12'h1FF Must be > 0
localparam [5:0] CAL1_IDLE = 6'h00;
localparam [5:0] CAL1_NEW_DQS_WAIT = 6'h01;
localparam [5:0] CAL1_STORE_FIRST_WAIT = 6'h02;
localparam [5:0] CAL1_PAT_DETECT = 6'h03;
localparam [5:0] CAL1_DQ_IDEL_TAP_INC = 6'h04;
localparam [5:0] CAL1_DQ_IDEL_TAP_INC_WAIT = 6'h05;
localparam [5:0] CAL1_DQ_IDEL_TAP_DEC = 6'h06;
localparam [5:0] CAL1_DQ_IDEL_TAP_DEC_WAIT = 6'h07;
localparam [5:0] CAL1_DETECT_EDGE = 6'h08;
localparam [5:0] CAL1_IDEL_INC_CPT = 6'h09;
localparam [5:0] CAL1_IDEL_INC_CPT_WAIT = 6'h0A;
localparam [5:0] CAL1_CALC_IDEL = 6'h0B;
localparam [5:0] CAL1_IDEL_DEC_CPT = 6'h0C;
localparam [5:0] CAL1_IDEL_DEC_CPT_WAIT = 6'h0D;
localparam [5:0] CAL1_NEXT_DQS = 6'h0E;
localparam [5:0] CAL1_DONE = 6'h0F;
localparam [5:0] CAL1_PB_STORE_FIRST_WAIT = 6'h10;
localparam [5:0] CAL1_PB_DETECT_EDGE = 6'h11;
localparam [5:0] CAL1_PB_INC_CPT = 6'h12;
localparam [5:0] CAL1_PB_INC_CPT_WAIT = 6'h13;
localparam [5:0] CAL1_PB_DEC_CPT_LEFT = 6'h14;
localparam [5:0] CAL1_PB_DEC_CPT_LEFT_WAIT = 6'h15;
localparam [5:0] CAL1_PB_DETECT_EDGE_DQ = 6'h16;
localparam [5:0] CAL1_PB_INC_DQ = 6'h17;
localparam [5:0] CAL1_PB_INC_DQ_WAIT = 6'h18;
localparam [5:0] CAL1_PB_DEC_CPT = 6'h19;
localparam [5:0] CAL1_PB_DEC_CPT_WAIT = 6'h1A;
localparam [5:0] CAL1_REGL_LOAD = 6'h1B;
localparam [5:0] CAL1_RDLVL_ERR = 6'h1C;
localparam [5:0] CAL1_MPR_NEW_DQS_WAIT = 6'h1D;
localparam [5:0] CAL1_VALID_WAIT = 6'h1E;
localparam [5:0] CAL1_MPR_PAT_DETECT = 6'h1F;
localparam [5:0] CAL1_NEW_DQS_PREWAIT = 6'h20;
integer a;
integer b;
integer d;
integer e;
integer f;
integer h;
integer g;
integer i;
integer j;
integer k;
integer l;
integer m;
integer n;
integer r;
integer p;
integer q;
integer s;
integer t;
integer u;
integer w;
integer ce_i;
integer ce_rnk_i;
integer aa;
integer bb;
integer cc;
integer dd;
genvar x;
genvar z;
reg [DQS_CNT_WIDTH:0] cal1_cnt_cpt_r;
  wire [DQS_CNT_WIDTH+2:0] cal1_cnt_cpt_timing;
reg [DQS_CNT_WIDTH:0] cal1_cnt_cpt_timing_r;
reg cal1_dq_idel_ce;
reg cal1_dq_idel_inc;
reg cal1_dlyce_cpt_r;
reg cal1_dlyinc_cpt_r;
reg cal1_dlyce_dq_r;
reg cal1_dlyinc_dq_r;
reg cal1_wait_cnt_en_r;
reg [4:0] cal1_wait_cnt_r;
reg cal1_wait_r;
reg [DQ_WIDTH-1:0] dlyce_dq_r;
reg dlyinc_dq_r;
reg [4:0] dlyval_dq_reg_r [0:RANKS-1][0:DQ_WIDTH-1];
reg cal1_prech_req_r;
reg [5:0] cal1_state_r;
reg [5:0] cal1_state_r1;
reg [5:0] cnt_idel_dec_cpt_r;
reg [3:0] cnt_shift_r;
reg detect_edge_done_r;
reg [5:0] right_edge_taps_r;
reg [5:0] first_edge_taps_r;
reg found_edge_r;
reg found_first_edge_r;
reg found_second_edge_r;
reg found_stable_eye_r;
reg found_stable_eye_last_r;
reg found_edge_all_r;
reg [5:0] tap_cnt_cpt_r;
reg tap_limit_cpt_r;
reg [4:0] idel_tap_cnt_dq_pb_r;
reg idel_tap_limit_dq_pb_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall0_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall1_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise0_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise1_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall2_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall3_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise2_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise3_r;
reg mux_rd_valid_r;
reg new_cnt_cpt_r;
reg [RD_SHIFT_LEN-1:0] old_sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise3_r [DRAM_WIDTH-1:0];
reg [DRAM_WIDTH-1:0] old_sr_match_fall0_r;
reg [DRAM_WIDTH-1:0] old_sr_match_fall1_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise0_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise1_r;
reg [DRAM_WIDTH-1:0] old_sr_match_fall2_r;
reg [DRAM_WIDTH-1:0] old_sr_match_fall3_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise2_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise3_r;
reg [4:0] pb_cnt_eye_size_r [DRAM_WIDTH-1:0];
reg [DRAM_WIDTH-1:0] pb_detect_edge_done_r;
reg [DRAM_WIDTH-1:0] pb_found_edge_last_r;
reg [DRAM_WIDTH-1:0] pb_found_edge_r;
reg [DRAM_WIDTH-1:0] pb_found_first_edge_r;
reg [DRAM_WIDTH-1:0] pb_found_stable_eye_r;
reg [DRAM_WIDTH-1:0] pb_last_tap_jitter_r;
reg pi_en_stg2_f_timing;
reg pi_stg2_f_incdec_timing;
reg pi_stg2_load_timing;
reg [5:0] pi_stg2_reg_l_timing;
reg [DRAM_WIDTH-1:0] prev_sr_diff_r;
reg [RD_SHIFT_LEN-1:0] prev_sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise3_r [DRAM_WIDTH-1:0];
reg [DRAM_WIDTH-1:0] prev_sr_match_cyc2_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall0_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall1_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise0_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise1_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall2_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall3_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise2_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise3_r;
wire [DQ_WIDTH-1:0] rd_data_rise0;
wire [DQ_WIDTH-1:0] rd_data_fall0;
wire [DQ_WIDTH-1:0] rd_data_rise1;
wire [DQ_WIDTH-1:0] rd_data_fall1;
wire [DQ_WIDTH-1:0] rd_data_rise2;
wire [DQ_WIDTH-1:0] rd_data_fall2;
wire [DQ_WIDTH-1:0] rd_data_rise3;
wire [DQ_WIDTH-1:0] rd_data_fall3;
reg samp_cnt_done_r;
reg samp_edge_cnt0_en_r;
reg [11:0] samp_edge_cnt0_r;
reg samp_edge_cnt1_en_r;
reg [11:0] samp_edge_cnt1_r;
reg [DQS_CNT_WIDTH:0] rd_mux_sel_r;
reg [5:0] second_edge_taps_r;
reg [RD_SHIFT_LEN-1:0] sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise3_r [DRAM_WIDTH-1:0];
reg store_sr_r;
reg store_sr_req_pulsed_r;
reg store_sr_req_r;
reg sr_valid_r;
reg sr_valid_r1;
reg sr_valid_r2;
reg [DRAM_WIDTH-1:0] old_sr_diff_r;
reg [DRAM_WIDTH-1:0] old_sr_match_cyc2_r;
reg pat0_data_match_r;
reg pat1_data_match_r;
wire pat_data_match_r;
wire [RD_SHIFT_LEN-1:0] pat0_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall3 [3:0];
reg [DRAM_WIDTH-1:0] pat0_match_fall0_r;
reg pat0_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_fall1_r;
reg pat0_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_fall2_r;
reg pat0_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_fall3_r;
reg pat0_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise0_r;
reg pat0_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise1_r;
reg pat0_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise2_r;
reg pat0_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise3_r;
reg pat0_match_rise3_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall0_r;
reg pat1_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall1_r;
reg pat1_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall2_r;
reg pat1_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall3_r;
reg pat1_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise0_r;
reg pat1_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise1_r;
reg pat1_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise2_r;
reg pat1_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise3_r;
reg pat1_match_rise3_and_r;
reg [4:0] idelay_tap_cnt_r [0:RANKS-1][0:DQS_WIDTH-1];
reg [5*DQS_WIDTH*RANKS-1:0] idelay_tap_cnt_w;
reg [4:0] idelay_tap_cnt_slice_r;
reg idelay_tap_limit_r;
wire [RD_SHIFT_LEN-1:0] pat0_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall3 [3:0];
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise0_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall0_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise1_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall1_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise2_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall2_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise3_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall3_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise0_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall0_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise1_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall1_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise2_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall2_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise3_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall3_r;
reg idel_pat0_match_rise0_and_r;
reg idel_pat0_match_fall0_and_r;
reg idel_pat0_match_rise1_and_r;
reg idel_pat0_match_fall1_and_r;
reg idel_pat0_match_rise2_and_r;
reg idel_pat0_match_fall2_and_r;
reg idel_pat0_match_rise3_and_r;
reg idel_pat0_match_fall3_and_r;
reg idel_pat1_match_rise0_and_r;
reg idel_pat1_match_fall0_and_r;
reg idel_pat1_match_rise1_and_r;
reg idel_pat1_match_fall1_and_r;
reg idel_pat1_match_rise2_and_r;
reg idel_pat1_match_fall2_and_r;
reg idel_pat1_match_rise3_and_r;
reg idel_pat1_match_fall3_and_r;
reg idel_pat0_data_match_r;
reg idel_pat1_data_match_r;
reg idel_pat_data_match;
reg idel_pat_data_match_r;
reg [4:0] idel_dec_cnt;
reg [5:0] rdlvl_dqs_tap_cnt_r [0:RANKS-1][0:DQS_WIDTH-1];
reg [1:0] rnk_cnt_r;
reg rdlvl_rank_done_r;
reg [3:0] done_cnt;
reg [1:0] regl_rank_cnt;
reg [DQS_CNT_WIDTH:0] regl_dqs_cnt;
reg [DQS_CNT_WIDTH:0] regl_dqs_cnt_r;
  wire [DQS_CNT_WIDTH+2:0] regl_dqs_cnt_timing;
reg regl_rank_done_r;
reg rdlvl_stg1_start_r;
reg dqs_po_dec_done_r1;
reg dqs_po_dec_done_r2;
reg fine_dly_dec_done_r1;
reg fine_dly_dec_done_r2;
reg [3:0] wait_cnt_r;
reg [5:0] pi_rdval_cnt;
reg pi_cnt_dec;
reg mpr_valid_r;
reg mpr_valid_r1;
reg mpr_valid_r2;
reg mpr_rd_rise0_prev_r;
reg mpr_rd_fall0_prev_r;
reg mpr_rd_rise1_prev_r;
reg mpr_rd_fall1_prev_r;
reg mpr_rd_rise2_prev_r;
reg mpr_rd_fall2_prev_r;
reg mpr_rd_rise3_prev_r;
reg mpr_rd_fall3_prev_r;
reg mpr_rdlvl_done_r;
reg mpr_rdlvl_done_r1;
reg mpr_rdlvl_done_r2;
reg mpr_rdlvl_start_r;
reg mpr_rank_done_r;
reg [2:0] stable_idel_cnt;
reg inhibit_edge_detect_r;
reg idel_pat_detect_valid_r;
reg idel_mpr_pat_detect_r;
reg mpr_pat_detect_r;
reg mpr_dec_cpt_r;
reg idel_adj_inc; //IDELAY adjustment
wire [1:0] idelay_adj;
wire pb_detect_edge_setup;
wire pb_detect_edge;
// Debug
reg [6*DQS_WIDTH-1:0] dbg_cpt_first_edge_taps;
reg [6*DQS_WIDTH-1:0] dbg_cpt_second_edge_taps;
reg [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_tap_cnt_w;
//IDELAY adjustment setting for -1
//2'b10 : IDELAY - 1
//2'b01 : IDELAY + 1
//2'b00 : No IDELAY adjustment
assign idelay_adj = (IDELAY_ADJ == "ON") ? 2'b10: 2'b00;
//***************************************************************************
// Debug
//***************************************************************************
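  // The per-rank/per-DQS tap counters below are kept as 2-D arrays; this
  // combinational block flattens them into the flat idelay_tap_cnt_w and
  // dbg_cpt_tap_cnt_w vectors (5 and 6 bits per DQS group, ranks
  // concatenated) so they can be driven onto the debug output ports.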
always @(*) begin
for (d = 0; d < RANKS; d = d + 1) begin
for (e = 0; e < DQS_WIDTH; e = e + 1) begin
idelay_tap_cnt_w[(5*e+5*DQS_WIDTH*d)+:5] = idelay_tap_cnt_r[d][e];
dbg_cpt_tap_cnt_w[(6*e+6*DQS_WIDTH*d)+:6] = rdlvl_dqs_tap_cnt_r[d][e];
end
end
end
assign mpr_rdlvl_err = rdlvl_stg1_err & (!mpr_rdlvl_done);
assign rdlvl_err = rdlvl_stg1_err & (mpr_rdlvl_done);
assign dbg_phy_rdlvl[0] = rdlvl_stg1_start;
assign dbg_phy_rdlvl[1] = pat_data_match_r;
assign dbg_phy_rdlvl[2] = mux_rd_valid_r;
assign dbg_phy_rdlvl[3] = idelay_tap_limit_r;
assign dbg_phy_rdlvl[8:4] = 'b0;
assign dbg_phy_rdlvl[14:9] = cal1_state_r[5:0];
assign dbg_phy_rdlvl[20:15] = cnt_idel_dec_cpt_r;
assign dbg_phy_rdlvl[21] = found_first_edge_r;
assign dbg_phy_rdlvl[22] = found_second_edge_r;
assign dbg_phy_rdlvl[23] = found_edge_r;
assign dbg_phy_rdlvl[24] = store_sr_r;
// [40:25] previously used for sr, old_sr shift registers. If connecting
// these signals again, don't forget to parameterize based on RD_SHIFT_LEN
assign dbg_phy_rdlvl[40:25] = 'b0;
assign dbg_phy_rdlvl[41] = sr_valid_r;
assign dbg_phy_rdlvl[42] = found_stable_eye_r;
assign dbg_phy_rdlvl[48:43] = tap_cnt_cpt_r;
assign dbg_phy_rdlvl[54:49] = first_edge_taps_r;
assign dbg_phy_rdlvl[60:55] = second_edge_taps_r;
assign dbg_phy_rdlvl[64:61] = cal1_cnt_cpt_timing_r;
assign dbg_phy_rdlvl[65] = cal1_dlyce_cpt_r;
assign dbg_phy_rdlvl[66] = cal1_dlyinc_cpt_r;
assign dbg_phy_rdlvl[67] = found_edge_r;
assign dbg_phy_rdlvl[68] = found_first_edge_r;
assign dbg_phy_rdlvl[73:69] = 'b0;
assign dbg_phy_rdlvl[74] = idel_pat_data_match;
assign dbg_phy_rdlvl[75] = idel_pat0_data_match_r;
assign dbg_phy_rdlvl[76] = idel_pat1_data_match_r;
assign dbg_phy_rdlvl[77] = pat0_data_match_r;
assign dbg_phy_rdlvl[78] = pat1_data_match_r;
assign dbg_phy_rdlvl[79+:5*DQS_WIDTH*RANKS] = idelay_tap_cnt_w;
assign dbg_phy_rdlvl[170+:8] = mux_rd_rise0_r;
assign dbg_phy_rdlvl[178+:8] = mux_rd_fall0_r;
assign dbg_phy_rdlvl[186+:8] = mux_rd_rise1_r;
assign dbg_phy_rdlvl[194+:8] = mux_rd_fall1_r;
assign dbg_phy_rdlvl[202+:8] = mux_rd_rise2_r;
assign dbg_phy_rdlvl[210+:8] = mux_rd_fall2_r;
assign dbg_phy_rdlvl[218+:8] = mux_rd_rise3_r;
assign dbg_phy_rdlvl[226+:8] = mux_rd_fall3_r;
//***************************************************************************
// Debug output
//***************************************************************************
// CPT taps
assign dbg_cpt_first_edge_cnt = dbg_cpt_first_edge_taps;
assign dbg_cpt_second_edge_cnt = dbg_cpt_second_edge_taps;
assign dbg_cpt_tap_cnt = dbg_cpt_tap_cnt_w;
assign dbg_dq_idelay_tap_cnt = idelay_tap_cnt_w;
// Record first and second edges found during CPT calibration
generate
always @(posedge clk)
if (rst) begin
dbg_cpt_first_edge_taps <= #TCQ 'b0;
dbg_cpt_second_edge_taps <= #TCQ 'b0;
end else if ((SIM_CAL_OPTION == "FAST_CAL") & (cal1_state_r1 == CAL1_CALC_IDEL)) begin
//for (ce_rnk_i = 0; ce_rnk_i < RANKS; ce_rnk_i = ce_rnk_i + 1) begin: gen_dbg_cpt_rnk
for (ce_i = 0; ce_i < DQS_WIDTH; ce_i = ce_i + 1) begin: gen_dbg_cpt_edge
if (found_first_edge_r)
dbg_cpt_first_edge_taps[(6*ce_i)+:6]
<= #TCQ first_edge_taps_r;
if (found_second_edge_r)
dbg_cpt_second_edge_taps[(6*ce_i)+:6]
<= #TCQ second_edge_taps_r;
end
//end
end else if (cal1_state_r == CAL1_CALC_IDEL) begin
        // Record tap counts of the first and second edges found during
        // CPT calibration for each DQS group. If neither edge has
        // been found, then those taps will remain 0
if (found_first_edge_r)
dbg_cpt_first_edge_taps[((cal1_cnt_cpt_timing <<2) + (cal1_cnt_cpt_timing <<1))+:6]
<= #TCQ first_edge_taps_r;
if (found_second_edge_r)
dbg_cpt_second_edge_taps[((cal1_cnt_cpt_timing <<2) + (cal1_cnt_cpt_timing <<1))+:6]
<= #TCQ second_edge_taps_r;
end
endgenerate
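  // Note: the ((x << 2) + (x << 1)) indexing above computes 6*x for the
  // 6-bit-per-DQS debug fields without inferring a DSP multiplier (the same
  // trick described for regl_dqs_cnt_timing further below).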
assign rdlvl_stg1_rnk_done = rdlvl_rank_done_r;// || regl_rank_done_r;
assign mpr_rnk_done = mpr_rank_done_r;
assign mpr_rdlvl_done = ((DRAM_TYPE == "DDR3") && (OCAL_EN == "ON")) ? //&& (SIM_CAL_OPTION == "NONE")
mpr_rdlvl_done_r : 1'b1;
//**************************************************************************
// DQS count to hard PHY during write calibration using Phaser_OUT Stage2
// coarse delay
//**************************************************************************
assign pi_stg2_rdlvl_cnt = (cal1_state_r == CAL1_REGL_LOAD) ? regl_dqs_cnt_r : cal1_cnt_cpt_r;
assign idelay_ce = cal1_dq_idel_ce;
assign idelay_inc = cal1_dq_idel_inc;
//***************************************************************************
// Assert calib_in_common in FAST_CAL mode for IDELAY tap increments to all
// DQs simultaneously
//***************************************************************************
always @(posedge clk) begin
if (rst)
rdlvl_assrt_common <= #TCQ 1'b0;
else if ((SIM_CAL_OPTION == "FAST_CAL") & rdlvl_stg1_start &
!rdlvl_stg1_start_r)
rdlvl_assrt_common <= #TCQ 1'b1;
else if (!idel_pat_data_match_r & idel_pat_data_match)
rdlvl_assrt_common <= #TCQ 1'b0;
end
//***************************************************************************
// Data mux to route appropriate bit to calibration logic - i.e. calibration
// is done sequentially, one bit (or DQS group) at a time
//***************************************************************************
generate
if (nCK_PER_CLK == 4) begin: rd_data_div4_logic_clk
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
assign rd_data_rise2 = rd_data[5*DQ_WIDTH-1:4*DQ_WIDTH];
assign rd_data_fall2 = rd_data[6*DQ_WIDTH-1:5*DQ_WIDTH];
assign rd_data_rise3 = rd_data[7*DQ_WIDTH-1:6*DQ_WIDTH];
assign rd_data_fall3 = rd_data[8*DQ_WIDTH-1:7*DQ_WIDTH];
end else begin: rd_data_div2_logic_clk
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
end
endgenerate
always @(posedge clk) begin
rd_mux_sel_r <= #TCQ cal1_cnt_cpt_r;
end
// Register outputs for improved timing.
// NOTE: Will need to change when per-bit DQ deskew is supported.
  // Currently all bits in a DQS group are checked in aggregate
generate
genvar mux_i;
for (mux_i = 0; mux_i < DRAM_WIDTH; mux_i = mux_i + 1) begin: gen_mux_rd
always @(posedge clk) begin
mux_rd_rise0_r[mux_i] <= #TCQ rd_data_rise0[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall0_r[mux_i] <= #TCQ rd_data_fall0[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_rise1_r[mux_i] <= #TCQ rd_data_rise1[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall1_r[mux_i] <= #TCQ rd_data_fall1[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_rise2_r[mux_i] <= #TCQ rd_data_rise2[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall2_r[mux_i] <= #TCQ rd_data_fall2[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_rise3_r[mux_i] <= #TCQ rd_data_rise3[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall3_r[mux_i] <= #TCQ rd_data_fall3[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
end
end
endgenerate
//***************************************************************************
// MPR Read Leveling
//***************************************************************************
  // Store the previous read data for later comparison. Only bit 0 is used,
  // since the MPR contents (01010101) are generally available on DQ[0] per
  // the JEDEC spec.
always @(posedge clk)begin
if ((cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) ||
((cal1_state_r == CAL1_MPR_PAT_DETECT) && (idel_pat_detect_valid_r)))begin
mpr_rd_rise0_prev_r <= #TCQ mux_rd_rise0_r[0];
mpr_rd_fall0_prev_r <= #TCQ mux_rd_fall0_r[0];
mpr_rd_rise1_prev_r <= #TCQ mux_rd_rise1_r[0];
mpr_rd_fall1_prev_r <= #TCQ mux_rd_fall1_r[0];
mpr_rd_rise2_prev_r <= #TCQ mux_rd_rise2_r[0];
mpr_rd_fall2_prev_r <= #TCQ mux_rd_fall2_r[0];
mpr_rd_rise3_prev_r <= #TCQ mux_rd_rise3_r[0];
mpr_rd_fall3_prev_r <= #TCQ mux_rd_fall3_r[0];
end
end
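  // With the MPR predefined pattern, the expected capture is rise = 0 /
  // fall = 1 on every phase (the "01010101" case referenced below);
  // rise = 1 / fall = 0 ("10101010") is treated as the incorrect alignment.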
generate
if (nCK_PER_CLK == 4) begin: mpr_4to1
      // Require the captured data to be stable for a count of 2 IDELAY taps
      // (at 78 ps resolution)
always @(posedge clk) begin
if (rst | (cal1_state_r == CAL1_NEW_DQS_PREWAIT) |
//(cal1_state_r == CAL1_DETECT_EDGE) |
(mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) |
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) |
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) |
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]) |
(mpr_rd_rise2_prev_r != mux_rd_rise2_r[0]) |
(mpr_rd_fall2_prev_r != mux_rd_fall2_r[0]) |
(mpr_rd_rise3_prev_r != mux_rd_rise3_r[0]) |
(mpr_rd_fall3_prev_r != mux_rd_fall3_r[0]))
stable_idel_cnt <= #TCQ 3'd0;
else if ((|idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing]) &
((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idel_pat_detect_valid_r))) begin
if ((mpr_rd_rise0_prev_r == mux_rd_rise0_r[0]) &
(mpr_rd_fall0_prev_r == mux_rd_fall0_r[0]) &
(mpr_rd_rise1_prev_r == mux_rd_rise1_r[0]) &
(mpr_rd_fall1_prev_r == mux_rd_fall1_r[0]) &
(mpr_rd_rise2_prev_r == mux_rd_rise2_r[0]) &
(mpr_rd_fall2_prev_r == mux_rd_fall2_r[0]) &
(mpr_rd_rise3_prev_r == mux_rd_rise3_r[0]) &
(mpr_rd_fall3_prev_r == mux_rd_fall3_r[0]) &
(stable_idel_cnt < 3'd2))
stable_idel_cnt <= #TCQ stable_idel_cnt + 1;
end
end
always @(posedge clk) begin
if (rst |
(mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r &
mpr_rd_rise2_prev_r & ~mpr_rd_fall2_prev_r &
mpr_rd_rise3_prev_r & ~mpr_rd_fall3_prev_r))
inhibit_edge_detect_r <= 1'b1;
// Wait for settling time after idelay tap increment before
// de-asserting inhibit_edge_detect_r
else if ((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd1) &
(~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r &
~mpr_rd_rise2_prev_r & mpr_rd_fall2_prev_r &
~mpr_rd_rise3_prev_r & mpr_rd_fall3_prev_r))
inhibit_edge_detect_r <= 1'b0;
end
//checking for transition from 01010101 to 10101010
always @(posedge clk)begin
if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) |
inhibit_edge_detect_r)
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 10101010 is not the correct pattern
else if ((mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r &
mpr_rd_rise2_prev_r & ~mpr_rd_fall2_prev_r &
mpr_rd_rise3_prev_r & ~mpr_rd_fall3_prev_r) ||
((stable_idel_cnt < 3'd2) & (cal1_state_r == CAL1_MPR_PAT_DETECT)
&& (idel_pat_detect_valid_r)))
//|| (idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] < 5'd2))
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 01010101 to 10101010 is the correct transition
else if ((~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r &
~mpr_rd_rise2_prev_r & mpr_rd_fall2_prev_r &
~mpr_rd_rise3_prev_r & mpr_rd_fall3_prev_r) &
(stable_idel_cnt == 3'd2) &
((mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) ||
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) ||
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) ||
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]) ||
(mpr_rd_rise2_prev_r != mux_rd_rise2_r[0]) ||
(mpr_rd_fall2_prev_r != mux_rd_fall2_r[0]) ||
(mpr_rd_rise3_prev_r != mux_rd_rise3_r[0]) ||
(mpr_rd_fall3_prev_r != mux_rd_fall3_r[0])))
idel_mpr_pat_detect_r <= #TCQ 1'b1;
end
end else if (nCK_PER_CLK == 2) begin: mpr_2to1
      // Require the captured data to be stable for a count of 2 IDELAY taps
      // (at 78 ps resolution)
always @(posedge clk) begin
if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) |
(mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) |
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) |
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) |
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]))
stable_idel_cnt <= #TCQ 3'd0;
else if ((idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd0) &
((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idel_pat_detect_valid_r))) begin
if ((mpr_rd_rise0_prev_r == mux_rd_rise0_r[0]) &
(mpr_rd_fall0_prev_r == mux_rd_fall0_r[0]) &
(mpr_rd_rise1_prev_r == mux_rd_rise1_r[0]) &
(mpr_rd_fall1_prev_r == mux_rd_fall1_r[0]) &
(stable_idel_cnt < 3'd2))
stable_idel_cnt <= #TCQ stable_idel_cnt + 1;
end
end
always @(posedge clk) begin
if (rst |
(mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r))
inhibit_edge_detect_r <= 1'b1;
else if ((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd1) &
(~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r))
inhibit_edge_detect_r <= 1'b0;
end
//checking for transition from 01010101 to 10101010
always @(posedge clk)begin
if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) |
inhibit_edge_detect_r)
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 1010 is not the correct pattern
else if ((mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r) ||
((stable_idel_cnt < 3'd2) & (cal1_state_r == CAL1_MPR_PAT_DETECT)
& (idel_pat_detect_valid_r)))
// ||(idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] < 5'd2))
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 0101 to 1010 is the correct transition
else if ((~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r) &
(stable_idel_cnt == 3'd2) &
((mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) ||
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) ||
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) ||
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0])))
idel_mpr_pat_detect_r <= #TCQ 1'b1;
end
end
endgenerate
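  // Edge detection for MPR read leveling is therefore gated two ways:
  // stable_idel_cnt requires the captured pattern to match its previous
  // value for two consecutive valid samples, and inhibit_edge_detect_r
  // blocks detection until the expected alignment has been observed with
  // more than one IDELAY tap applied.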
// Registered signal indicates when mux_rd_rise/fall_r is valid
always @(posedge clk)
mux_rd_valid_r <= #TCQ ~phy_if_empty;
//***************************************************************************
// Decrement initial Phaser_IN fine delay value before proceeding with
// read calibration
//***************************************************************************
always @(posedge clk) begin
dqs_po_dec_done_r1 <= #TCQ dqs_po_dec_done;
dqs_po_dec_done_r2 <= #TCQ dqs_po_dec_done_r1;
fine_dly_dec_done_r2 <= #TCQ fine_dly_dec_done_r1;
pi_fine_dly_dec_done <= #TCQ fine_dly_dec_done_r2;
end
always @(posedge clk) begin
if (rst || pi_cnt_dec)
wait_cnt_r <= #TCQ 'd8;
else if (dqs_po_dec_done_r2 && (wait_cnt_r > 'd0))
wait_cnt_r <= #TCQ wait_cnt_r - 1;
end
always @(posedge clk) begin
if (rst) begin
pi_rdval_cnt <= #TCQ 'd0;
end else if (dqs_po_dec_done_r1 && ~dqs_po_dec_done_r2) begin
pi_rdval_cnt <= #TCQ pi_counter_read_val;
end else if (pi_rdval_cnt > 'd0) begin
if (pi_cnt_dec)
pi_rdval_cnt <= #TCQ pi_rdval_cnt - 1;
else
pi_rdval_cnt <= #TCQ pi_rdval_cnt;
end else if (pi_rdval_cnt == 'd0) begin
pi_rdval_cnt <= #TCQ pi_rdval_cnt;
end
end
always @(posedge clk) begin
if (rst || (pi_rdval_cnt == 'd0))
pi_cnt_dec <= #TCQ 1'b0;
else if (dqs_po_dec_done_r2 && (pi_rdval_cnt > 'd0)
&& (wait_cnt_r == 'd1))
pi_cnt_dec <= #TCQ 1'b1;
else
pi_cnt_dec <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (rst) begin
fine_dly_dec_done_r1 <= #TCQ 1'b0;
end else if (((pi_cnt_dec == 'd1) && (pi_rdval_cnt == 'd1)) ||
(dqs_po_dec_done_r2 && (pi_rdval_cnt == 'd0))) begin
fine_dly_dec_done_r1 <= #TCQ 1'b1;
end
end
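  // Summary of the decrement handshake above: once dqs_po_dec_done has been
  // registered, the initial Phaser_IN fine tap count (pi_counter_read_val)
  // is captured into pi_rdval_cnt; pi_cnt_dec then pulses once per 8-cycle
  // wait_cnt_r interval, decrementing pi_rdval_cnt until it reaches zero, at
  // which point fine_dly_dec_done_r1 (and, two flops later,
  // pi_fine_dly_dec_done) asserts.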
//***************************************************************************
  // Demultiplexer to control Phaser_IN delay values
//***************************************************************************
// Read DQS
always @(posedge clk) begin
if (rst) begin
pi_en_stg2_f_timing <= #TCQ 'b0;
pi_stg2_f_incdec_timing <= #TCQ 'b0;
end else if (pi_cnt_dec) begin
pi_en_stg2_f_timing <= #TCQ 'b1;
pi_stg2_f_incdec_timing <= #TCQ 'b0;
end else if (cal1_dlyce_cpt_r) begin
if ((SIM_CAL_OPTION == "NONE") ||
(SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin
// Change only specified DQS
pi_en_stg2_f_timing <= #TCQ 1'b1;
pi_stg2_f_incdec_timing <= #TCQ cal1_dlyinc_cpt_r;
end else if (SIM_CAL_OPTION == "FAST_CAL") begin
        // If simulating with calibration "shortcuts" enabled, apply the
        // results to all DQSs (i.e. assume the same delay on all
        // DQSs).
pi_en_stg2_f_timing <= #TCQ 1'b1;
pi_stg2_f_incdec_timing <= #TCQ cal1_dlyinc_cpt_r;
end
end else begin
pi_en_stg2_f_timing <= #TCQ 'b0;
pi_stg2_f_incdec_timing <= #TCQ 'b0;
end
end
// registered for timing
always @(posedge clk) begin
pi_en_stg2_f <= #TCQ pi_en_stg2_f_timing;
pi_stg2_f_incdec <= #TCQ pi_stg2_f_incdec_timing;
end
  // This counter is used to implement the settling time between
  // Phaser_IN rank register loads to different DQSs
always @(posedge clk) begin
if (rst)
done_cnt <= #TCQ 'b0;
else if (((cal1_state_r == CAL1_REGL_LOAD) &&
(cal1_state_r1 == CAL1_NEXT_DQS)) ||
((done_cnt == 4'd1) && (cal1_state_r != CAL1_DONE)))
done_cnt <= #TCQ 4'b1010;
else if (done_cnt > 'b0)
done_cnt <= #TCQ done_cnt - 1;
end
  // During rank register loading, the rank count must be sent to
  // Phaser_IN via the phy_ctl_wd?? If so, phy_init will have to
  // issue NOPs during rank register loading with the appropriate
  // rank count
always @(posedge clk) begin
if (rst || (regl_rank_done_r == 1'b1))
regl_rank_done_r <= #TCQ 1'b0;
else if ((regl_dqs_cnt == DQS_WIDTH-1) &&
(regl_rank_cnt != RANKS-1) &&
(done_cnt == 4'd1))
regl_rank_done_r <= #TCQ 1'b1;
end
  // Temporary wire for timing.
  // Computing 6*regl_dqs_cnt directly in the always block below causes
  // timing issues due to DSP block inference.
  // It is replaced with two left shifts plus one left shift
  // ((x << 2) + (x << 1)) to avoid the DSP multiplier.
assign regl_dqs_cnt_timing = {2'd0, regl_dqs_cnt};
// Load Phaser_OUT rank register with rdlvl delay value
// for each DQS per rank.
always @(posedge clk) begin
if (rst || (done_cnt == 4'd0)) begin
pi_stg2_load_timing <= #TCQ 'b0;
pi_stg2_reg_l_timing <= #TCQ 'b0;
end else if ((cal1_state_r == CAL1_REGL_LOAD) &&
(regl_dqs_cnt <= DQS_WIDTH-1) && (done_cnt == 4'd1)) begin
pi_stg2_load_timing <= #TCQ 'b1;
pi_stg2_reg_l_timing <= #TCQ
rdlvl_dqs_tap_cnt_r[rnk_cnt_r][regl_dqs_cnt];
end else begin
pi_stg2_load_timing <= #TCQ 'b0;
pi_stg2_reg_l_timing <= #TCQ 'b0;
end
end
// registered for timing
always @(posedge clk) begin
pi_stg2_load <= #TCQ pi_stg2_load_timing;
pi_stg2_reg_l <= #TCQ pi_stg2_reg_l_timing;
end
always @(posedge clk) begin
if (rst || (done_cnt == 4'd0) ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
regl_rank_cnt <= #TCQ 2'b00;
else if ((cal1_state_r == CAL1_REGL_LOAD) &&
(regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1)) begin
if (regl_rank_cnt == RANKS-1)
regl_rank_cnt <= #TCQ regl_rank_cnt;
else
regl_rank_cnt <= #TCQ regl_rank_cnt + 1;
end
end
always @(posedge clk) begin
if (rst || (done_cnt == 4'd0) ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
regl_dqs_cnt <= #TCQ {DQS_CNT_WIDTH+1{1'b0}};
else if ((cal1_state_r == CAL1_REGL_LOAD) &&
(regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1)) begin
if (regl_rank_cnt == RANKS-1)
regl_dqs_cnt <= #TCQ regl_dqs_cnt;
else
regl_dqs_cnt <= #TCQ 'b0;
end else if ((cal1_state_r == CAL1_REGL_LOAD) && (regl_dqs_cnt != DQS_WIDTH-1)
&& (done_cnt == 4'd1))
regl_dqs_cnt <= #TCQ regl_dqs_cnt + 1;
else
regl_dqs_cnt <= #TCQ regl_dqs_cnt;
end
always @(posedge clk)
regl_dqs_cnt_r <= #TCQ regl_dqs_cnt;
//*****************************************************************
// DQ Stage 1 CALIBRATION INCREMENT/DECREMENT LOGIC:
  // The actual IDELAY elements for each of the DQ bits are set via the
  // DLYVAL parallel load port. However, the stage 1 calibration
  // algorithm (for the most part) only needs to increment or decrement the
  // DQ IDELAY value by 1 at any one time.
//*****************************************************************
// Chip-select generation for each of the individual counters tracking
// IDELAY tap values for each DQ
generate
for (z = 0; z < DQS_WIDTH; z = z + 1) begin: gen_dlyce_dq
always @(posedge clk)
if (rst)
dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0;
else
if (SIM_CAL_OPTION == "SKIP_CAL")
// If skipping calibration altogether (only for simulation), no
// need to set DQ IODELAY values - they are hardcoded
dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0;
else if (SIM_CAL_OPTION == "FAST_CAL") begin
// If fast calibration option (simulation only) selected, DQ
// IODELAYs across all bytes are updated simultaneously
// (although per-bit deskew within DQS[0] is still supported)
for (h = 0; h < DRAM_WIDTH; h = h + 1) begin
dlyce_dq_r[DRAM_WIDTH*z + h] <= #TCQ cal1_dlyce_dq_r;
end
end else if ((SIM_CAL_OPTION == "NONE") ||
(SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin
if (cal1_cnt_cpt_r == z) begin
for (g = 0; g < DRAM_WIDTH; g = g + 1) begin
dlyce_dq_r[DRAM_WIDTH*z + g]
<= #TCQ cal1_dlyce_dq_r;
end
end else
dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0;
end
end
endgenerate
// Also delay increment/decrement control to match delay on DLYCE
always @(posedge clk)
if (rst)
dlyinc_dq_r <= #TCQ 1'b0;
else
dlyinc_dq_r <= #TCQ cal1_dlyinc_dq_r;
// Each DQ has a counter associated with it to record current read-leveling
// delay value
always @(posedge clk)
    // Reset, or skipping calibration altogether
if (rst | (SIM_CAL_OPTION == "SKIP_CAL")) begin
for (aa = 0; aa < RANKS; aa = aa + 1) begin: rst_dlyval_dq_reg_r
for (bb = 0; bb < DQ_WIDTH; bb = bb + 1)
dlyval_dq_reg_r[aa][bb] <= #TCQ 'b0;
end
end else if (SIM_CAL_OPTION == "FAST_CAL") begin
for (n = 0; n < RANKS; n = n + 1) begin: gen_dlyval_dq_reg_rnk
for (r = 0; r < DQ_WIDTH; r = r + 1) begin: gen_dlyval_dq_reg
if (dlyce_dq_r[r]) begin
if (dlyinc_dq_r)
dlyval_dq_reg_r[n][r] <= #TCQ dlyval_dq_reg_r[n][r] + 5'h01;
else
dlyval_dq_reg_r[n][r] <= #TCQ dlyval_dq_reg_r[n][r] - 5'h01;
end
end
end
end else begin
if (dlyce_dq_r[cal1_cnt_cpt_r]) begin
if (dlyinc_dq_r)
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] <= #TCQ
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] + 5'h01;
else
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] <= #TCQ
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] - 5'h01;
end
end
// Register for timing (help with logic placement)
always @(posedge clk) begin
for (cc = 0; cc < RANKS; cc = cc + 1) begin: dlyval_dq_assgn
for (dd = 0; dd < DQ_WIDTH; dd = dd + 1)
dlyval_dq[((5*dd)+(cc*DQ_WIDTH*5))+:5] <= #TCQ dlyval_dq_reg_r[cc][dd];
end
end
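  // dlyval_dq packing: each DQ gets a 5-bit field at
  // dlyval_dq[5*(rank*DQ_WIDTH + dq) +: 5], i.e. all DQ fields for rank 0
  // first, followed by rank 1, and so on.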
//***************************************************************************
// Generate signal used to delay calibration state machine - used when:
// (1) IDELAY value changed
// (2) RD_MUX_SEL value changed
// Use when a delay is necessary to give the change time to propagate
// through the data pipeline (through IDELAY and ISERDES, and fabric
// pipeline stages)
//***************************************************************************
// List all the stage 1 calibration wait states here.
// verilint STARC-2.7.3.3b off
always @(posedge clk)
if ((cal1_state_r == CAL1_NEW_DQS_WAIT) ||
(cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) ||
(cal1_state_r == CAL1_NEW_DQS_PREWAIT) ||
(cal1_state_r == CAL1_VALID_WAIT) ||
(cal1_state_r == CAL1_PB_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_PB_INC_CPT_WAIT) ||
(cal1_state_r == CAL1_PB_DEC_CPT_LEFT_WAIT) ||
(cal1_state_r == CAL1_PB_INC_DQ_WAIT) ||
(cal1_state_r == CAL1_PB_DEC_CPT_WAIT) ||
(cal1_state_r == CAL1_IDEL_INC_CPT_WAIT) ||
(cal1_state_r == CAL1_IDEL_DEC_CPT_WAIT) ||
(cal1_state_r == CAL1_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_DQ_IDEL_TAP_INC_WAIT) ||
(cal1_state_r == CAL1_DQ_IDEL_TAP_DEC_WAIT))
cal1_wait_cnt_en_r <= #TCQ 1'b1;
else
cal1_wait_cnt_en_r <= #TCQ 1'b0;
// verilint STARC-2.7.3.3b on
always @(posedge clk)
if (!cal1_wait_cnt_en_r) begin
cal1_wait_cnt_r <= #TCQ 5'b00000;
cal1_wait_r <= #TCQ 1'b1;
end else begin
if (cal1_wait_cnt_r != PIPE_WAIT_CNT - 1) begin
cal1_wait_cnt_r <= #TCQ cal1_wait_cnt_r + 1;
cal1_wait_r <= #TCQ 1'b1;
end else begin
// Need to reset to 0 to handle the case when there are two
// different WAIT states back-to-back
cal1_wait_cnt_r <= #TCQ 5'b00000;
cal1_wait_r <= #TCQ 1'b0;
end
end
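  // Behavior of the wait counter above: while one of the WAIT states is
  // held, cal1_wait_r stays asserted and de-asserts for a single cycle
  // roughly every PIPE_WAIT_CNT cycles (16-18 with the default parameters),
  // giving the state machine its settling-time gate.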
//***************************************************************************
  // Generate request to PHY_INIT logic to issue a precharge. Required when
  // calibration can take a long time (during which there are only continuous
  // reads present on this bus). In this case periodic precharges must be
  // issued to avoid tRAS violations. This signal must meet the following
  // requirements: (1) only transition from 0->1 when a precharge is first
  // needed, (2) stay at 1 and only transition 1->0 when RDLVL_PRECH_DONE
  // is asserted
//***************************************************************************
always @(posedge clk)
if (rst)
rdlvl_prech_req <= #TCQ 1'b0;
else
rdlvl_prech_req <= #TCQ cal1_prech_req_r;
//***************************************************************************
  // Serial-to-parallel register to store last RD_SHIFT_LEN cycles of
// data from ISERDES. The value of this register is also stored, so that
// previous and current values of the ISERDES data can be compared while
// varying the IODELAY taps to see if an "edge" of the data valid window
// has been encountered since the last IODELAY tap adjustment
//***************************************************************************
//***************************************************************************
  // Shift register to store last RD_SHIFT_LEN cycles of data from ISERDES
  // NOTE: Written using discrete flops, but an SRL can be used if the
  // matching logic does the comparison sequentially, rather than in parallel
//***************************************************************************
generate
genvar rd_i;
if (nCK_PER_CLK == 4) begin: gen_sr_div4
if (RD_SHIFT_LEN == 1) begin: gen_sr_len_eq1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ mux_rd_rise0_r[rd_i];
sr_fall0_r[rd_i] <= #TCQ mux_rd_fall0_r[rd_i];
sr_rise1_r[rd_i] <= #TCQ mux_rd_rise1_r[rd_i];
sr_fall1_r[rd_i] <= #TCQ mux_rd_fall1_r[rd_i];
sr_rise2_r[rd_i] <= #TCQ mux_rd_rise2_r[rd_i];
sr_fall2_r[rd_i] <= #TCQ mux_rd_fall2_r[rd_i];
sr_rise3_r[rd_i] <= #TCQ mux_rd_rise3_r[rd_i];
sr_fall3_r[rd_i] <= #TCQ mux_rd_fall3_r[rd_i];
end
end
end
end else if (RD_SHIFT_LEN > 1) begin: gen_sr_len_gt1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ {sr_rise0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise0_r[rd_i]};
sr_fall0_r[rd_i] <= #TCQ {sr_fall0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall0_r[rd_i]};
sr_rise1_r[rd_i] <= #TCQ {sr_rise1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise1_r[rd_i]};
sr_fall1_r[rd_i] <= #TCQ {sr_fall1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall1_r[rd_i]};
sr_rise2_r[rd_i] <= #TCQ {sr_rise2_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise2_r[rd_i]};
sr_fall2_r[rd_i] <= #TCQ {sr_fall2_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall2_r[rd_i]};
sr_rise3_r[rd_i] <= #TCQ {sr_rise3_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise3_r[rd_i]};
sr_fall3_r[rd_i] <= #TCQ {sr_fall3_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall3_r[rd_i]};
end
end
end
end
end else if (nCK_PER_CLK == 2) begin: gen_sr_div2
if (RD_SHIFT_LEN == 1) begin: gen_sr_len_eq1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ {mux_rd_rise0_r[rd_i]};
sr_fall0_r[rd_i] <= #TCQ {mux_rd_fall0_r[rd_i]};
sr_rise1_r[rd_i] <= #TCQ {mux_rd_rise1_r[rd_i]};
sr_fall1_r[rd_i] <= #TCQ {mux_rd_fall1_r[rd_i]};
end
end
end
end else if (RD_SHIFT_LEN > 1) begin: gen_sr_len_gt1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ {sr_rise0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise0_r[rd_i]};
sr_fall0_r[rd_i] <= #TCQ {sr_fall0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall0_r[rd_i]};
sr_rise1_r[rd_i] <= #TCQ {sr_rise1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise1_r[rd_i]};
sr_fall1_r[rd_i] <= #TCQ {sr_fall1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall1_r[rd_i]};
end
end
end
end
end
endgenerate
//***************************************************************************
// Conversion to pattern calibration
//***************************************************************************
// Pattern for DQ IDELAY calibration
//*****************************************************************
  // Expected data pattern when DQ is shifted to the right such that
  // DQS falls before the left edge of the DVW:
// Based on pattern of ({rise,fall}) =
// 0x1, 0xB, 0x4, 0x4, 0xB, 0x9
// Each nibble will look like:
// bit3: 0, 1, 0, 0, 1, 1
// bit2: 0, 0, 1, 1, 0, 0
// bit1: 0, 1, 0, 0, 1, 0
// bit0: 1, 1, 0, 0, 1, 1
// Or if the write is early it could look like:
// 0x4, 0x4, 0xB, 0x9, 0x6, 0xE
// bit3: 0, 0, 1, 1, 0, 1
// bit2: 1, 1, 0, 0, 1, 1
// bit1: 0, 0, 1, 0, 1, 1
// bit0: 0, 0, 1, 1, 0, 0
// Change the hard-coded pattern below accordingly as RD_SHIFT_LEN
// and the actual training pattern contents change
//*****************************************************************
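  // Note: for each bit position, the "correct data valid window" patterns
  // (pat0_*/pat1_*) below appear to be the corresponding IDELAY patterns
  // (idel_pat0_*/idel_pat1_*) advanced by one half-clock-cycle slot in the
  // rise/fall sequence.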
generate
if (nCK_PER_CLK == 4) begin: gen_pat_div4
// Pattern for DQ IDELAY increment
// Target pattern for "early write"
assign {idel_pat0_rise0[3], idel_pat0_rise0[2],
idel_pat0_rise0[1], idel_pat0_rise0[0]} = 4'h1;
assign {idel_pat0_fall0[3], idel_pat0_fall0[2],
idel_pat0_fall0[1], idel_pat0_fall0[0]} = 4'h7;
assign {idel_pat0_rise1[3], idel_pat0_rise1[2],
idel_pat0_rise1[1], idel_pat0_rise1[0]} = 4'hE;
assign {idel_pat0_fall1[3], idel_pat0_fall1[2],
idel_pat0_fall1[1], idel_pat0_fall1[0]} = 4'hC;
assign {idel_pat0_rise2[3], idel_pat0_rise2[2],
idel_pat0_rise2[1], idel_pat0_rise2[0]} = 4'h9;
assign {idel_pat0_fall2[3], idel_pat0_fall2[2],
idel_pat0_fall2[1], idel_pat0_fall2[0]} = 4'h2;
assign {idel_pat0_rise3[3], idel_pat0_rise3[2],
idel_pat0_rise3[1], idel_pat0_rise3[0]} = 4'h4;
assign {idel_pat0_fall3[3], idel_pat0_fall3[2],
idel_pat0_fall3[1], idel_pat0_fall3[0]} = 4'hB;
// Target pattern for "on-time write"
assign {idel_pat1_rise0[3], idel_pat1_rise0[2],
idel_pat1_rise0[1], idel_pat1_rise0[0]} = 4'h4;
assign {idel_pat1_fall0[3], idel_pat1_fall0[2],
idel_pat1_fall0[1], idel_pat1_fall0[0]} = 4'h9;
assign {idel_pat1_rise1[3], idel_pat1_rise1[2],
idel_pat1_rise1[1], idel_pat1_rise1[0]} = 4'h3;
assign {idel_pat1_fall1[3], idel_pat1_fall1[2],
idel_pat1_fall1[1], idel_pat1_fall1[0]} = 4'h7;
assign {idel_pat1_rise2[3], idel_pat1_rise2[2],
idel_pat1_rise2[1], idel_pat1_rise2[0]} = 4'hE;
assign {idel_pat1_fall2[3], idel_pat1_fall2[2],
idel_pat1_fall2[1], idel_pat1_fall2[0]} = 4'hC;
assign {idel_pat1_rise3[3], idel_pat1_rise3[2],
idel_pat1_rise3[1], idel_pat1_rise3[0]} = 4'h9;
assign {idel_pat1_fall3[3], idel_pat1_fall3[2],
idel_pat1_fall3[1], idel_pat1_fall3[0]} = 4'h2;
// Correct data valid window for "early write"
assign {pat0_rise0[3], pat0_rise0[2],
pat0_rise0[1], pat0_rise0[0]} = 4'h7;
assign {pat0_fall0[3], pat0_fall0[2],
pat0_fall0[1], pat0_fall0[0]} = 4'hE;
assign {pat0_rise1[3], pat0_rise1[2],
pat0_rise1[1], pat0_rise1[0]} = 4'hC;
assign {pat0_fall1[3], pat0_fall1[2],
pat0_fall1[1], pat0_fall1[0]} = 4'h9;
assign {pat0_rise2[3], pat0_rise2[2],
pat0_rise2[1], pat0_rise2[0]} = 4'h2;
assign {pat0_fall2[3], pat0_fall2[2],
pat0_fall2[1], pat0_fall2[0]} = 4'h4;
assign {pat0_rise3[3], pat0_rise3[2],
pat0_rise3[1], pat0_rise3[0]} = 4'hB;
assign {pat0_fall3[3], pat0_fall3[2],
pat0_fall3[1], pat0_fall3[0]} = 4'h1;
// Correct data valid window for "on-time write"
assign {pat1_rise0[3], pat1_rise0[2],
pat1_rise0[1], pat1_rise0[0]} = 4'h9;
assign {pat1_fall0[3], pat1_fall0[2],
pat1_fall0[1], pat1_fall0[0]} = 4'h3;
assign {pat1_rise1[3], pat1_rise1[2],
pat1_rise1[1], pat1_rise1[0]} = 4'h7;
assign {pat1_fall1[3], pat1_fall1[2],
pat1_fall1[1], pat1_fall1[0]} = 4'hE;
assign {pat1_rise2[3], pat1_rise2[2],
pat1_rise2[1], pat1_rise2[0]} = 4'hC;
assign {pat1_fall2[3], pat1_fall2[2],
pat1_fall2[1], pat1_fall2[0]} = 4'h9;
assign {pat1_rise3[3], pat1_rise3[2],
pat1_rise3[1], pat1_rise3[0]} = 4'h2;
assign {pat1_fall3[3], pat1_fall3[2],
pat1_fall3[1], pat1_fall3[0]} = 4'h4;
end else if (nCK_PER_CLK == 2) begin: gen_pat_div2
// Pattern for DQ IDELAY increment
// Target pattern for "early write"
assign idel_pat0_rise0[3] = 2'b01;
assign idel_pat0_fall0[3] = 2'b00;
assign idel_pat0_rise1[3] = 2'b10;
assign idel_pat0_fall1[3] = 2'b11;
assign idel_pat0_rise0[2] = 2'b00;
assign idel_pat0_fall0[2] = 2'b10;
assign idel_pat0_rise1[2] = 2'b11;
assign idel_pat0_fall1[2] = 2'b10;
assign idel_pat0_rise0[1] = 2'b00;
assign idel_pat0_fall0[1] = 2'b11;
assign idel_pat0_rise1[1] = 2'b10;
assign idel_pat0_fall1[1] = 2'b01;
assign idel_pat0_rise0[0] = 2'b11;
assign idel_pat0_fall0[0] = 2'b10;
assign idel_pat0_rise1[0] = 2'b00;
assign idel_pat0_fall1[0] = 2'b01;
// Target pattern for "on-time write"
assign idel_pat1_rise0[3] = 2'b01;
assign idel_pat1_fall0[3] = 2'b11;
assign idel_pat1_rise1[3] = 2'b01;
assign idel_pat1_fall1[3] = 2'b00;
assign idel_pat1_rise0[2] = 2'b11;
assign idel_pat1_fall0[2] = 2'b01;
assign idel_pat1_rise1[2] = 2'b00;
assign idel_pat1_fall1[2] = 2'b10;
assign idel_pat1_rise0[1] = 2'b01;
assign idel_pat1_fall0[1] = 2'b00;
assign idel_pat1_rise1[1] = 2'b10;
assign idel_pat1_fall1[1] = 2'b11;
assign idel_pat1_rise0[0] = 2'b00;
assign idel_pat1_fall0[0] = 2'b10;
assign idel_pat1_rise1[0] = 2'b11;
assign idel_pat1_fall1[0] = 2'b10;
// Correct data valid window for "early write"
assign pat0_rise0[3] = 2'b00;
assign pat0_fall0[3] = 2'b10;
assign pat0_rise1[3] = 2'b11;
assign pat0_fall1[3] = 2'b10;
assign pat0_rise0[2] = 2'b10;
assign pat0_fall0[2] = 2'b11;
assign pat0_rise1[2] = 2'b10;
assign pat0_fall1[2] = 2'b00;
assign pat0_rise0[1] = 2'b11;
assign pat0_fall0[1] = 2'b10;
assign pat0_rise1[1] = 2'b01;
assign pat0_fall1[1] = 2'b00;
assign pat0_rise0[0] = 2'b10;
assign pat0_fall0[0] = 2'b00;
assign pat0_rise1[0] = 2'b01;
assign pat0_fall1[0] = 2'b11;
// Correct data valid window for "on-time write"
assign pat1_rise0[3] = 2'b11;
assign pat1_fall0[3] = 2'b01;
assign pat1_rise1[3] = 2'b00;
assign pat1_fall1[3] = 2'b10;
assign pat1_rise0[2] = 2'b01;
assign pat1_fall0[2] = 2'b00;
assign pat1_rise1[2] = 2'b10;
assign pat1_fall1[2] = 2'b11;
assign pat1_rise0[1] = 2'b00;
assign pat1_fall0[1] = 2'b10;
assign pat1_rise1[1] = 2'b11;
assign pat1_fall1[1] = 2'b10;
assign pat1_rise0[0] = 2'b10;
assign pat1_fall0[0] = 2'b11;
assign pat1_rise1[0] = 2'b10;
assign pat1_fall1[0] = 2'b00;
end
endgenerate
  // Each bit of each byte is compared to the expected pattern.
  // This is done to drastically decrease the chance that invalid data
  // clocked in while the DQ bus is tri-stated (possibly combined with
  // some correct data) will resemble the expected data pattern. A better
  // fix for this would be to change the training pattern and/or make the
  // pattern longer.
generate
genvar pt_i;
if (nCK_PER_CLK == 4) begin: gen_pat_match_div4
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
// DQ IDELAY pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat0_rise0[pt_i%4])
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat0_fall0[pt_i%4])
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat0_rise1[pt_i%4])
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat0_fall1[pt_i%4])
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == idel_pat0_rise2[pt_i%4])
idel_pat0_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == idel_pat0_fall2[pt_i%4])
idel_pat0_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == idel_pat0_rise3[pt_i%4])
idel_pat0_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == idel_pat0_fall3[pt_i%4])
idel_pat0_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat1_rise0[pt_i%4])
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat1_fall0[pt_i%4])
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat1_rise1[pt_i%4])
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat1_fall1[pt_i%4])
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == idel_pat1_rise2[pt_i%4])
idel_pat1_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == idel_pat1_fall2[pt_i%4])
idel_pat1_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == idel_pat1_rise3[pt_i%4])
idel_pat1_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == idel_pat1_fall3[pt_i%4])
idel_pat1_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
// DQS DVW pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat0_rise0[pt_i%4])
pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat0_fall0[pt_i%4])
pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat0_rise1[pt_i%4])
pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat0_fall1[pt_i%4])
pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat0_rise2[pt_i%4])
pat0_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat0_fall2[pt_i%4])
pat0_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == pat0_rise3[pt_i%4])
pat0_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == pat0_fall3[pt_i%4])
pat0_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat1_rise0[pt_i%4])
pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat1_fall0[pt_i%4])
pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat1_rise1[pt_i%4])
pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat1_fall1[pt_i%4])
pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat1_rise2[pt_i%4])
pat1_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat1_fall2[pt_i%4])
pat1_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == pat1_rise3[pt_i%4])
pat1_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == pat1_fall3[pt_i%4])
pat1_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
end
// Combine pattern match "subterms" for DQ-IDELAY stage
always @(posedge clk) begin
idel_pat0_match_rise0_and_r <= #TCQ &idel_pat0_match_rise0_r;
idel_pat0_match_fall0_and_r <= #TCQ &idel_pat0_match_fall0_r;
idel_pat0_match_rise1_and_r <= #TCQ &idel_pat0_match_rise1_r;
idel_pat0_match_fall1_and_r <= #TCQ &idel_pat0_match_fall1_r;
idel_pat0_match_rise2_and_r <= #TCQ &idel_pat0_match_rise2_r;
idel_pat0_match_fall2_and_r <= #TCQ &idel_pat0_match_fall2_r;
idel_pat0_match_rise3_and_r <= #TCQ &idel_pat0_match_rise3_r;
idel_pat0_match_fall3_and_r <= #TCQ &idel_pat0_match_fall3_r;
idel_pat0_data_match_r <= #TCQ (idel_pat0_match_rise0_and_r &&
idel_pat0_match_fall0_and_r &&
idel_pat0_match_rise1_and_r &&
idel_pat0_match_fall1_and_r &&
idel_pat0_match_rise2_and_r &&
idel_pat0_match_fall2_and_r &&
idel_pat0_match_rise3_and_r &&
idel_pat0_match_fall3_and_r);
end
always @(posedge clk) begin
idel_pat1_match_rise0_and_r <= #TCQ &idel_pat1_match_rise0_r;
idel_pat1_match_fall0_and_r <= #TCQ &idel_pat1_match_fall0_r;
idel_pat1_match_rise1_and_r <= #TCQ &idel_pat1_match_rise1_r;
idel_pat1_match_fall1_and_r <= #TCQ &idel_pat1_match_fall1_r;
idel_pat1_match_rise2_and_r <= #TCQ &idel_pat1_match_rise2_r;
idel_pat1_match_fall2_and_r <= #TCQ &idel_pat1_match_fall2_r;
idel_pat1_match_rise3_and_r <= #TCQ &idel_pat1_match_rise3_r;
idel_pat1_match_fall3_and_r <= #TCQ &idel_pat1_match_fall3_r;
idel_pat1_data_match_r <= #TCQ (idel_pat1_match_rise0_and_r &&
idel_pat1_match_fall0_and_r &&
idel_pat1_match_rise1_and_r &&
idel_pat1_match_fall1_and_r &&
idel_pat1_match_rise2_and_r &&
idel_pat1_match_fall2_and_r &&
idel_pat1_match_rise3_and_r &&
idel_pat1_match_fall3_and_r);
end
always @(*)
idel_pat_data_match <= #TCQ idel_pat0_data_match_r |
idel_pat1_data_match_r;
always @(posedge clk)
idel_pat_data_match_r <= #TCQ idel_pat_data_match;
// Combine pattern match "subterms" for DQS-PHASER_IN stage
always @(posedge clk) begin
pat0_match_rise0_and_r <= #TCQ &pat0_match_rise0_r;
pat0_match_fall0_and_r <= #TCQ &pat0_match_fall0_r;
pat0_match_rise1_and_r <= #TCQ &pat0_match_rise1_r;
pat0_match_fall1_and_r <= #TCQ &pat0_match_fall1_r;
pat0_match_rise2_and_r <= #TCQ &pat0_match_rise2_r;
pat0_match_fall2_and_r <= #TCQ &pat0_match_fall2_r;
pat0_match_rise3_and_r <= #TCQ &pat0_match_rise3_r;
pat0_match_fall3_and_r <= #TCQ &pat0_match_fall3_r;
pat0_data_match_r <= #TCQ (pat0_match_rise0_and_r &&
pat0_match_fall0_and_r &&
pat0_match_rise1_and_r &&
pat0_match_fall1_and_r &&
pat0_match_rise2_and_r &&
pat0_match_fall2_and_r &&
pat0_match_rise3_and_r &&
pat0_match_fall3_and_r);
end
always @(posedge clk) begin
pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r;
pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r;
pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r;
pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r;
pat1_match_rise2_and_r <= #TCQ &pat1_match_rise2_r;
pat1_match_fall2_and_r <= #TCQ &pat1_match_fall2_r;
pat1_match_rise3_and_r <= #TCQ &pat1_match_rise3_r;
pat1_match_fall3_and_r <= #TCQ &pat1_match_fall3_r;
pat1_data_match_r <= #TCQ (pat1_match_rise0_and_r &&
pat1_match_fall0_and_r &&
pat1_match_rise1_and_r &&
pat1_match_fall1_and_r &&
pat1_match_rise2_and_r &&
pat1_match_fall2_and_r &&
pat1_match_rise3_and_r &&
pat1_match_fall3_and_r);
end
assign pat_data_match_r = pat0_data_match_r | pat1_data_match_r;
end else if (nCK_PER_CLK == 2) begin: gen_pat_match_div2
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
// DQ IDELAY pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat0_rise0[pt_i%4])
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat0_fall0[pt_i%4])
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat0_rise1[pt_i%4])
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat0_fall1[pt_i%4])
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat1_rise0[pt_i%4])
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat1_fall0[pt_i%4])
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat1_rise1[pt_i%4])
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat1_fall1[pt_i%4])
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
// DQS DVW pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat0_rise0[pt_i%4])
pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat0_fall0[pt_i%4])
pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat0_rise1[pt_i%4])
pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat0_fall1[pt_i%4])
pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat1_rise0[pt_i%4])
pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat1_fall0[pt_i%4])
pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat1_rise1[pt_i%4])
pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat1_fall1[pt_i%4])
pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
end
// Combine pattern match "subterms" for DQ-IDELAY stage
always @(posedge clk) begin
idel_pat0_match_rise0_and_r <= #TCQ &idel_pat0_match_rise0_r;
idel_pat0_match_fall0_and_r <= #TCQ &idel_pat0_match_fall0_r;
idel_pat0_match_rise1_and_r <= #TCQ &idel_pat0_match_rise1_r;
idel_pat0_match_fall1_and_r <= #TCQ &idel_pat0_match_fall1_r;
idel_pat0_data_match_r <= #TCQ (idel_pat0_match_rise0_and_r &&
idel_pat0_match_fall0_and_r &&
idel_pat0_match_rise1_and_r &&
idel_pat0_match_fall1_and_r);
end
always @(posedge clk) begin
idel_pat1_match_rise0_and_r <= #TCQ &idel_pat1_match_rise0_r;
idel_pat1_match_fall0_and_r <= #TCQ &idel_pat1_match_fall0_r;
idel_pat1_match_rise1_and_r <= #TCQ &idel_pat1_match_rise1_r;
idel_pat1_match_fall1_and_r <= #TCQ &idel_pat1_match_fall1_r;
idel_pat1_data_match_r <= #TCQ (idel_pat1_match_rise0_and_r &&
idel_pat1_match_fall0_and_r &&
idel_pat1_match_rise1_and_r &&
idel_pat1_match_fall1_and_r);
end
always @(posedge clk) begin
if (sr_valid_r2)
idel_pat_data_match <= #TCQ idel_pat0_data_match_r |
idel_pat1_data_match_r;
end
//assign idel_pat_data_match = idel_pat0_data_match_r |
// idel_pat1_data_match_r;
always @(posedge clk)
idel_pat_data_match_r <= #TCQ idel_pat_data_match;
// Combine pattern match "subterms" for DQS-PHASER_IN stage
always @(posedge clk) begin
pat0_match_rise0_and_r <= #TCQ &pat0_match_rise0_r;
pat0_match_fall0_and_r <= #TCQ &pat0_match_fall0_r;
pat0_match_rise1_and_r <= #TCQ &pat0_match_rise1_r;
pat0_match_fall1_and_r <= #TCQ &pat0_match_fall1_r;
pat0_data_match_r <= #TCQ (pat0_match_rise0_and_r &&
pat0_match_fall0_and_r &&
pat0_match_rise1_and_r &&
pat0_match_fall1_and_r);
end
always @(posedge clk) begin
pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r;
pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r;
pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r;
pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r;
pat1_data_match_r <= #TCQ (pat1_match_rise0_and_r &&
pat1_match_fall0_and_r &&
pat1_match_rise1_and_r &&
pat1_match_fall1_and_r);
end
assign pat_data_match_r = pat0_data_match_r | pat1_data_match_r;
end
endgenerate
always @(posedge clk) begin
rdlvl_stg1_start_r <= #TCQ rdlvl_stg1_start;
mpr_rdlvl_done_r1 <= #TCQ mpr_rdlvl_done_r;
mpr_rdlvl_done_r2 <= #TCQ mpr_rdlvl_done_r1;
mpr_rdlvl_start_r <= #TCQ mpr_rdlvl_start;
end
//***************************************************************************
// First stage calibration: Capture clock
//***************************************************************************
//*****************************************************************
// Keep track of how many samples have been written to shift registers
  // Every time RD_SHIFT_LEN samples have been written, we have a
// full read training pattern loaded into the sr_* registers. Then assert
// sr_valid_r to indicate that: (1) comparison between the sr_* and
// old_sr_* and prev_sr_* registers can take place, (2) transfer of
// the contents of sr_* to old_sr_* and prev_sr_* registers can also
// take place
//*****************************************************************
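  // Illustrative count (assuming CAL_PAT_LEN = 8 and nCK_PER_CLK = 2, the
  // values used by the declarations later in this file): RD_SHIFT_LEN = 2,
  // so sr_valid_r pulses once for every two valid read samples, i.e. once
  // per complete training pattern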
// verilint STARC-2.2.3.3 off
always @(posedge clk)
if (rst || (mpr_rdlvl_done_r && ~rdlvl_stg1_start)) begin
cnt_shift_r <= #TCQ 'b1;
sr_valid_r <= #TCQ 1'b0;
mpr_valid_r <= #TCQ 1'b0;
end else begin
if (mux_rd_valid_r && mpr_rdlvl_start && ~mpr_rdlvl_done_r) begin
if (cnt_shift_r == 'b0)
mpr_valid_r <= #TCQ 1'b1;
else begin
mpr_valid_r <= #TCQ 1'b0;
cnt_shift_r <= #TCQ cnt_shift_r + 1;
end
end else
mpr_valid_r <= #TCQ 1'b0;
if (mux_rd_valid_r && rdlvl_stg1_start) begin
if (cnt_shift_r == RD_SHIFT_LEN-1) begin
sr_valid_r <= #TCQ 1'b1;
cnt_shift_r <= #TCQ 'b0;
end else begin
sr_valid_r <= #TCQ 1'b0;
cnt_shift_r <= #TCQ cnt_shift_r + 1;
end
end else
// When the current mux_rd_* contents are not valid, then
// retain the current value of cnt_shift_r, and make sure
// that sr_valid_r = 0 to prevent any downstream loads or
// comparisons
sr_valid_r <= #TCQ 1'b0;
end
// verilint STARC-2.2.3.3 on
//*****************************************************************
  // Logic to determine when either edge of the data eye has been encountered.
  // The pre- and post-IDELAY-update data patterns are compared; if they
  // differ, then an edge has been encountered. Currently no attempt is
  // made to determine if the data pattern itself is "correct", only
  // whether it changes after incrementing the IDELAY (possible
  // future enhancement)
//*****************************************************************
// One-way control for ensuring that state machine request to store
// current read data into OLD SR shift register only occurs on a
// valid clock cycle. The FSM provides a one-cycle request pulse.
// It is the responsibility of the FSM to wait the worst-case time
// before relying on any downstream results of this load.
always @(posedge clk)
if (rst)
store_sr_r <= #TCQ 1'b0;
else begin
if (store_sr_req_r)
store_sr_r <= #TCQ 1'b1;
else if ((sr_valid_r || mpr_valid_r) && store_sr_r)
store_sr_r <= #TCQ 1'b0;
end
// Transfer current data to old data, prior to incrementing delay
// Also store data from current sampling window - so that we can detect
// if the current delay tap yields data that is "jittery"
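  // (prev_sr_* is reloaded on every valid sample and therefore tracks the
  // current tap - a mismatch against it indicates jitter; old_sr_* is only
  // loaded on an FSM store request and holds the reference data from the
  // previous tap - a mismatch against it indicates an edge between taps)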
generate
if (nCK_PER_CLK == 4) begin: gen_old_sr_div4
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_old_sr
always @(posedge clk) begin
if (sr_valid_r || mpr_valid_r) begin
// Load last sample (i.e. from current sampling interval)
prev_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
prev_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
prev_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
prev_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
prev_sr_rise2_r[z] <= #TCQ sr_rise2_r[z];
prev_sr_fall2_r[z] <= #TCQ sr_fall2_r[z];
prev_sr_rise3_r[z] <= #TCQ sr_rise3_r[z];
prev_sr_fall3_r[z] <= #TCQ sr_fall3_r[z];
end
if ((sr_valid_r || mpr_valid_r) && store_sr_r) begin
old_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
old_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
old_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
old_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
old_sr_rise2_r[z] <= #TCQ sr_rise2_r[z];
old_sr_fall2_r[z] <= #TCQ sr_fall2_r[z];
old_sr_rise3_r[z] <= #TCQ sr_rise3_r[z];
old_sr_fall3_r[z] <= #TCQ sr_fall3_r[z];
end
end
end
end else if (nCK_PER_CLK == 2) begin: gen_old_sr_div2
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_old_sr
always @(posedge clk) begin
if (sr_valid_r || mpr_valid_r) begin
prev_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
prev_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
prev_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
prev_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
end
if ((sr_valid_r || mpr_valid_r) && store_sr_r) begin
old_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
old_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
old_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
old_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
end
end
end
end
endgenerate
//*******************************************************
// Match determination occurs over 3 cycles - pipelined for better timing
//*******************************************************
// Match valid with # of cycles of pipelining in match determination
always @(posedge clk) begin
sr_valid_r1 <= #TCQ sr_valid_r;
sr_valid_r2 <= #TCQ sr_valid_r1;
mpr_valid_r1 <= #TCQ mpr_valid_r;
mpr_valid_r2 <= #TCQ mpr_valid_r1;
end
generate
if (nCK_PER_CLK == 4) begin: gen_sr_match_div4
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_sr_match
always @(posedge clk) begin
// CYCLE1: Compare all bits in DQS grp, generate separate term for
// each bit over four bit times. For example, if there are 8-bits
// per DQS group, 32 terms are generated on cycle 1
// NOTE: Structure HDL such that X on data bus will result in a
// mismatch. This is required for memory models that can drive the
// bus with X's to model uncertainty regions (e.g. Denali)
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == old_sr_rise0_r[z]))
old_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise0_r[z] <= #TCQ old_sr_match_rise0_r[z];
else
old_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == old_sr_fall0_r[z]))
old_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall0_r[z] <= #TCQ old_sr_match_fall0_r[z];
else
old_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == old_sr_rise1_r[z]))
old_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise1_r[z] <= #TCQ old_sr_match_rise1_r[z];
else
old_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == old_sr_fall1_r[z]))
old_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall1_r[z] <= #TCQ old_sr_match_fall1_r[z];
else
old_sr_match_fall1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise2_r[z] == old_sr_rise2_r[z]))
old_sr_match_rise2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise2_r[z] <= #TCQ old_sr_match_rise2_r[z];
else
old_sr_match_rise2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall2_r[z] == old_sr_fall2_r[z]))
old_sr_match_fall2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall2_r[z] <= #TCQ old_sr_match_fall2_r[z];
else
old_sr_match_fall2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise3_r[z] == old_sr_rise3_r[z]))
old_sr_match_rise3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise3_r[z] <= #TCQ old_sr_match_rise3_r[z];
else
old_sr_match_rise3_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall3_r[z] == old_sr_fall3_r[z]))
old_sr_match_fall3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall3_r[z] <= #TCQ old_sr_match_fall3_r[z];
else
old_sr_match_fall3_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == prev_sr_rise0_r[z]))
prev_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise0_r[z] <= #TCQ prev_sr_match_rise0_r[z];
else
prev_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == prev_sr_fall0_r[z]))
prev_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall0_r[z] <= #TCQ prev_sr_match_fall0_r[z];
else
prev_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == prev_sr_rise1_r[z]))
prev_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise1_r[z] <= #TCQ prev_sr_match_rise1_r[z];
else
prev_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == prev_sr_fall1_r[z]))
prev_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall1_r[z] <= #TCQ prev_sr_match_fall1_r[z];
else
prev_sr_match_fall1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise2_r[z] == prev_sr_rise2_r[z]))
prev_sr_match_rise2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise2_r[z] <= #TCQ prev_sr_match_rise2_r[z];
else
prev_sr_match_rise2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall2_r[z] == prev_sr_fall2_r[z]))
prev_sr_match_fall2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall2_r[z] <= #TCQ prev_sr_match_fall2_r[z];
else
prev_sr_match_fall2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise3_r[z] == prev_sr_rise3_r[z]))
prev_sr_match_rise3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise3_r[z] <= #TCQ prev_sr_match_rise3_r[z];
else
prev_sr_match_rise3_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall3_r[z] == prev_sr_fall3_r[z]))
prev_sr_match_fall3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall3_r[z] <= #TCQ prev_sr_match_fall3_r[z];
else
prev_sr_match_fall3_r[z] <= #TCQ 1'b0;
            // CYCLE2: Combine all the comparisons for every 8 words
            // (rise0/fall0 through rise3/fall3) in the calibration sequence.
            // Now we're down to DRAM_WIDTH terms
old_sr_match_cyc2_r[z] <= #TCQ
old_sr_match_rise0_r[z] &
old_sr_match_fall0_r[z] &
old_sr_match_rise1_r[z] &
old_sr_match_fall1_r[z] &
old_sr_match_rise2_r[z] &
old_sr_match_fall2_r[z] &
old_sr_match_rise3_r[z] &
old_sr_match_fall3_r[z];
prev_sr_match_cyc2_r[z] <= #TCQ
prev_sr_match_rise0_r[z] &
prev_sr_match_fall0_r[z] &
prev_sr_match_rise1_r[z] &
prev_sr_match_fall1_r[z] &
prev_sr_match_rise2_r[z] &
prev_sr_match_fall2_r[z] &
prev_sr_match_rise3_r[z] &
prev_sr_match_fall3_r[z];
            // CYCLE3: Invert value (i.e. assert when a DIFFERENCE in value is
            // seen) and qualify with the pipelined valid signal - probably
            // don't need a full cycle just to do this....
if (sr_valid_r2 || mpr_valid_r2) begin
old_sr_diff_r[z] <= #TCQ ~old_sr_match_cyc2_r[z];
prev_sr_diff_r[z] <= #TCQ ~prev_sr_match_cyc2_r[z];
end else begin
old_sr_diff_r[z] <= #TCQ 'b0;
prev_sr_diff_r[z] <= #TCQ 'b0;
end
end
end
end if (nCK_PER_CLK == 2) begin: gen_sr_match_div2
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_sr_match
always @(posedge clk) begin
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == old_sr_rise0_r[z]))
old_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise0_r[z] <= #TCQ old_sr_match_rise0_r[z];
else
old_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == old_sr_fall0_r[z]))
old_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall0_r[z] <= #TCQ old_sr_match_fall0_r[z];
else
old_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == old_sr_rise1_r[z]))
old_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise1_r[z] <= #TCQ old_sr_match_rise1_r[z];
else
old_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == old_sr_fall1_r[z]))
old_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall1_r[z] <= #TCQ old_sr_match_fall1_r[z];
else
old_sr_match_fall1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == prev_sr_rise0_r[z]))
prev_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise0_r[z] <= #TCQ prev_sr_match_rise0_r[z];
else
prev_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == prev_sr_fall0_r[z]))
prev_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall0_r[z] <= #TCQ prev_sr_match_fall0_r[z];
else
prev_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == prev_sr_rise1_r[z]))
prev_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise1_r[z] <= #TCQ prev_sr_match_rise1_r[z];
else
prev_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == prev_sr_fall1_r[z]))
prev_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall1_r[z] <= #TCQ prev_sr_match_fall1_r[z];
else
prev_sr_match_fall1_r[z] <= #TCQ 1'b0;
old_sr_match_cyc2_r[z] <= #TCQ
old_sr_match_rise0_r[z] &
old_sr_match_fall0_r[z] &
old_sr_match_rise1_r[z] &
old_sr_match_fall1_r[z];
prev_sr_match_cyc2_r[z] <= #TCQ
prev_sr_match_rise0_r[z] &
prev_sr_match_fall0_r[z] &
prev_sr_match_rise1_r[z] &
prev_sr_match_fall1_r[z];
          // CYCLE3: Invert value (i.e. assert when a DIFFERENCE in value is
          // seen) and qualify with the pipelined valid signal - probably
          // don't need a full cycle just to do this....
if (sr_valid_r2 || mpr_valid_r2) begin
old_sr_diff_r[z] <= #TCQ ~old_sr_match_cyc2_r[z];
prev_sr_diff_r[z] <= #TCQ ~prev_sr_match_cyc2_r[z];
end else begin
old_sr_diff_r[z] <= #TCQ 'b0;
prev_sr_diff_r[z] <= #TCQ 'b0;
end
end
end
end
endgenerate
//***************************************************************************
// First stage calibration: DQS Capture
//***************************************************************************
//*******************************************************
// Counters for tracking # of samples compared
  // For each comparison point (i.e. to determine if an edge has
// occurred after each IODELAY increment when read leveling),
// multiple samples are compared in order to average out the effects
// of jitter. If any one of these samples is different than the "old"
// sample corresponding to the previous IODELAY value, then an edge
// is declared to be detected.
//*******************************************************
  // Two cascaded counters are used to keep track of the # of samples compared,
  // in order to make it easier to meet timing on these paths. Once the
  // optimal sampling interval is determined, it may be possible to remove
  // the second counter
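  // Rough illustration: outside of the FAST_CAL/FAST_WIN_DETECT simulation
  // modes, roughly DETECT_EDGE_SAMPLE_CNT0 x DETECT_EDGE_SAMPLE_CNT1 valid
  // samples are compared before samp_cnt_done_r can assert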
always @(posedge clk)
samp_edge_cnt0_en_r <= #TCQ
(cal1_state_r == CAL1_PAT_DETECT) ||
(cal1_state_r == CAL1_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE_DQ);
// First counter counts # of samples compared
always @(posedge clk)
if (rst)
samp_edge_cnt0_r <= #TCQ 'b0;
else begin
if (!samp_edge_cnt0_en_r)
// Reset sample counter when not in any of the "sampling" states
samp_edge_cnt0_r <= #TCQ 'b0;
else if (sr_valid_r2 || mpr_valid_r2)
// Otherwise, count # of samples compared
samp_edge_cnt0_r <= #TCQ samp_edge_cnt0_r + 1;
end
// Counter #2 enable generation
always @(posedge clk)
if (rst)
samp_edge_cnt1_en_r <= #TCQ 1'b0;
else begin
// Assert pulse when correct number of samples compared
if ((samp_edge_cnt0_r == DETECT_EDGE_SAMPLE_CNT0) &&
(sr_valid_r2 || mpr_valid_r2))
samp_edge_cnt1_en_r <= #TCQ 1'b1;
else
samp_edge_cnt1_en_r <= #TCQ 1'b0;
end
// Counter #2
always @(posedge clk)
if (rst)
samp_edge_cnt1_r <= #TCQ 'b0;
else
if (!samp_edge_cnt0_en_r)
samp_edge_cnt1_r <= #TCQ 'b0;
else if (samp_edge_cnt1_en_r)
samp_edge_cnt1_r <= #TCQ samp_edge_cnt1_r + 1;
always @(posedge clk)
if (rst)
samp_cnt_done_r <= #TCQ 1'b0;
else begin
if (!samp_edge_cnt0_en_r)
samp_cnt_done_r <= #TCQ 'b0;
else if ((SIM_CAL_OPTION == "FAST_CAL") ||
(SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin
if (samp_edge_cnt0_r == SR_VALID_DELAY-1)
// For simulation only, stay in edge detection mode a minimum
// amount of time - just enough for two data compares to finish
samp_cnt_done_r <= #TCQ 1'b1;
end else begin
if (samp_edge_cnt1_r == DETECT_EDGE_SAMPLE_CNT1)
samp_cnt_done_r <= #TCQ 1'b1;
end
end
//*****************************************************************
// Logic to keep track of (on per-bit basis):
// 1. When a region of stability preceded by a known edge occurs
// 2. If for the current tap, the read data jitters
  // 3. If an edge occurred between the current and previous tap
// 4. When the current edge detection/sampling interval can end
// Essentially, these are a series of status bits - the stage 1
// calibration FSM monitors these to determine when an edge is
// found. Additional information is provided to help the FSM
// determine if a left or right edge has been found.
//****************************************************************
assign pb_detect_edge_setup
= (cal1_state_r == CAL1_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_PB_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_PB_DEC_CPT_LEFT_WAIT);
assign pb_detect_edge
= (cal1_state_r == CAL1_PAT_DETECT) ||
(cal1_state_r == CAL1_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE_DQ);
generate
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_track_left_edge
always @(posedge clk) begin
if (pb_detect_edge_setup) begin
// Reset eye size, stable eye marker, and jitter marker before
// starting new edge detection iteration
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_detect_edge_done_r[z] <= #TCQ 1'b0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_last_tap_jitter_r[z] <= #TCQ 1'b0;
pb_found_edge_last_r[z] <= #TCQ 1'b0;
pb_found_edge_r[z] <= #TCQ 1'b0;
pb_found_first_edge_r[z] <= #TCQ 1'b0;
end else if (pb_detect_edge) begin
// Save information on which DQ bits are already out of the
// data valid window - those DQ bits will later not have their
// IDELAY tap value incremented
pb_found_edge_last_r[z] <= #TCQ pb_found_edge_r[z];
if (!pb_detect_edge_done_r[z]) begin
if (samp_cnt_done_r) begin
// If we've reached end of sampling interval, no jitter on
// current tap has been found (although an edge could have
// been found between the current and previous taps), and
// the sampling interval is complete. Increment the stable
// eye counter if no edge found, and always clear the jitter
// flag in preparation for the next tap.
pb_last_tap_jitter_r[z] <= #TCQ 1'b0;
pb_detect_edge_done_r[z] <= #TCQ 1'b1;
if (!pb_found_edge_r[z] && !pb_last_tap_jitter_r[z]) begin
// If the data was completely stable during this tap and
// no edge was found between this and the previous tap
// then increment the stable eye counter "as appropriate"
if (pb_cnt_eye_size_r[z] != MIN_EYE_SIZE-1)
pb_cnt_eye_size_r[z] <= #TCQ pb_cnt_eye_size_r[z] + 1;
else //if (pb_found_first_edge_r[z])
// We've reached minimum stable eye width
pb_found_stable_eye_r[z] <= #TCQ 1'b1;
end else begin
// Otherwise, an edge was found, either because of a
// difference between this and the previous tap's read
// data, and/or because the previous tap's data jittered
// (but not the current tap's data), then just set the
// edge found flag, and enable the stable eye counter
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_found_edge_r[z] <= #TCQ 1'b1;
pb_detect_edge_done_r[z] <= #TCQ 1'b1;
end
end else if (prev_sr_diff_r[z]) begin
// If we find that the current tap read data jitters, then
// set edge and jitter found flags, "enable" the eye size
// counter, and stop sampling interval for this bit
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_last_tap_jitter_r[z] <= #TCQ 1'b1;
pb_found_edge_r[z] <= #TCQ 1'b1;
pb_found_first_edge_r[z] <= #TCQ 1'b1;
pb_detect_edge_done_r[z] <= #TCQ 1'b1;
end else if (old_sr_diff_r[z] || pb_last_tap_jitter_r[z]) begin
// If either an edge was found (i.e. difference between
// current tap and previous tap read data), or the previous
// tap exhibited jitter (which means by definition that the
// current tap cannot match the previous tap because the
// previous tap gave unstable data), then set the edge found
// flag, and "enable" eye size counter. But do not stop
// sampling interval - we still need to check if the current
// tap exhibits jitter
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_found_edge_r[z] <= #TCQ 1'b1;
pb_found_first_edge_r[z] <= #TCQ 1'b1;
end
end
end else begin
// Before every edge detection interval, reset "intra-tap" flags
pb_found_edge_r[z] <= #TCQ 1'b0;
pb_detect_edge_done_r[z] <= #TCQ 1'b0;
end
end
end
endgenerate
// Combine the above per-bit status flags into combined terms when
// performing deskew on the aggregate data window
always @(posedge clk) begin
detect_edge_done_r <= #TCQ &pb_detect_edge_done_r;
found_edge_r <= #TCQ |pb_found_edge_r;
found_edge_all_r <= #TCQ &pb_found_edge_r;
found_stable_eye_r <= #TCQ &pb_found_stable_eye_r;
end
// last IODELAY "stable eye" indicator is updated only after
// detect_edge_done_r is asserted - so that when we do find the "right edge"
// of the data valid window, found_edge_r = 1, AND found_stable_eye_r = 1
// when detect_edge_done_r = 1 (otherwise, if found_stable_eye_r updates
  // immediately, then it is never possible to have found_stable_eye_r = 1
// when we detect an edge - and we'll never know whether we've found
// a "right edge")
always @(posedge clk)
if (pb_detect_edge_setup)
found_stable_eye_last_r <= #TCQ 1'b0;
else if (detect_edge_done_r)
found_stable_eye_last_r <= #TCQ found_stable_eye_r;
//*****************************************************************
// Keep track of DQ IDELAYE2 taps used
//*****************************************************************
// Added additional register stage to improve timing
always @(posedge clk)
if (rst)
idelay_tap_cnt_slice_r <= 5'h0;
else
idelay_tap_cnt_slice_r <= idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing];
always @(posedge clk)
if (rst || (SIM_CAL_OPTION == "SKIP_CAL")) begin //|| new_cnt_cpt_r
for (s = 0; s < RANKS; s = s + 1) begin
for (t = 0; t < DQS_WIDTH; t = t + 1) begin
idelay_tap_cnt_r[s][t] <= #TCQ idelaye2_init_val;
end
end
end else if (SIM_CAL_OPTION == "FAST_CAL") begin
for (u = 0; u < RANKS; u = u + 1) begin
for (w = 0; w < DQS_WIDTH; w = w + 1) begin
if (cal1_dq_idel_ce) begin
if (cal1_dq_idel_inc)
idelay_tap_cnt_r[u][w] <= #TCQ idelay_tap_cnt_r[u][w] + 1;
else
idelay_tap_cnt_r[u][w] <= #TCQ idelay_tap_cnt_r[u][w] - 1;
end
end
end
end else if ((rnk_cnt_r == RANKS-1) && (RANKS == 2) &&
rdlvl_rank_done_r && (cal1_state_r == CAL1_IDLE)) begin
for (f = 0; f < DQS_WIDTH; f = f + 1) begin
idelay_tap_cnt_r[rnk_cnt_r][f] <= #TCQ idelay_tap_cnt_r[(rnk_cnt_r-1)][f];
end
end else if (cal1_dq_idel_ce) begin
if (cal1_dq_idel_inc)
idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] <= #TCQ idelay_tap_cnt_slice_r + 5'h1;
else
idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] <= #TCQ idelay_tap_cnt_slice_r - 5'h1;
end else if (idelay_ld)
idelay_tap_cnt_r[0][wrcal_cnt] <= #TCQ 5'b00000;
always @(posedge clk)
if (rst || new_cnt_cpt_r)
idelay_tap_limit_r <= #TCQ 1'b0;
else if (idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_r] == 'd31)
idelay_tap_limit_r <= #TCQ 1'b1;
//*****************************************************************
// keep track of edge tap counts found, and current capture clock
// tap count
//*****************************************************************
always @(posedge clk)
if (rst || new_cnt_cpt_r ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
tap_cnt_cpt_r <= #TCQ 'b0;
else if (cal1_dlyce_cpt_r) begin
if (cal1_dlyinc_cpt_r)
tap_cnt_cpt_r <= #TCQ tap_cnt_cpt_r + 1;
else if (tap_cnt_cpt_r != 'd0)
tap_cnt_cpt_r <= #TCQ tap_cnt_cpt_r - 1;
end
always @(posedge clk)
if (rst || new_cnt_cpt_r ||
(cal1_state_r1 == CAL1_DQ_IDEL_TAP_INC) ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
tap_limit_cpt_r <= #TCQ 1'b0;
else if (tap_cnt_cpt_r == 6'd63)
tap_limit_cpt_r <= #TCQ 1'b1;
always @(posedge clk)
cal1_cnt_cpt_timing_r <= #TCQ cal1_cnt_cpt_r;
assign cal1_cnt_cpt_timing = {2'b00, cal1_cnt_cpt_r};
// Storing DQS tap values at the end of each DQS read leveling
always @(posedge clk) begin
if (rst) begin
for (a = 0; a < RANKS; a = a + 1) begin: rst_rdlvl_dqs_tap_count_loop
for (b = 0; b < DQS_WIDTH; b = b + 1)
rdlvl_dqs_tap_cnt_r[a][b] <= #TCQ 'b0;
end
end else if ((SIM_CAL_OPTION == "FAST_CAL") & (cal1_state_r1 == CAL1_NEXT_DQS)) begin
for (p = 0; p < RANKS; p = p +1) begin: rdlvl_dqs_tap_rank_cnt
for(q = 0; q < DQS_WIDTH; q = q +1) begin: rdlvl_dqs_tap_cnt
rdlvl_dqs_tap_cnt_r[p][q] <= #TCQ tap_cnt_cpt_r;
end
end
end else if (SIM_CAL_OPTION == "SKIP_CAL") begin
for (j = 0; j < RANKS; j = j +1) begin: rdlvl_dqs_tap_rnk_cnt
for(i = 0; i < DQS_WIDTH; i = i +1) begin: rdlvl_dqs_cnt
rdlvl_dqs_tap_cnt_r[j][i] <= #TCQ 6'd31;
end
end
end else if (cal1_state_r1 == CAL1_NEXT_DQS) begin
rdlvl_dqs_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing_r] <= #TCQ tap_cnt_cpt_r;
end
end
// Counter to track maximum DQ IODELAY tap usage during the per-bit
// deskew portion of stage 1 calibration
always @(posedge clk)
if (rst) begin
idel_tap_cnt_dq_pb_r <= #TCQ 'b0;
idel_tap_limit_dq_pb_r <= #TCQ 1'b0;
end else
if (new_cnt_cpt_r) begin
idel_tap_cnt_dq_pb_r <= #TCQ 'b0;
idel_tap_limit_dq_pb_r <= #TCQ 1'b0;
end else if (|cal1_dlyce_dq_r) begin
if (cal1_dlyinc_dq_r)
idel_tap_cnt_dq_pb_r <= #TCQ idel_tap_cnt_dq_pb_r + 1;
else
idel_tap_cnt_dq_pb_r <= #TCQ idel_tap_cnt_dq_pb_r - 1;
if (idel_tap_cnt_dq_pb_r == 31)
idel_tap_limit_dq_pb_r <= #TCQ 1'b1;
else
idel_tap_limit_dq_pb_r <= #TCQ 1'b0;
end
//*****************************************************************
always @(posedge clk)
cal1_state_r1 <= #TCQ cal1_state_r;
always @(posedge clk)
if (rst) begin
cal1_cnt_cpt_r <= #TCQ 'b0;
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
cal1_prech_req_r <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_IDLE;
cnt_idel_dec_cpt_r <= #TCQ 6'bxxxxxx;
found_first_edge_r <= #TCQ 1'b0;
found_second_edge_r <= #TCQ 1'b0;
right_edge_taps_r <= #TCQ 6'bxxxxxx;
first_edge_taps_r <= #TCQ 6'bxxxxxx;
new_cnt_cpt_r <= #TCQ 1'b0;
rdlvl_stg1_done <= #TCQ 1'b0;
rdlvl_stg1_err <= #TCQ 1'b0;
second_edge_taps_r <= #TCQ 6'bxxxxxx;
store_sr_req_pulsed_r <= #TCQ 1'b0;
store_sr_req_r <= #TCQ 1'b0;
rnk_cnt_r <= #TCQ 2'b00;
rdlvl_rank_done_r <= #TCQ 1'b0;
idel_dec_cnt <= #TCQ 'd0;
rdlvl_last_byte_done <= #TCQ 1'b0;
idel_pat_detect_valid_r <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
idel_adj_inc <= #TCQ 1'b0;
if (OCAL_EN == "ON")
mpr_rdlvl_done_r <= #TCQ 1'b0;
else
mpr_rdlvl_done_r <= #TCQ 1'b1;
mpr_dec_cpt_r <= #TCQ 1'b0;
end else begin
// default (inactive) states for all "pulse" outputs
// verilint STARC-2.2.3.3 off
cal1_prech_req_r <= #TCQ 1'b0;
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
new_cnt_cpt_r <= #TCQ 1'b0;
store_sr_req_pulsed_r <= #TCQ 1'b0;
store_sr_req_r <= #TCQ 1'b0;
case (cal1_state_r)
CAL1_IDLE: begin
rdlvl_rank_done_r <= #TCQ 1'b0;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
if (mpr_rdlvl_start && ~mpr_rdlvl_start_r) begin
cal1_state_r <= #TCQ CAL1_MPR_NEW_DQS_WAIT;
end else
if (rdlvl_stg1_start && ~rdlvl_stg1_start_r) begin
if (SIM_CAL_OPTION == "SKIP_CAL")
cal1_state_r <= #TCQ CAL1_REGL_LOAD;
else if (SIM_CAL_OPTION == "FAST_CAL")
cal1_state_r <= #TCQ CAL1_NEXT_DQS;
else begin
new_cnt_cpt_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_NEW_DQS_WAIT;
end
end
end
CAL1_MPR_NEW_DQS_WAIT: begin
cal1_prech_req_r <= #TCQ 1'b0;
if (!cal1_wait_r && mpr_valid_r)
cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT;
end
// Wait for the new DQS group to change
// also gives time for the read data IN_FIFO to
// output the updated data for the new DQS group
CAL1_NEW_DQS_WAIT: begin
rdlvl_rank_done_r <= #TCQ 1'b0;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
cal1_prech_req_r <= #TCQ 1'b0;
if (|pi_counter_read_val) begin //VK_REVIEW
mpr_dec_cpt_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT;
cnt_idel_dec_cpt_r <= #TCQ pi_counter_read_val;
end else if (!cal1_wait_r) begin
//if (!cal1_wait_r) begin
// Store "previous tap" read data. Technically there is no
// "previous" read data, since we are starting a new DQS
// group, so we'll never find an edge at tap 0 unless the
// data is fluctuating/jittering
store_sr_req_r <= #TCQ 1'b1;
// If per-bit deskew is disabled, then skip the first
// portion of stage 1 calibration
if (PER_BIT_DESKEW == "OFF")
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
else if (PER_BIT_DESKEW == "ON")
cal1_state_r <= #TCQ CAL1_PB_STORE_FIRST_WAIT;
end
end
//*****************************************************************
// Per-bit deskew states
//*****************************************************************
// Wait state following storage of initial read data
CAL1_PB_STORE_FIRST_WAIT:
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE;
// Look for an edge on all DQ bits in current DQS group
CAL1_PB_DETECT_EDGE:
if (detect_edge_done_r) begin
if (found_stable_eye_r) begin
// If we've found the left edge for all bits (or more precisely,
// we've found the left edge, and then part of the stable
// window thereafter), then proceed to positioning the CPT clock
// right before the left margin
cnt_idel_dec_cpt_r <= #TCQ MIN_EYE_SIZE + 1;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_LEFT;
end else begin
// If we've reached the end of the sampling time, and haven't
// yet found the left margin of all the DQ bits, then:
if (!tap_limit_cpt_r) begin
// If we still have taps left to use, then store current value
// of read data, increment the capture clock, and continue to
// look for (left) edges
store_sr_req_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_PB_INC_CPT;
end else begin
                  // If we ran out of taps moving the capture clock, and we
                  // haven't finished edge detection, then reset the capture
                  // clock taps to 0 (gradually, one tap at a time), then
                  // exit the per-bit portion of the algorithm - i.e. proceed
                  // to adjust the capture clock and DQ IODELAYs as in the
                  // rest of stage 1 calibration
cnt_idel_dec_cpt_r <= #TCQ 6'd63;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT;
end
end
end
// Increment delay for DQS
CAL1_PB_INC_CPT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_PB_INC_CPT_WAIT;
end
// Wait for IODELAY for both capture and internal nodes within
// ISERDES to settle, before checking again for an edge
CAL1_PB_INC_CPT_WAIT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE;
end
// We've found the left edges of the windows for all DQ bits
            // (actually, we found them MIN_EYE_SIZE taps ago). Decrement capture
            // clock IDELAY to position just outside the left edge of the data window
CAL1_PB_DEC_CPT_LEFT:
if (cnt_idel_dec_cpt_r == 6'b000000)
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_LEFT_WAIT;
else begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1;
end
CAL1_PB_DEC_CPT_LEFT_WAIT:
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE_DQ;
// If there is skew between individual DQ bits, then after we've
// positioned the CPT clock, we will be "in the window" for some
// DQ bits ("early" DQ bits), and "out of the window" for others
// ("late" DQ bits). Increase DQ taps until we are out of the
// window for all DQ bits
CAL1_PB_DETECT_EDGE_DQ:
if (detect_edge_done_r)
if (found_edge_all_r) begin
// We're out of the window for all DQ bits in this DQS group
// We're done with per-bit deskew for this group - now decr
// capture clock IODELAY tap count back to 0, and proceed
// with the rest of stage 1 calibration for this DQS group
cnt_idel_dec_cpt_r <= #TCQ tap_cnt_cpt_r;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT;
end else
if (!idel_tap_limit_dq_pb_r)
// If we still have DQ taps available for deskew, keep
// incrementing IODELAY tap count for the appropriate DQ bits
cal1_state_r <= #TCQ CAL1_PB_INC_DQ;
else begin
// Otherwise, stop immediately (we've done the best we can)
// and proceed with rest of stage 1 calibration
cnt_idel_dec_cpt_r <= #TCQ tap_cnt_cpt_r;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT;
end
CAL1_PB_INC_DQ: begin
// Increment only those DQ for which an edge hasn't been found yet
cal1_dlyce_dq_r <= #TCQ ~pb_found_edge_last_r;
cal1_dlyinc_dq_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_PB_INC_DQ_WAIT;
end
CAL1_PB_INC_DQ_WAIT:
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE_DQ;
// Decrement capture clock taps back to initial value
CAL1_PB_DEC_CPT:
if (cnt_idel_dec_cpt_r == 6'b000000)
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_WAIT;
else begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1;
end
// Wait for capture clock to settle, then proceed to rest of
// state 1 calibration for this DQS group
CAL1_PB_DEC_CPT_WAIT:
if (!cal1_wait_r) begin
store_sr_req_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
end
// When first starting calibration for a DQS group, save the
// current value of the read data shift register, and use this
// as a reference. Note that for the first iteration of the
// edge detection loop, we will in effect be checking for an edge
// at IODELAY taps = 0 - normally, we are comparing the read data
// for IODELAY taps = N, with the read data for IODELAY taps = N-1
// An edge can only be found at IODELAY taps = 0 if the read data
// is changing during this time (possible due to jitter)
CAL1_STORE_FIRST_WAIT: begin
mpr_dec_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PAT_DETECT;
end
CAL1_VALID_WAIT: begin
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT;
end
CAL1_MPR_PAT_DETECT: begin
// MPR read leveling for centering DQS in valid window before
// OCLKDELAYED calibration begins in order to eliminate read issues
if (idel_pat_detect_valid_r == 1'b0) begin
cal1_state_r <= #TCQ CAL1_VALID_WAIT;
idel_pat_detect_valid_r <= #TCQ 1'b1;
end else if (idel_pat_detect_valid_r && idel_mpr_pat_detect_r) begin
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
idel_dec_cnt <= #TCQ 'd0;
end else if (!idelay_tap_limit_r)
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC;
else
cal1_state_r <= #TCQ CAL1_RDLVL_ERR;
end
CAL1_PAT_DETECT: begin
// All DQ bits associated with a DQS are pushed to the right one IDELAY
// tap at a time until first rising DQS is in the tri-state region
// before first rising edge window.
// The detect_edge_done_r condition included to support averaging
// during IDELAY tap increments
if (detect_edge_done_r) begin
if (idel_pat_data_match) begin
case (idelay_adj)
2'b01: begin
cal1_state_r <= CAL1_DQ_IDEL_TAP_INC;
idel_dec_cnt <= #TCQ 1'b0;
idel_adj_inc <= #TCQ 1'b1;
end
2'b10: begin //DEC by 1
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC ;
idel_dec_cnt <= #TCQ 1'b1;
idel_adj_inc <= #TCQ 1'b0;
end
default: begin
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
idel_dec_cnt <= #TCQ 1'b0;
idel_adj_inc <= #TCQ 1'b0;
end
endcase
end else if (!idelay_tap_limit_r) begin
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC;
end else begin
cal1_state_r <= #TCQ CAL1_RDLVL_ERR;
end
end
end
// Increment IDELAY tap by 1 for DQ bits in the byte being calibrated
// until left edge of valid window detected
CAL1_DQ_IDEL_TAP_INC: begin
cal1_dq_idel_ce <= #TCQ 1'b1;
cal1_dq_idel_inc <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC_WAIT;
idel_pat_detect_valid_r <= #TCQ 1'b0;
end
CAL1_DQ_IDEL_TAP_INC_WAIT: begin
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
if (!cal1_wait_r) begin
idel_adj_inc <= #TCQ 1'b0;
if (idel_adj_inc)
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
else if (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3"))
cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT;
else
cal1_state_r <= #TCQ CAL1_PAT_DETECT;
end
end
// Decrement by 2 IDELAY taps once idel_pat_data_match detected
CAL1_DQ_IDEL_TAP_DEC: begin
cal1_dq_idel_inc <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC_WAIT;
if (idel_dec_cnt >= 'd0)
cal1_dq_idel_ce <= #TCQ 1'b1;
else
cal1_dq_idel_ce <= #TCQ 1'b0;
if (idel_dec_cnt > 'd0)
idel_dec_cnt <= #TCQ idel_dec_cnt - 1;
else
idel_dec_cnt <= #TCQ idel_dec_cnt;
end
CAL1_DQ_IDEL_TAP_DEC_WAIT: begin
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
if (!cal1_wait_r) begin
if ((idel_dec_cnt > 'd0) || (pi_rdval_cnt > 'd0))
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC;
else if (mpr_dec_cpt_r)
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
else
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
end
end
// Check for presence of data eye edge. During this state, we
// sample the read data multiple times, and look for changes
// in the read data, specifically:
// 1. A change in the read data compared with the value of
// read data from the previous delay tap. This indicates
// that the most recent tap delay increment has moved us
// into either a new window, or moved/kept us in the
// transition/jitter region between windows. Note that this
          // condition only needs to be checked once, and for
// logistical purposes, we check this soon after entering
// this state (see comment in CAL1_DETECT_EDGE below for
// why this is done)
// 2. A change in the read data while we are in this state
// (i.e. in the absence of a tap delay increment). This
// indicates that we're close enough to a window edge that
// jitter will cause the read data to change even in the
// absence of a tap delay change
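          // In terms of the datapath above, condition (1) is captured by
          // old_sr_diff_r (current data vs. data stored at the previous
          // tap) and condition (2) by prev_sr_diff_r (current data vs. the
          // previous sample at the same tap)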
CAL1_DETECT_EDGE: begin
              // Essentially wait for the first comparison to finish, then
              // store the current data into the "old" data register. This store
              // happens now, rather than later (e.g. when we have already
              // left this state), in order to avoid a situation where the data
              // stored as "old" data has not been used in an "active
              // comparison" - i.e. where the data is stored after the last
              // comparison of this state. In that case, we can miss an edge
              // if the following sequence occurs:
// 1. Comparison completes in this state - no edge found
// 2. "Momentary jitter" occurs which "pushes" the data out the
// equivalent of one delay tap
// 3. We store this jittered data as the "old" data
// 4. "Jitter" no longer present
// 5. We increment the delay tap by one
// 6. Now we compare the current with the "old" data - they're
// the same, and no edge is detected
// NOTE: Given the large # of comparisons done in this state, it's
// highly unlikely the above sequence will occur in actual H/W
// Wait for the first load of read data into the comparison
// shift register to finish, then load the current read data
// into the "old" data register. This allows us to do one
              // initial comparison between the current read data and the
// stored data corresponding to the previous delay tap
idel_pat_detect_valid_r <= #TCQ 1'b0;
if (!store_sr_req_pulsed_r) begin
// Pulse store_sr_req_r only once in this state
store_sr_req_r <= #TCQ 1'b1;
store_sr_req_pulsed_r <= #TCQ 1'b1;
end else begin
store_sr_req_r <= #TCQ 1'b0;
store_sr_req_pulsed_r <= #TCQ 1'b1;
end
// Continue to sample read data and look for edges until the
// appropriate time interval (shorter for simulation-only,
// much, much longer for actual h/w) has elapsed
if (detect_edge_done_r) begin
if (tap_limit_cpt_r)
// Only one edge detected and ran out of taps since only one
// bit time worth of taps available for window detection. This
// can happen if at tap 0 DQS is in previous window which results
// in only left edge being detected. Or at tap 0 DQS is in the
// current window resulting in only right edge being detected.
// Depending on the frequency this case can also happen if at
// tap 0 DQS is in the left noise region resulting in only left
// edge being detected.
cal1_state_r <= #TCQ CAL1_CALC_IDEL;
else if (found_edge_r) begin
// Sticky bit - asserted after we encounter an edge, although
// the current edge may not be considered the "first edge" this
// just means we found at least one edge
found_first_edge_r <= #TCQ 1'b1;
// Only the right edge of the data valid window is found
// Record the inner right edge tap value
if (!found_first_edge_r && found_stable_eye_last_r) begin
if (tap_cnt_cpt_r == 'd0)
right_edge_taps_r <= #TCQ 'd0;
else
right_edge_taps_r <= #TCQ tap_cnt_cpt_r;
end
// Both edges of data valid window found:
                  // If we've found a second edge after a region of stability,
                  // then we must have just passed the second ("right") edge of
                  // the window. Record second_edge_taps = current tap - 1,
                  // because we're one past the actual second edge tap, where
                  // the edge taps represent the extremes of the data valid
                  // window (i.e. the smallest & largest taps where data is still valid)
if (found_first_edge_r && found_stable_eye_last_r) begin
found_second_edge_r <= #TCQ 1'b1;
second_edge_taps_r <= #TCQ tap_cnt_cpt_r - 1;
cal1_state_r <= #TCQ CAL1_CALC_IDEL;
end else begin
// Otherwise, an edge was found (just not the "second" edge)
// Assuming DQS is in the correct window at tap 0 of Phaser IN
// fine tap. The first edge found is the right edge of the valid
// window and is the beginning of the jitter region hence done!
first_edge_taps_r <= #TCQ tap_cnt_cpt_r;
cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT;
end
end else
// Otherwise, if we haven't found an edge....
// If we still have taps left to use, then keep incrementing
cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT;
end
end
// Increment Phaser_IN delay for DQS
CAL1_IDEL_INC_CPT: begin
cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT_WAIT;
if (~tap_limit_cpt_r) begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b1;
end else begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
end
end
// Wait for Phaser_In to settle, before checking again for an edge
CAL1_IDEL_INC_CPT_WAIT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
end
// Calculate final value of Phaser_IN taps. At this point, one or both
// edges of data eye have been found, and/or all taps have been
// exhausted looking for the edges
// NOTE: We're calculating the amount to decrement by, not the
// absolute setting for DQS.
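            // Illustrative arithmetic (hypothetical tap values): if
            // first_edge_taps_r = 10 and second_edge_taps_r = 40, the
            // decrement is ((40-10)>>1)+1 = 16 taps; if only the left edge
            // was found at tap 10 with tap_cnt_cpt_r = 63, the decrement is
            // (63-10)>>1 = 26 taps - roughly centering the capture clock in
            // the detected window in either case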
CAL1_CALC_IDEL: begin
// CASE1: If 2 edges found.
if (found_second_edge_r)
cnt_idel_dec_cpt_r
<= #TCQ ((second_edge_taps_r -
first_edge_taps_r)>>1) + 1;
else if (right_edge_taps_r > 6'd0)
// Only right edge detected
// right_edge_taps_r is the inner right edge tap value
// hence used for calculation
cnt_idel_dec_cpt_r
<= #TCQ (tap_cnt_cpt_r - (right_edge_taps_r>>1));
else if (found_first_edge_r)
// Only left edge detected
cnt_idel_dec_cpt_r
<= #TCQ ((tap_cnt_cpt_r - first_edge_taps_r)>>1);
else
cnt_idel_dec_cpt_r
<= #TCQ (tap_cnt_cpt_r>>1);
// Now use the value we just calculated to decrement CPT taps
// to the desired calibration point
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT;
end
// decrement capture clock for final adjustment - center
// capture clock in middle of data eye. This adjustment will occur
            // only when both the edges are found using CPT taps. Must do this
// incrementally to avoid clock glitching (since CPT drives clock
// divider within each ISERDES)
CAL1_IDEL_DEC_CPT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
// once adjustment is complete, we're done with calibration for
// this DQS, repeat for next DQS
cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1;
if (cnt_idel_dec_cpt_r == 6'b000001) begin
if (mpr_dec_cpt_r) begin
if (|idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing]) begin
idel_dec_cnt <= #TCQ idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing];
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC;
end else
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
end else
cal1_state_r <= #TCQ CAL1_NEXT_DQS;
end else
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT_WAIT;
end
CAL1_IDEL_DEC_CPT_WAIT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT;
end
// Determine whether we're done, or have more DQS's to calibrate
// Also request precharge after every byte, as appropriate
CAL1_NEXT_DQS: begin
//if (mpr_rdlvl_done_r || (DRAM_TYPE == "DDR2"))
cal1_prech_req_r <= #TCQ 1'b1;
//else
// cal1_prech_req_r <= #TCQ 1'b0;
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
// Prepare for another iteration with next DQS group
found_first_edge_r <= #TCQ 1'b0;
found_second_edge_r <= #TCQ 1'b0;
first_edge_taps_r <= #TCQ 'd0;
second_edge_taps_r <= #TCQ 'd0;
if ((SIM_CAL_OPTION == "FAST_CAL") ||
(cal1_cnt_cpt_r >= DQS_WIDTH-1)) begin
if (mpr_rdlvl_done_r) begin
rdlvl_last_byte_done <= #TCQ 1'b1;
mpr_last_byte_done <= #TCQ 1'b0;
end else begin
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b1;
end
end
// Wait until precharge that occurs in between calibration of
// DQS groups is finished
if (prech_done) begin // || (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3"))) begin
if (SIM_CAL_OPTION == "FAST_CAL") begin
//rdlvl_rank_done_r <= #TCQ 1'b1;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_DONE; //CAL1_REGL_LOAD;
end else if (cal1_cnt_cpt_r >= DQS_WIDTH-1) begin
if (~mpr_rdlvl_done_r) begin
mpr_rank_done_r <= #TCQ 1'b1;
// if (rnk_cnt_r == RANKS-1) begin
// All DQS groups in all ranks done
cal1_state_r <= #TCQ CAL1_DONE;
cal1_cnt_cpt_r <= #TCQ 'b0;
// end else begin
// // Process DQS groups in next rank
// rnk_cnt_r <= #TCQ rnk_cnt_r + 1;
// new_cnt_cpt_r <= #TCQ 1'b1;
// cal1_cnt_cpt_r <= #TCQ 'b0;
// cal1_state_r <= #TCQ CAL1_IDLE;
// end
end else begin
// All DQS groups in a rank done
rdlvl_rank_done_r <= #TCQ 1'b1;
if (rnk_cnt_r == RANKS-1) begin
// All DQS groups in all ranks done
cal1_state_r <= #TCQ CAL1_REGL_LOAD;
end else begin
// Process DQS groups in next rank
rnk_cnt_r <= #TCQ rnk_cnt_r + 1;
new_cnt_cpt_r <= #TCQ 1'b1;
cal1_cnt_cpt_r <= #TCQ 'b0;
cal1_state_r <= #TCQ CAL1_IDLE;
end
end
end else begin
// Process next DQS group
new_cnt_cpt_r <= #TCQ 1'b1;
cal1_cnt_cpt_r <= #TCQ cal1_cnt_cpt_r + 1;
cal1_state_r <= #TCQ CAL1_NEW_DQS_PREWAIT;
end
end
end
CAL1_NEW_DQS_PREWAIT: begin
if (!cal1_wait_r) begin
if (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3"))
cal1_state_r <= #TCQ CAL1_MPR_NEW_DQS_WAIT;
else
cal1_state_r <= #TCQ CAL1_NEW_DQS_WAIT;
end
end
// Load rank registers in Phaser_IN
CAL1_REGL_LOAD: begin
rdlvl_rank_done_r <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
cal1_prech_req_r <= #TCQ 1'b0;
cal1_cnt_cpt_r <= #TCQ 'b0;
rnk_cnt_r <= #TCQ 2'b00;
if ((regl_rank_cnt == RANKS-1) &&
((regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1))) begin
cal1_state_r <= #TCQ CAL1_DONE;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
end else
cal1_state_r <= #TCQ CAL1_REGL_LOAD;
end
CAL1_RDLVL_ERR: begin
rdlvl_stg1_err <= #TCQ 1'b1;
end
// Done with this stage of calibration
// if used, allow DEBUG_PORT to control taps
CAL1_DONE: begin
mpr_rdlvl_done_r <= #TCQ 1'b1;
cal1_prech_req_r <= #TCQ 1'b0;
if (~mpr_rdlvl_done_r && (OCAL_EN=="ON") && (DRAM_TYPE == "DDR3")) begin
rdlvl_stg1_done <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_IDLE;
end else
rdlvl_stg1_done <= #TCQ 1'b1;
end
endcase
end
// verilint STARC-2.2.3.3 on
endmodule
|
module mig_7series_v2_3_ddr_phy_rdlvl #
(
parameter TCQ = 100, // clk->out delay (sim only)
parameter nCK_PER_CLK = 2, // # of memory clocks per CLK
parameter CLK_PERIOD = 3333, // Internal clock period (in ps)
parameter DQ_WIDTH = 64, // # of DQ (data)
parameter DQS_CNT_WIDTH = 3, // = ceil(log2(DQS_WIDTH))
parameter DQS_WIDTH = 8, // # of DQS (strobe)
parameter DRAM_WIDTH = 8, // # of DQ per DQS
parameter RANKS = 1, // # of DRAM ranks
parameter PER_BIT_DESKEW = "ON", // Enable per-bit DQ deskew
parameter SIM_CAL_OPTION = "NONE", // Skip various calibration steps
parameter DEBUG_PORT = "OFF", // Enable debug port
parameter DRAM_TYPE = "DDR3", // Memory I/F type: "DDR3", "DDR2"
parameter OCAL_EN = "ON",
parameter IDELAY_ADJ = "ON"
)
(
input clk,
input rst,
// Calibration status, control signals
input mpr_rdlvl_start,
output mpr_rdlvl_done,
output reg mpr_last_byte_done,
output mpr_rnk_done,
input rdlvl_stg1_start,
output reg rdlvl_stg1_done /* synthesis syn_maxfan = 30 */,
output rdlvl_stg1_rnk_done,
output reg rdlvl_stg1_err,
output mpr_rdlvl_err,
output rdlvl_err,
output reg rdlvl_prech_req,
output reg rdlvl_last_byte_done,
output reg rdlvl_assrt_common,
input prech_done,
input phy_if_empty,
input [4:0] idelaye2_init_val,
// Captured data in fabric clock domain
input [2*nCK_PER_CLK*DQ_WIDTH-1:0] rd_data,
// Decrement initial Phaser_IN Fine tap delay
input dqs_po_dec_done,
input [5:0] pi_counter_read_val,
// Stage 1 calibration outputs
output reg pi_fine_dly_dec_done,
output reg pi_en_stg2_f,
output reg pi_stg2_f_incdec,
output reg pi_stg2_load,
output reg [5:0] pi_stg2_reg_l,
output [DQS_CNT_WIDTH:0] pi_stg2_rdlvl_cnt,
// To DQ IDELAY required to find left edge of
// valid window
output idelay_ce,
output idelay_inc,
input idelay_ld,
input [DQS_CNT_WIDTH:0] wrcal_cnt,
// Only output if Per-bit de-skew enabled
output reg [5*RANKS*DQ_WIDTH-1:0] dlyval_dq,
// Debug Port
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_first_edge_cnt,
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_second_edge_cnt,
output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_tap_cnt,
output [5*DQS_WIDTH*RANKS-1:0] dbg_dq_idelay_tap_cnt,
input dbg_idel_up_all,
input dbg_idel_down_all,
input dbg_idel_up_cpt,
input dbg_idel_down_cpt,
input [DQS_CNT_WIDTH-1:0] dbg_sel_idel_cpt,
input dbg_sel_all_idel_cpt,
output [255:0] dbg_phy_rdlvl
);
  // Minimum time (in IDELAY taps) for which capture data must be stable for
  // the algorithm to consider a valid data eye to be found. The read leveling
  // logic will ignore any window found smaller than this value. Limits
  // on how small this number can be are determined by: (1) the algorithmic
  // limitation of how many taps wide the data eye can be (3 taps), and (2)
// how wide regions of "instability" that occur around the edges of the
// read valid window can be (i.e. need to be able to filter out "false"
// windows that occur for a short # of taps around the edges of the true
// data window, although with multi-sampling during read leveling, this is
// not as much a concern) - the larger the value, the more protection
// against "false" windows
localparam MIN_EYE_SIZE = 16;
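  // For example, with MIN_EYE_SIZE = 16 the per-bit logic must observe 16
  // consecutive edge-free sampling intervals before pb_found_stable_eye_r
  // is asserted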
// Length of calibration sequence (in # of words)
localparam CAL_PAT_LEN = 8;
// Read data shift register length
localparam RD_SHIFT_LEN = CAL_PAT_LEN / (2*nCK_PER_CLK);
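  // e.g. with CAL_PAT_LEN = 8: RD_SHIFT_LEN = 2 for nCK_PER_CLK = 2, or 1
  // for nCK_PER_CLK = 4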
// # of cycles required to perform read data shift register compare
  // This is defined as the number of cycles from when the new data is loaded
  // until signal found_edge_r is valid
localparam RD_SHIFT_COMP_DELAY = 5;
// worst-case # of cycles to wait to ensure that both the SR and
// PREV_SR shift registers have valid data, and that the comparison
// of the two shift register values is valid. The "+1" at the end of
// this equation is a fudge factor, I freely admit that
localparam SR_VALID_DELAY = (2 * RD_SHIFT_LEN) + RD_SHIFT_COMP_DELAY + 1;
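  // e.g. for RD_SHIFT_LEN = 2: SR_VALID_DELAY = (2*2) + 5 + 1 = 10 cycles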
// # of clock cycles to wait after changing tap value or read data MUX
// to allow: (1) tap chain to settle, (2) for delayed input to propagate
// thru ISERDES, (3) for the read data comparison logic to have time to
// output the comparison of two consecutive samples of the settled read data
// The minimum delay is 16 cycles, which should be good enough to handle all
// three of the above conditions for the simulation-only case with a short
// training pattern. For H/W (or for simulation with longer training
// pattern), it will take longer to store and compare two consecutive
// samples, and the value of this parameter will reflect that
localparam PIPE_WAIT_CNT = (SR_VALID_DELAY < 8) ? 16 : (SR_VALID_DELAY + 8);
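  // e.g. for SR_VALID_DELAY = 10 (>= 8): PIPE_WAIT_CNT = 10 + 8 = 18 cycles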
// # of read data samples to examine when detecting whether an edge has
  // occurred during stage 1 calibration. Width of local param must be
// changed as appropriate. Note that there are two counters used, each
// counter can be changed independently of the other - they are used in
// cascade to create a larger counter
localparam [11:0] DETECT_EDGE_SAMPLE_CNT0 = 12'h001; //12'hFFF;
localparam [11:0] DETECT_EDGE_SAMPLE_CNT1 = 12'h001; // 12'h1FF Must be > 0
localparam [5:0] CAL1_IDLE = 6'h00;
localparam [5:0] CAL1_NEW_DQS_WAIT = 6'h01;
localparam [5:0] CAL1_STORE_FIRST_WAIT = 6'h02;
localparam [5:0] CAL1_PAT_DETECT = 6'h03;
localparam [5:0] CAL1_DQ_IDEL_TAP_INC = 6'h04;
localparam [5:0] CAL1_DQ_IDEL_TAP_INC_WAIT = 6'h05;
localparam [5:0] CAL1_DQ_IDEL_TAP_DEC = 6'h06;
localparam [5:0] CAL1_DQ_IDEL_TAP_DEC_WAIT = 6'h07;
localparam [5:0] CAL1_DETECT_EDGE = 6'h08;
localparam [5:0] CAL1_IDEL_INC_CPT = 6'h09;
localparam [5:0] CAL1_IDEL_INC_CPT_WAIT = 6'h0A;
localparam [5:0] CAL1_CALC_IDEL = 6'h0B;
localparam [5:0] CAL1_IDEL_DEC_CPT = 6'h0C;
localparam [5:0] CAL1_IDEL_DEC_CPT_WAIT = 6'h0D;
localparam [5:0] CAL1_NEXT_DQS = 6'h0E;
localparam [5:0] CAL1_DONE = 6'h0F;
localparam [5:0] CAL1_PB_STORE_FIRST_WAIT = 6'h10;
localparam [5:0] CAL1_PB_DETECT_EDGE = 6'h11;
localparam [5:0] CAL1_PB_INC_CPT = 6'h12;
localparam [5:0] CAL1_PB_INC_CPT_WAIT = 6'h13;
localparam [5:0] CAL1_PB_DEC_CPT_LEFT = 6'h14;
localparam [5:0] CAL1_PB_DEC_CPT_LEFT_WAIT = 6'h15;
localparam [5:0] CAL1_PB_DETECT_EDGE_DQ = 6'h16;
localparam [5:0] CAL1_PB_INC_DQ = 6'h17;
localparam [5:0] CAL1_PB_INC_DQ_WAIT = 6'h18;
localparam [5:0] CAL1_PB_DEC_CPT = 6'h19;
localparam [5:0] CAL1_PB_DEC_CPT_WAIT = 6'h1A;
localparam [5:0] CAL1_REGL_LOAD = 6'h1B;
localparam [5:0] CAL1_RDLVL_ERR = 6'h1C;
localparam [5:0] CAL1_MPR_NEW_DQS_WAIT = 6'h1D;
localparam [5:0] CAL1_VALID_WAIT = 6'h1E;
localparam [5:0] CAL1_MPR_PAT_DETECT = 6'h1F;
localparam [5:0] CAL1_NEW_DQS_PREWAIT = 6'h20;
integer a;
integer b;
integer d;
integer e;
integer f;
integer h;
integer g;
integer i;
integer j;
integer k;
integer l;
integer m;
integer n;
integer r;
integer p;
integer q;
integer s;
integer t;
integer u;
integer w;
integer ce_i;
integer ce_rnk_i;
integer aa;
integer bb;
integer cc;
integer dd;
genvar x;
genvar z;
reg [DQS_CNT_WIDTH:0] cal1_cnt_cpt_r;
wire [DQS_CNT_WIDTH+2:0]cal1_cnt_cpt_timing;
reg [DQS_CNT_WIDTH:0] cal1_cnt_cpt_timing_r;
reg cal1_dq_idel_ce;
reg cal1_dq_idel_inc;
reg cal1_dlyce_cpt_r;
reg cal1_dlyinc_cpt_r;
reg cal1_dlyce_dq_r;
reg cal1_dlyinc_dq_r;
reg cal1_wait_cnt_en_r;
reg [4:0] cal1_wait_cnt_r;
reg cal1_wait_r;
reg [DQ_WIDTH-1:0] dlyce_dq_r;
reg dlyinc_dq_r;
reg [4:0] dlyval_dq_reg_r [0:RANKS-1][0:DQ_WIDTH-1];
reg cal1_prech_req_r;
reg [5:0] cal1_state_r;
reg [5:0] cal1_state_r1;
reg [5:0] cnt_idel_dec_cpt_r;
reg [3:0] cnt_shift_r;
reg detect_edge_done_r;
reg [5:0] right_edge_taps_r;
reg [5:0] first_edge_taps_r;
reg found_edge_r;
reg found_first_edge_r;
reg found_second_edge_r;
reg found_stable_eye_r;
reg found_stable_eye_last_r;
reg found_edge_all_r;
reg [5:0] tap_cnt_cpt_r;
reg tap_limit_cpt_r;
reg [4:0] idel_tap_cnt_dq_pb_r;
reg idel_tap_limit_dq_pb_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall0_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall1_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise0_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise1_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall2_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall3_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise2_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise3_r;
reg mux_rd_valid_r;
reg new_cnt_cpt_r;
reg [RD_SHIFT_LEN-1:0] old_sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] old_sr_rise3_r [DRAM_WIDTH-1:0];
reg [DRAM_WIDTH-1:0] old_sr_match_fall0_r;
reg [DRAM_WIDTH-1:0] old_sr_match_fall1_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise0_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise1_r;
reg [DRAM_WIDTH-1:0] old_sr_match_fall2_r;
reg [DRAM_WIDTH-1:0] old_sr_match_fall3_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise2_r;
reg [DRAM_WIDTH-1:0] old_sr_match_rise3_r;
reg [4:0] pb_cnt_eye_size_r [DRAM_WIDTH-1:0];
reg [DRAM_WIDTH-1:0] pb_detect_edge_done_r;
reg [DRAM_WIDTH-1:0] pb_found_edge_last_r;
reg [DRAM_WIDTH-1:0] pb_found_edge_r;
reg [DRAM_WIDTH-1:0] pb_found_first_edge_r;
reg [DRAM_WIDTH-1:0] pb_found_stable_eye_r;
reg [DRAM_WIDTH-1:0] pb_last_tap_jitter_r;
reg pi_en_stg2_f_timing;
reg pi_stg2_f_incdec_timing;
reg pi_stg2_load_timing;
reg [5:0] pi_stg2_reg_l_timing;
reg [DRAM_WIDTH-1:0] prev_sr_diff_r;
reg [RD_SHIFT_LEN-1:0] prev_sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] prev_sr_rise3_r [DRAM_WIDTH-1:0];
reg [DRAM_WIDTH-1:0] prev_sr_match_cyc2_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall0_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall1_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise0_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise1_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall2_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_fall3_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise2_r;
reg [DRAM_WIDTH-1:0] prev_sr_match_rise3_r;
wire [DQ_WIDTH-1:0] rd_data_rise0;
wire [DQ_WIDTH-1:0] rd_data_fall0;
wire [DQ_WIDTH-1:0] rd_data_rise1;
wire [DQ_WIDTH-1:0] rd_data_fall1;
wire [DQ_WIDTH-1:0] rd_data_rise2;
wire [DQ_WIDTH-1:0] rd_data_fall2;
wire [DQ_WIDTH-1:0] rd_data_rise3;
wire [DQ_WIDTH-1:0] rd_data_fall3;
reg samp_cnt_done_r;
reg samp_edge_cnt0_en_r;
reg [11:0] samp_edge_cnt0_r;
reg samp_edge_cnt1_en_r;
reg [11:0] samp_edge_cnt1_r;
reg [DQS_CNT_WIDTH:0] rd_mux_sel_r;
reg [5:0] second_edge_taps_r;
reg [RD_SHIFT_LEN-1:0] sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise3_r [DRAM_WIDTH-1:0];
reg store_sr_r;
reg store_sr_req_pulsed_r;
reg store_sr_req_r;
reg sr_valid_r;
reg sr_valid_r1;
reg sr_valid_r2;
reg [DRAM_WIDTH-1:0] old_sr_diff_r;
reg [DRAM_WIDTH-1:0] old_sr_match_cyc2_r;
reg pat0_data_match_r;
reg pat1_data_match_r;
wire pat_data_match_r;
wire [RD_SHIFT_LEN-1:0] pat0_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall3 [3:0];
reg [DRAM_WIDTH-1:0] pat0_match_fall0_r;
reg pat0_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_fall1_r;
reg pat0_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_fall2_r;
reg pat0_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_fall3_r;
reg pat0_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise0_r;
reg pat0_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise1_r;
reg pat0_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise2_r;
reg pat0_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] pat0_match_rise3_r;
reg pat0_match_rise3_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall0_r;
reg pat1_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall1_r;
reg pat1_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall2_r;
reg pat1_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall3_r;
reg pat1_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise0_r;
reg pat1_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise1_r;
reg pat1_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise2_r;
reg pat1_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise3_r;
reg pat1_match_rise3_and_r;
reg [4:0] idelay_tap_cnt_r [0:RANKS-1][0:DQS_WIDTH-1];
reg [5*DQS_WIDTH*RANKS-1:0] idelay_tap_cnt_w;
reg [4:0] idelay_tap_cnt_slice_r;
reg idelay_tap_limit_r;
wire [RD_SHIFT_LEN-1:0] pat0_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat0_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat0_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] idel_pat1_fall3 [3:0];
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise0_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall0_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise1_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall1_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise2_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall2_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_rise3_r;
reg [DRAM_WIDTH-1:0] idel_pat0_match_fall3_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise0_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall0_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise1_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall1_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise2_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall2_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_rise3_r;
reg [DRAM_WIDTH-1:0] idel_pat1_match_fall3_r;
reg idel_pat0_match_rise0_and_r;
reg idel_pat0_match_fall0_and_r;
reg idel_pat0_match_rise1_and_r;
reg idel_pat0_match_fall1_and_r;
reg idel_pat0_match_rise2_and_r;
reg idel_pat0_match_fall2_and_r;
reg idel_pat0_match_rise3_and_r;
reg idel_pat0_match_fall3_and_r;
reg idel_pat1_match_rise0_and_r;
reg idel_pat1_match_fall0_and_r;
reg idel_pat1_match_rise1_and_r;
reg idel_pat1_match_fall1_and_r;
reg idel_pat1_match_rise2_and_r;
reg idel_pat1_match_fall2_and_r;
reg idel_pat1_match_rise3_and_r;
reg idel_pat1_match_fall3_and_r;
reg idel_pat0_data_match_r;
reg idel_pat1_data_match_r;
reg idel_pat_data_match;
reg idel_pat_data_match_r;
reg [4:0] idel_dec_cnt;
reg [5:0] rdlvl_dqs_tap_cnt_r [0:RANKS-1][0:DQS_WIDTH-1];
reg [1:0] rnk_cnt_r;
reg rdlvl_rank_done_r;
reg [3:0] done_cnt;
reg [1:0] regl_rank_cnt;
reg [DQS_CNT_WIDTH:0] regl_dqs_cnt;
reg [DQS_CNT_WIDTH:0] regl_dqs_cnt_r;
wire [DQS_CNT_WIDTH+2:0]regl_dqs_cnt_timing;
reg regl_rank_done_r;
reg rdlvl_stg1_start_r;
reg dqs_po_dec_done_r1;
reg dqs_po_dec_done_r2;
reg fine_dly_dec_done_r1;
reg fine_dly_dec_done_r2;
reg [3:0] wait_cnt_r;
reg [5:0] pi_rdval_cnt;
reg pi_cnt_dec;
reg mpr_valid_r;
reg mpr_valid_r1;
reg mpr_valid_r2;
reg mpr_rd_rise0_prev_r;
reg mpr_rd_fall0_prev_r;
reg mpr_rd_rise1_prev_r;
reg mpr_rd_fall1_prev_r;
reg mpr_rd_rise2_prev_r;
reg mpr_rd_fall2_prev_r;
reg mpr_rd_rise3_prev_r;
reg mpr_rd_fall3_prev_r;
reg mpr_rdlvl_done_r;
reg mpr_rdlvl_done_r1;
reg mpr_rdlvl_done_r2;
reg mpr_rdlvl_start_r;
reg mpr_rank_done_r;
reg [2:0] stable_idel_cnt;
reg inhibit_edge_detect_r;
reg idel_pat_detect_valid_r;
reg idel_mpr_pat_detect_r;
reg mpr_pat_detect_r;
reg mpr_dec_cpt_r;
reg idel_adj_inc; //IDELAY adjustment
wire [1:0] idelay_adj;
wire pb_detect_edge_setup;
wire pb_detect_edge;
// Debug
reg [6*DQS_WIDTH-1:0] dbg_cpt_first_edge_taps;
reg [6*DQS_WIDTH-1:0] dbg_cpt_second_edge_taps;
reg [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_tap_cnt_w;
//IDELAY adjustment setting for -1
//2'b10 : IDELAY - 1
//2'b01 : IDELAY + 1
//2'b00 : No IDELAY adjustment
assign idelay_adj = (IDELAY_ADJ == "ON") ? 2'b10: 2'b00;
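// i.e. with IDELAY_ADJ = "ON" the final IDELAY setting is nudged down by
// one tap (2'b10); otherwise no adjustment is applied (2'b00)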
//***************************************************************************
// Debug
//***************************************************************************
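// Flatten the per-rank, per-DQS-group tap-count arrays into flat debug
// buses: 5 bits per group for the DQ IDELAY taps (idelay_tap_cnt_w) and
// 6 bits per group for the DQS/CPT taps (dbg_cpt_tap_cnt_w)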
always @(*) begin
for (d = 0; d < RANKS; d = d + 1) begin
for (e = 0; e < DQS_WIDTH; e = e + 1) begin
idelay_tap_cnt_w[(5*e+5*DQS_WIDTH*d)+:5] = idelay_tap_cnt_r[d][e];
dbg_cpt_tap_cnt_w[(6*e+6*DQS_WIDTH*d)+:6] = rdlvl_dqs_tap_cnt_r[d][e];
end
end
end
assign mpr_rdlvl_err = rdlvl_stg1_err & (!mpr_rdlvl_done);
assign rdlvl_err = rdlvl_stg1_err & (mpr_rdlvl_done);
assign dbg_phy_rdlvl[0] = rdlvl_stg1_start;
assign dbg_phy_rdlvl[1] = pat_data_match_r;
assign dbg_phy_rdlvl[2] = mux_rd_valid_r;
assign dbg_phy_rdlvl[3] = idelay_tap_limit_r;
assign dbg_phy_rdlvl[8:4] = 'b0;
assign dbg_phy_rdlvl[14:9] = cal1_state_r[5:0];
assign dbg_phy_rdlvl[20:15] = cnt_idel_dec_cpt_r;
assign dbg_phy_rdlvl[21] = found_first_edge_r;
assign dbg_phy_rdlvl[22] = found_second_edge_r;
assign dbg_phy_rdlvl[23] = found_edge_r;
assign dbg_phy_rdlvl[24] = store_sr_r;
// [40:25] previously used for sr, old_sr shift registers. If connecting
// these signals again, don't forget to parameterize based on RD_SHIFT_LEN
assign dbg_phy_rdlvl[40:25] = 'b0;
assign dbg_phy_rdlvl[41] = sr_valid_r;
assign dbg_phy_rdlvl[42] = found_stable_eye_r;
assign dbg_phy_rdlvl[48:43] = tap_cnt_cpt_r;
assign dbg_phy_rdlvl[54:49] = first_edge_taps_r;
assign dbg_phy_rdlvl[60:55] = second_edge_taps_r;
assign dbg_phy_rdlvl[64:61] = cal1_cnt_cpt_timing_r;
assign dbg_phy_rdlvl[65] = cal1_dlyce_cpt_r;
assign dbg_phy_rdlvl[66] = cal1_dlyinc_cpt_r;
assign dbg_phy_rdlvl[67] = found_edge_r;
assign dbg_phy_rdlvl[68] = found_first_edge_r;
assign dbg_phy_rdlvl[73:69] = 'b0;
assign dbg_phy_rdlvl[74] = idel_pat_data_match;
assign dbg_phy_rdlvl[75] = idel_pat0_data_match_r;
assign dbg_phy_rdlvl[76] = idel_pat1_data_match_r;
assign dbg_phy_rdlvl[77] = pat0_data_match_r;
assign dbg_phy_rdlvl[78] = pat1_data_match_r;
assign dbg_phy_rdlvl[79+:5*DQS_WIDTH*RANKS] = idelay_tap_cnt_w;
assign dbg_phy_rdlvl[170+:8] = mux_rd_rise0_r;
assign dbg_phy_rdlvl[178+:8] = mux_rd_fall0_r;
assign dbg_phy_rdlvl[186+:8] = mux_rd_rise1_r;
assign dbg_phy_rdlvl[194+:8] = mux_rd_fall1_r;
assign dbg_phy_rdlvl[202+:8] = mux_rd_rise2_r;
assign dbg_phy_rdlvl[210+:8] = mux_rd_fall2_r;
assign dbg_phy_rdlvl[218+:8] = mux_rd_rise3_r;
assign dbg_phy_rdlvl[226+:8] = mux_rd_fall3_r;
//***************************************************************************
// Debug output
//***************************************************************************
// CPT taps
assign dbg_cpt_first_edge_cnt = dbg_cpt_first_edge_taps;
assign dbg_cpt_second_edge_cnt = dbg_cpt_second_edge_taps;
assign dbg_cpt_tap_cnt = dbg_cpt_tap_cnt_w;
assign dbg_dq_idelay_tap_cnt = idelay_tap_cnt_w;
// Record first and second edges found during CPT calibration
generate
always @(posedge clk)
if (rst) begin
dbg_cpt_first_edge_taps <= #TCQ 'b0;
dbg_cpt_second_edge_taps <= #TCQ 'b0;
end else if ((SIM_CAL_OPTION == "FAST_CAL") & (cal1_state_r1 == CAL1_CALC_IDEL)) begin
//for (ce_rnk_i = 0; ce_rnk_i < RANKS; ce_rnk_i = ce_rnk_i + 1) begin: gen_dbg_cpt_rnk
for (ce_i = 0; ce_i < DQS_WIDTH; ce_i = ce_i + 1) begin: gen_dbg_cpt_edge
if (found_first_edge_r)
dbg_cpt_first_edge_taps[(6*ce_i)+:6]
<= #TCQ first_edge_taps_r;
if (found_second_edge_r)
dbg_cpt_second_edge_taps[(6*ce_i)+:6]
<= #TCQ second_edge_taps_r;
end
//end
end else if (cal1_state_r == CAL1_CALC_IDEL) begin
// Record the tap counts of the first and second edges found during
// CPT calibration for each DQS group. If neither edge has
// been found, then those taps will remain 0
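// Note: (x << 2) + (x << 1) = 6*x; the shift-and-add form below computes
// the 6-bit-per-DQS-group bit index without a multiplier (the same
// approach is used for regl_dqs_cnt further below)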
if (found_first_edge_r)
dbg_cpt_first_edge_taps[((cal1_cnt_cpt_timing <<2) + (cal1_cnt_cpt_timing <<1))+:6]
<= #TCQ first_edge_taps_r;
if (found_second_edge_r)
dbg_cpt_second_edge_taps[((cal1_cnt_cpt_timing <<2) + (cal1_cnt_cpt_timing <<1))+:6]
<= #TCQ second_edge_taps_r;
end
endgenerate
assign rdlvl_stg1_rnk_done = rdlvl_rank_done_r;// || regl_rank_done_r;
assign mpr_rnk_done = mpr_rank_done_r;
assign mpr_rdlvl_done = ((DRAM_TYPE == "DDR3") && (OCAL_EN == "ON")) ? //&& (SIM_CAL_OPTION == "NONE")
mpr_rdlvl_done_r : 1'b1;
//**************************************************************************
// DQS count to hard PHY during write calibration using Phaser_OUT Stage2
// coarse delay
//**************************************************************************
assign pi_stg2_rdlvl_cnt = (cal1_state_r == CAL1_REGL_LOAD) ? regl_dqs_cnt_r : cal1_cnt_cpt_r;
assign idelay_ce = cal1_dq_idel_ce;
assign idelay_inc = cal1_dq_idel_inc;
//***************************************************************************
// Assert calib_in_common in FAST_CAL mode for IDELAY tap increments to all
// DQs simultaneously
//***************************************************************************
always @(posedge clk) begin
if (rst)
rdlvl_assrt_common <= #TCQ 1'b0;
else if ((SIM_CAL_OPTION == "FAST_CAL") & rdlvl_stg1_start &
!rdlvl_stg1_start_r)
rdlvl_assrt_common <= #TCQ 1'b1;
else if (!idel_pat_data_match_r & idel_pat_data_match)
rdlvl_assrt_common <= #TCQ 1'b0;
end
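// rdlvl_assrt_common is set on the rising edge of rdlvl_stg1_start in
// FAST_CAL mode and cleared on the rising edge of idel_pat_data_match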
//***************************************************************************
// Data mux to route appropriate bit to calibration logic - i.e. calibration
// is done sequentially, one bit (or DQS group) at a time
//***************************************************************************
generate
if (nCK_PER_CLK == 4) begin: rd_data_div4_logic_clk
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
assign rd_data_rise2 = rd_data[5*DQ_WIDTH-1:4*DQ_WIDTH];
assign rd_data_fall2 = rd_data[6*DQ_WIDTH-1:5*DQ_WIDTH];
assign rd_data_rise3 = rd_data[7*DQ_WIDTH-1:6*DQ_WIDTH];
assign rd_data_fall3 = rd_data[8*DQ_WIDTH-1:7*DQ_WIDTH];
end else begin: rd_data_div2_logic_clk
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
end
endgenerate
always @(posedge clk) begin
rd_mux_sel_r <= #TCQ cal1_cnt_cpt_r;
end
// Register outputs for improved timing.
// NOTE: Will need to change when per-bit DQ deskew is supported.
// Currently all bits in a DQS group are checked in aggregate
generate
genvar mux_i;
for (mux_i = 0; mux_i < DRAM_WIDTH; mux_i = mux_i + 1) begin: gen_mux_rd
always @(posedge clk) begin
mux_rd_rise0_r[mux_i] <= #TCQ rd_data_rise0[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall0_r[mux_i] <= #TCQ rd_data_fall0[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_rise1_r[mux_i] <= #TCQ rd_data_rise1[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall1_r[mux_i] <= #TCQ rd_data_fall1[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_rise2_r[mux_i] <= #TCQ rd_data_rise2[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall2_r[mux_i] <= #TCQ rd_data_fall2[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_rise3_r[mux_i] <= #TCQ rd_data_rise3[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
mux_rd_fall3_r[mux_i] <= #TCQ rd_data_fall3[DRAM_WIDTH*rd_mux_sel_r +
mux_i];
end
end
endgenerate
//***************************************************************************
// MPR Read Leveling
//***************************************************************************
// Store the previous read data for later comparison. Only bit 0 is used
// since the MPR contents (01010101) are generally available on DQ[0] per
// the JEDEC spec.
always @(posedge clk)begin
if ((cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) ||
((cal1_state_r == CAL1_MPR_PAT_DETECT) && (idel_pat_detect_valid_r)))begin
mpr_rd_rise0_prev_r <= #TCQ mux_rd_rise0_r[0];
mpr_rd_fall0_prev_r <= #TCQ mux_rd_fall0_r[0];
mpr_rd_rise1_prev_r <= #TCQ mux_rd_rise1_r[0];
mpr_rd_fall1_prev_r <= #TCQ mux_rd_fall1_r[0];
mpr_rd_rise2_prev_r <= #TCQ mux_rd_rise2_r[0];
mpr_rd_fall2_prev_r <= #TCQ mux_rd_fall2_r[0];
mpr_rd_rise3_prev_r <= #TCQ mux_rd_rise3_r[0];
mpr_rd_fall3_prev_r <= #TCQ mux_rd_fall3_r[0];
end
end
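// The MPR detection logic below: (1) counts how long the captured pattern
// on DQ[0] has stayed stable across IDELAY taps (stable_idel_cnt),
// (2) inhibits edge detection while the capture still shows the 1010...
// phase (inhibit_edge_detect_r), and (3) asserts idel_mpr_pat_detect_r
// once a stable 0101... capture transitions to 1010...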
generate
if (nCK_PER_CLK == 4) begin: mpr_4to1
// Require the captured pattern to remain stable for 2 IDELAY taps
// (78 ps tap resolution)
always @(posedge clk) begin
if (rst | (cal1_state_r == CAL1_NEW_DQS_PREWAIT) |
//(cal1_state_r == CAL1_DETECT_EDGE) |
(mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) |
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) |
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) |
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]) |
(mpr_rd_rise2_prev_r != mux_rd_rise2_r[0]) |
(mpr_rd_fall2_prev_r != mux_rd_fall2_r[0]) |
(mpr_rd_rise3_prev_r != mux_rd_rise3_r[0]) |
(mpr_rd_fall3_prev_r != mux_rd_fall3_r[0]))
stable_idel_cnt <= #TCQ 3'd0;
else if ((|idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing]) &
((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idel_pat_detect_valid_r))) begin
if ((mpr_rd_rise0_prev_r == mux_rd_rise0_r[0]) &
(mpr_rd_fall0_prev_r == mux_rd_fall0_r[0]) &
(mpr_rd_rise1_prev_r == mux_rd_rise1_r[0]) &
(mpr_rd_fall1_prev_r == mux_rd_fall1_r[0]) &
(mpr_rd_rise2_prev_r == mux_rd_rise2_r[0]) &
(mpr_rd_fall2_prev_r == mux_rd_fall2_r[0]) &
(mpr_rd_rise3_prev_r == mux_rd_rise3_r[0]) &
(mpr_rd_fall3_prev_r == mux_rd_fall3_r[0]) &
(stable_idel_cnt < 3'd2))
stable_idel_cnt <= #TCQ stable_idel_cnt + 1;
end
end
always @(posedge clk) begin
if (rst |
(mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r &
mpr_rd_rise2_prev_r & ~mpr_rd_fall2_prev_r &
mpr_rd_rise3_prev_r & ~mpr_rd_fall3_prev_r))
inhibit_edge_detect_r <= 1'b1;
// Wait for settling time after idelay tap increment before
// de-asserting inhibit_edge_detect_r
else if ((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd1) &
(~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r &
~mpr_rd_rise2_prev_r & mpr_rd_fall2_prev_r &
~mpr_rd_rise3_prev_r & mpr_rd_fall3_prev_r))
inhibit_edge_detect_r <= 1'b0;
end
//checking for transition from 01010101 to 10101010
always @(posedge clk)begin
if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) |
inhibit_edge_detect_r)
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 10101010 is not the correct pattern
else if ((mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r &
mpr_rd_rise2_prev_r & ~mpr_rd_fall2_prev_r &
mpr_rd_rise3_prev_r & ~mpr_rd_fall3_prev_r) ||
((stable_idel_cnt < 3'd2) & (cal1_state_r == CAL1_MPR_PAT_DETECT)
&& (idel_pat_detect_valid_r)))
//|| (idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] < 5'd2))
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 01010101 to 10101010 is the correct transition
else if ((~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r &
~mpr_rd_rise2_prev_r & mpr_rd_fall2_prev_r &
~mpr_rd_rise3_prev_r & mpr_rd_fall3_prev_r) &
(stable_idel_cnt == 3'd2) &
((mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) ||
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) ||
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) ||
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]) ||
(mpr_rd_rise2_prev_r != mux_rd_rise2_r[0]) ||
(mpr_rd_fall2_prev_r != mux_rd_fall2_r[0]) ||
(mpr_rd_rise3_prev_r != mux_rd_rise3_r[0]) ||
(mpr_rd_fall3_prev_r != mux_rd_fall3_r[0])))
idel_mpr_pat_detect_r <= #TCQ 1'b1;
end
end else if (nCK_PER_CLK == 2) begin: mpr_2to1
// Require the captured pattern to remain stable for 2 IDELAY taps
// (78 ps tap resolution)
always @(posedge clk) begin
if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) |
(mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) |
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) |
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) |
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]))
stable_idel_cnt <= #TCQ 3'd0;
else if ((idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd0) &
((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idel_pat_detect_valid_r))) begin
if ((mpr_rd_rise0_prev_r == mux_rd_rise0_r[0]) &
(mpr_rd_fall0_prev_r == mux_rd_fall0_r[0]) &
(mpr_rd_rise1_prev_r == mux_rd_rise1_r[0]) &
(mpr_rd_fall1_prev_r == mux_rd_fall1_r[0]) &
(stable_idel_cnt < 3'd2))
stable_idel_cnt <= #TCQ stable_idel_cnt + 1;
end
end
always @(posedge clk) begin
if (rst |
(mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r))
inhibit_edge_detect_r <= 1'b1;
else if ((cal1_state_r == CAL1_MPR_PAT_DETECT) &
(idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd1) &
(~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r))
inhibit_edge_detect_r <= 1'b0;
end
//checking for transition from 01010101 to 10101010
always @(posedge clk)begin
if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) |
inhibit_edge_detect_r)
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 1010 is not the correct pattern
else if ((mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r &
mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r) ||
((stable_idel_cnt < 3'd2) & (cal1_state_r == CAL1_MPR_PAT_DETECT)
& (idel_pat_detect_valid_r)))
// ||(idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] < 5'd2))
idel_mpr_pat_detect_r <= #TCQ 1'b0;
// 0101 to 1010 is the correct transition
else if ((~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r &
~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r) &
(stable_idel_cnt == 3'd2) &
((mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) ||
(mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) ||
(mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) ||
(mpr_rd_fall1_prev_r != mux_rd_fall1_r[0])))
idel_mpr_pat_detect_r <= #TCQ 1'b1;
end
end
endgenerate
// Registered signal indicates when mux_rd_rise/fall_r is valid
always @(posedge clk)
mux_rd_valid_r <= #TCQ ~phy_if_empty;
//***************************************************************************
// Decrement initial Phaser_IN fine delay value before proceeding with
// read calibration
//***************************************************************************
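// Sequence: when dqs_po_dec_done asserts, capture the Phaser_IN counter
// value (pi_counter_read_val) into pi_rdval_cnt, then pulse pi_cnt_dec
// once per 8-cycle wait_cnt_r interval until pi_rdval_cnt reaches 0;
// fine_dly_dec_done_r1 asserts after the final decrement (or immediately
// if the read-back value is already 0)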
always @(posedge clk) begin
dqs_po_dec_done_r1 <= #TCQ dqs_po_dec_done;
dqs_po_dec_done_r2 <= #TCQ dqs_po_dec_done_r1;
fine_dly_dec_done_r2 <= #TCQ fine_dly_dec_done_r1;
pi_fine_dly_dec_done <= #TCQ fine_dly_dec_done_r2;
end
always @(posedge clk) begin
if (rst || pi_cnt_dec)
wait_cnt_r <= #TCQ 'd8;
else if (dqs_po_dec_done_r2 && (wait_cnt_r > 'd0))
wait_cnt_r <= #TCQ wait_cnt_r - 1;
end
always @(posedge clk) begin
if (rst) begin
pi_rdval_cnt <= #TCQ 'd0;
end else if (dqs_po_dec_done_r1 && ~dqs_po_dec_done_r2) begin
pi_rdval_cnt <= #TCQ pi_counter_read_val;
end else if (pi_rdval_cnt > 'd0) begin
if (pi_cnt_dec)
pi_rdval_cnt <= #TCQ pi_rdval_cnt - 1;
else
pi_rdval_cnt <= #TCQ pi_rdval_cnt;
end else if (pi_rdval_cnt == 'd0) begin
pi_rdval_cnt <= #TCQ pi_rdval_cnt;
end
end
always @(posedge clk) begin
if (rst || (pi_rdval_cnt == 'd0))
pi_cnt_dec <= #TCQ 1'b0;
else if (dqs_po_dec_done_r2 && (pi_rdval_cnt > 'd0)
&& (wait_cnt_r == 'd1))
pi_cnt_dec <= #TCQ 1'b1;
else
pi_cnt_dec <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (rst) begin
fine_dly_dec_done_r1 <= #TCQ 1'b0;
end else if (((pi_cnt_dec == 'd1) && (pi_rdval_cnt == 'd1)) ||
(dqs_po_dec_done_r2 && (pi_rdval_cnt == 'd0))) begin
fine_dly_dec_done_r1 <= #TCQ 1'b1;
end
end
//***************************************************************************
// Demultiplexer to control Phaser_IN delay values
//***************************************************************************
// Read DQS
always @(posedge clk) begin
if (rst) begin
pi_en_stg2_f_timing <= #TCQ 'b0;
pi_stg2_f_incdec_timing <= #TCQ 'b0;
end else if (pi_cnt_dec) begin
pi_en_stg2_f_timing <= #TCQ 'b1;
pi_stg2_f_incdec_timing <= #TCQ 'b0;
end else if (cal1_dlyce_cpt_r) begin
if ((SIM_CAL_OPTION == "NONE") ||
(SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin
// Change only specified DQS
pi_en_stg2_f_timing <= #TCQ 1'b1;
pi_stg2_f_incdec_timing <= #TCQ cal1_dlyinc_cpt_r;
end else if (SIM_CAL_OPTION == "FAST_CAL") begin
// if simulating, and "shortcuts" for calibration enabled, apply
// results to all DQSs (i.e. assume same delay on all
// DQSs).
pi_en_stg2_f_timing <= #TCQ 1'b1;
pi_stg2_f_incdec_timing <= #TCQ cal1_dlyinc_cpt_r;
end
end else begin
pi_en_stg2_f_timing <= #TCQ 'b0;
pi_stg2_f_incdec_timing <= #TCQ 'b0;
end
end
// registered for timing
always @(posedge clk) begin
pi_en_stg2_f <= #TCQ pi_en_stg2_f_timing;
pi_stg2_f_incdec <= #TCQ pi_stg2_f_incdec_timing;
end
// This counter is used to implement the settling time between
// Phaser_IN rank register loads to different DQSs
always @(posedge clk) begin
if (rst)
done_cnt <= #TCQ 'b0;
else if (((cal1_state_r == CAL1_REGL_LOAD) &&
(cal1_state_r1 == CAL1_NEXT_DQS)) ||
((done_cnt == 4'd1) && (cal1_state_r != CAL1_DONE)))
done_cnt <= #TCQ 4'b1010;
else if (done_cnt > 'b0)
done_cnt <= #TCQ done_cnt - 1;
end
// During rank register loading the rank count must be sent to the
// Phaser_IN via the phy_ctl_wd?? If so, phy_init will have to
// issue NOPs during rank register loading with the appropriate
// rank count
always @(posedge clk) begin
if (rst || (regl_rank_done_r == 1'b1))
regl_rank_done_r <= #TCQ 1'b0;
else if ((regl_dqs_cnt == DQS_WIDTH-1) &&
(regl_rank_cnt != RANKS-1) &&
(done_cnt == 4'd1))
regl_rank_done_r <= #TCQ 1'b1;
end
// Temp wire for timing.
// The expression 6*regl_dqs_cnt used in the logic below causes timing
// issues due to DSP block inference.
// It is replaced with (regl_dqs_cnt << 2) + (regl_dqs_cnt << 1)
// (a left shift by 2 plus a left shift by 1) to avoid the DSP multiplier.
assign regl_dqs_cnt_timing = {2'd0, regl_dqs_cnt};
// Load the Phaser_IN rank register with the rdlvl delay value
// for each DQS per rank.
always @(posedge clk) begin
if (rst || (done_cnt == 4'd0)) begin
pi_stg2_load_timing <= #TCQ 'b0;
pi_stg2_reg_l_timing <= #TCQ 'b0;
end else if ((cal1_state_r == CAL1_REGL_LOAD) &&
(regl_dqs_cnt <= DQS_WIDTH-1) && (done_cnt == 4'd1)) begin
pi_stg2_load_timing <= #TCQ 'b1;
pi_stg2_reg_l_timing <= #TCQ
rdlvl_dqs_tap_cnt_r[rnk_cnt_r][regl_dqs_cnt];
end else begin
pi_stg2_load_timing <= #TCQ 'b0;
pi_stg2_reg_l_timing <= #TCQ 'b0;
end
end
// registered for timing
always @(posedge clk) begin
pi_stg2_load <= #TCQ pi_stg2_load_timing;
pi_stg2_reg_l <= #TCQ pi_stg2_reg_l_timing;
end
always @(posedge clk) begin
if (rst || (done_cnt == 4'd0) ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
regl_rank_cnt <= #TCQ 2'b00;
else if ((cal1_state_r == CAL1_REGL_LOAD) &&
(regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1)) begin
if (regl_rank_cnt == RANKS-1)
regl_rank_cnt <= #TCQ regl_rank_cnt;
else
regl_rank_cnt <= #TCQ regl_rank_cnt + 1;
end
end
always @(posedge clk) begin
if (rst || (done_cnt == 4'd0) ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
regl_dqs_cnt <= #TCQ {DQS_CNT_WIDTH+1{1'b0}};
else if ((cal1_state_r == CAL1_REGL_LOAD) &&
(regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1)) begin
if (regl_rank_cnt == RANKS-1)
regl_dqs_cnt <= #TCQ regl_dqs_cnt;
else
regl_dqs_cnt <= #TCQ 'b0;
end else if ((cal1_state_r == CAL1_REGL_LOAD) && (regl_dqs_cnt != DQS_WIDTH-1)
&& (done_cnt == 4'd1))
regl_dqs_cnt <= #TCQ regl_dqs_cnt + 1;
else
regl_dqs_cnt <= #TCQ regl_dqs_cnt;
end
always @(posedge clk)
regl_dqs_cnt_r <= #TCQ regl_dqs_cnt;
//*****************************************************************
// DQ Stage 1 CALIBRATION INCREMENT/DECREMENT LOGIC:
// The actual IDELAY elements for each of the DQ bits are set via the
// DLYVAL parallel load port. However, the stage 1 calibration
// algorithm (well, most of it) only needs to increment or decrement the DQ
// IDELAY value by 1 at any one time.
//*****************************************************************
// Chip-select generation for each of the individual counters tracking
// IDELAY tap values for each DQ
generate
for (z = 0; z < DQS_WIDTH; z = z + 1) begin: gen_dlyce_dq
always @(posedge clk)
if (rst)
dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0;
else
if (SIM_CAL_OPTION == "SKIP_CAL")
// If skipping calibration altogether (only for simulation), no
// need to set DQ IODELAY values - they are hardcoded
dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0;
else if (SIM_CAL_OPTION == "FAST_CAL") begin
// If fast calibration option (simulation only) selected, DQ
// IODELAYs across all bytes are updated simultaneously
// (although per-bit deskew within DQS[0] is still supported)
for (h = 0; h < DRAM_WIDTH; h = h + 1) begin
dlyce_dq_r[DRAM_WIDTH*z + h] <= #TCQ cal1_dlyce_dq_r;
end
end else if ((SIM_CAL_OPTION == "NONE") ||
(SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin
if (cal1_cnt_cpt_r == z) begin
for (g = 0; g < DRAM_WIDTH; g = g + 1) begin
dlyce_dq_r[DRAM_WIDTH*z + g]
<= #TCQ cal1_dlyce_dq_r;
end
end else
dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0;
end
end
endgenerate
// Also delay increment/decrement control to match delay on DLYCE
always @(posedge clk)
if (rst)
dlyinc_dq_r <= #TCQ 1'b0;
else
dlyinc_dq_r <= #TCQ cal1_dlyinc_dq_r;
// Each DQ has a counter associated with it to record current read-leveling
// delay value
always @(posedge clk)
// Reset or skipping calibration altogether
if (rst | (SIM_CAL_OPTION == "SKIP_CAL")) begin
for (aa = 0; aa < RANKS; aa = aa + 1) begin: rst_dlyval_dq_reg_r
for (bb = 0; bb < DQ_WIDTH; bb = bb + 1)
dlyval_dq_reg_r[aa][bb] <= #TCQ 'b0;
end
end else if (SIM_CAL_OPTION == "FAST_CAL") begin
for (n = 0; n < RANKS; n = n + 1) begin: gen_dlyval_dq_reg_rnk
for (r = 0; r < DQ_WIDTH; r = r + 1) begin: gen_dlyval_dq_reg
if (dlyce_dq_r[r]) begin
if (dlyinc_dq_r)
dlyval_dq_reg_r[n][r] <= #TCQ dlyval_dq_reg_r[n][r] + 5'h01;
else
dlyval_dq_reg_r[n][r] <= #TCQ dlyval_dq_reg_r[n][r] - 5'h01;
end
end
end
end else begin
if (dlyce_dq_r[cal1_cnt_cpt_r]) begin
if (dlyinc_dq_r)
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] <= #TCQ
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] + 5'h01;
else
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] <= #TCQ
dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] - 5'h01;
end
end
// Register for timing (help with logic placement)
always @(posedge clk) begin
for (cc = 0; cc < RANKS; cc = cc + 1) begin: dlyval_dq_assgn
for (dd = 0; dd < DQ_WIDTH; dd = dd + 1)
dlyval_dq[((5*dd)+(cc*DQ_WIDTH*5))+:5] <= #TCQ dlyval_dq_reg_r[cc][dd];
end
end
//***************************************************************************
// Generate the signal used to delay the calibration state machine when:
// (1) the IDELAY value has changed
// (2) the RD_MUX_SEL value has changed
// Used when a delay is necessary to give the change time to propagate
// through the data pipeline (through IDELAY and ISERDES, and the fabric
// pipeline stages)
//***************************************************************************
// List all the stage 1 calibration wait states here.
// verilint STARC-2.7.3.3b off
always @(posedge clk)
if ((cal1_state_r == CAL1_NEW_DQS_WAIT) ||
(cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) ||
(cal1_state_r == CAL1_NEW_DQS_PREWAIT) ||
(cal1_state_r == CAL1_VALID_WAIT) ||
(cal1_state_r == CAL1_PB_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_PB_INC_CPT_WAIT) ||
(cal1_state_r == CAL1_PB_DEC_CPT_LEFT_WAIT) ||
(cal1_state_r == CAL1_PB_INC_DQ_WAIT) ||
(cal1_state_r == CAL1_PB_DEC_CPT_WAIT) ||
(cal1_state_r == CAL1_IDEL_INC_CPT_WAIT) ||
(cal1_state_r == CAL1_IDEL_DEC_CPT_WAIT) ||
(cal1_state_r == CAL1_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_DQ_IDEL_TAP_INC_WAIT) ||
(cal1_state_r == CAL1_DQ_IDEL_TAP_DEC_WAIT))
cal1_wait_cnt_en_r <= #TCQ 1'b1;
else
cal1_wait_cnt_en_r <= #TCQ 1'b0;
// verilint STARC-2.7.3.3b on
always @(posedge clk)
if (!cal1_wait_cnt_en_r) begin
cal1_wait_cnt_r <= #TCQ 5'b00000;
cal1_wait_r <= #TCQ 1'b1;
end else begin
if (cal1_wait_cnt_r != PIPE_WAIT_CNT - 1) begin
cal1_wait_cnt_r <= #TCQ cal1_wait_cnt_r + 1;
cal1_wait_r <= #TCQ 1'b1;
end else begin
// Need to reset to 0 to handle the case when there are two
// different WAIT states back-to-back
cal1_wait_cnt_r <= #TCQ 5'b00000;
cal1_wait_r <= #TCQ 1'b0;
end
end
//***************************************************************************
// Generate a request to the PHY_INIT logic to issue a precharge. Required
// when calibration can take a long time (during which there are only
// constant reads present on this bus). In this case periodic precharges
// need to be issued to avoid a tRAS violation. This signal must meet the
// following requirements: (1) only transition from 0->1 when a precharge
// is first needed, (2) stay at 1 and only transition 1->0 when
// RDLVL_PRECH_DONE is asserted
//***************************************************************************
always @(posedge clk)
if (rst)
rdlvl_prech_req <= #TCQ 1'b0;
else
rdlvl_prech_req <= #TCQ cal1_prech_req_r;
//***************************************************************************
// Serial-to-parallel register to store the last RD_SHIFT_LEN cycles of
// data from the ISERDES. The value of this register is also stored, so that
// previous and current values of the ISERDES data can be compared while
// varying the IODELAY taps to see if an "edge" of the data valid window
// has been encountered since the last IODELAY tap adjustment
//***************************************************************************
//***************************************************************************
// Shift register to store the last RD_SHIFT_LEN cycles of data from ISERDES
// NOTE: Written using discrete flops, but an SRL can be used if the matching
// logic does the comparison sequentially, rather than in parallel
//***************************************************************************
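// Note: when RD_SHIFT_LEN == 1 each "shift register" below degenerates to
// a single flop per bit (gen_sr_len_eq1); the gen_sr_len_gt1 branch
// implements the actual shift for longer training patterns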
generate
genvar rd_i;
if (nCK_PER_CLK == 4) begin: gen_sr_div4
if (RD_SHIFT_LEN == 1) begin: gen_sr_len_eq1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ mux_rd_rise0_r[rd_i];
sr_fall0_r[rd_i] <= #TCQ mux_rd_fall0_r[rd_i];
sr_rise1_r[rd_i] <= #TCQ mux_rd_rise1_r[rd_i];
sr_fall1_r[rd_i] <= #TCQ mux_rd_fall1_r[rd_i];
sr_rise2_r[rd_i] <= #TCQ mux_rd_rise2_r[rd_i];
sr_fall2_r[rd_i] <= #TCQ mux_rd_fall2_r[rd_i];
sr_rise3_r[rd_i] <= #TCQ mux_rd_rise3_r[rd_i];
sr_fall3_r[rd_i] <= #TCQ mux_rd_fall3_r[rd_i];
end
end
end
end else if (RD_SHIFT_LEN > 1) begin: gen_sr_len_gt1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ {sr_rise0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise0_r[rd_i]};
sr_fall0_r[rd_i] <= #TCQ {sr_fall0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall0_r[rd_i]};
sr_rise1_r[rd_i] <= #TCQ {sr_rise1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise1_r[rd_i]};
sr_fall1_r[rd_i] <= #TCQ {sr_fall1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall1_r[rd_i]};
sr_rise2_r[rd_i] <= #TCQ {sr_rise2_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise2_r[rd_i]};
sr_fall2_r[rd_i] <= #TCQ {sr_fall2_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall2_r[rd_i]};
sr_rise3_r[rd_i] <= #TCQ {sr_rise3_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise3_r[rd_i]};
sr_fall3_r[rd_i] <= #TCQ {sr_fall3_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall3_r[rd_i]};
end
end
end
end
end else if (nCK_PER_CLK == 2) begin: gen_sr_div2
if (RD_SHIFT_LEN == 1) begin: gen_sr_len_eq1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ {mux_rd_rise0_r[rd_i]};
sr_fall0_r[rd_i] <= #TCQ {mux_rd_fall0_r[rd_i]};
sr_rise1_r[rd_i] <= #TCQ {mux_rd_rise1_r[rd_i]};
sr_fall1_r[rd_i] <= #TCQ {mux_rd_fall1_r[rd_i]};
end
end
end
end else if (RD_SHIFT_LEN > 1) begin: gen_sr_len_gt1
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
if (mux_rd_valid_r) begin
sr_rise0_r[rd_i] <= #TCQ {sr_rise0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise0_r[rd_i]};
sr_fall0_r[rd_i] <= #TCQ {sr_fall0_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall0_r[rd_i]};
sr_rise1_r[rd_i] <= #TCQ {sr_rise1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_rise1_r[rd_i]};
sr_fall1_r[rd_i] <= #TCQ {sr_fall1_r[rd_i][RD_SHIFT_LEN-2:0],
mux_rd_fall1_r[rd_i]};
end
end
end
end
end
endgenerate
//***************************************************************************
// Conversion to pattern calibration
//***************************************************************************
// Pattern for DQ IDELAY calibration
//*****************************************************************
// Expected data pattern when DQ is shifted to the right such that
// DQS is before the left edge of the DVW:
// Based on a pattern of ({rise,fall}) =
// 0x1, 0xB, 0x4, 0x4, 0xB, 0x9
// Each nibble will look like:
// bit3: 0, 1, 0, 0, 1, 1
// bit2: 0, 0, 1, 1, 0, 0
// bit1: 0, 1, 0, 0, 1, 0
// bit0: 1, 1, 0, 0, 1, 1
// Or if the write is early it could look like:
// 0x4, 0x4, 0xB, 0x9, 0x6, 0xE
// bit3: 0, 0, 1, 1, 0, 1
// bit2: 1, 1, 0, 0, 1, 1
// bit1: 0, 0, 1, 0, 1, 1
// bit0: 0, 0, 1, 1, 0, 0
// Change the hard-coded pattern below accordingly as RD_SHIFT_LEN
// and the actual training pattern contents change
//*****************************************************************
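// Each pat*_rise/fall constant below holds the expected RD_SHIFT_LEN-sample
// history for one bit position within a nibble; the comparison logic
// indexes these constants with pt_i%4, so bytes wider than 4 bits reuse
// the nibble pattern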
generate
if (nCK_PER_CLK == 4) begin: gen_pat_div4
// Pattern for DQ IDELAY increment
// Target pattern for "early write"
assign {idel_pat0_rise0[3], idel_pat0_rise0[2],
idel_pat0_rise0[1], idel_pat0_rise0[0]} = 4'h1;
assign {idel_pat0_fall0[3], idel_pat0_fall0[2],
idel_pat0_fall0[1], idel_pat0_fall0[0]} = 4'h7;
assign {idel_pat0_rise1[3], idel_pat0_rise1[2],
idel_pat0_rise1[1], idel_pat0_rise1[0]} = 4'hE;
assign {idel_pat0_fall1[3], idel_pat0_fall1[2],
idel_pat0_fall1[1], idel_pat0_fall1[0]} = 4'hC;
assign {idel_pat0_rise2[3], idel_pat0_rise2[2],
idel_pat0_rise2[1], idel_pat0_rise2[0]} = 4'h9;
assign {idel_pat0_fall2[3], idel_pat0_fall2[2],
idel_pat0_fall2[1], idel_pat0_fall2[0]} = 4'h2;
assign {idel_pat0_rise3[3], idel_pat0_rise3[2],
idel_pat0_rise3[1], idel_pat0_rise3[0]} = 4'h4;
assign {idel_pat0_fall3[3], idel_pat0_fall3[2],
idel_pat0_fall3[1], idel_pat0_fall3[0]} = 4'hB;
// Target pattern for "on-time write"
assign {idel_pat1_rise0[3], idel_pat1_rise0[2],
idel_pat1_rise0[1], idel_pat1_rise0[0]} = 4'h4;
assign {idel_pat1_fall0[3], idel_pat1_fall0[2],
idel_pat1_fall0[1], idel_pat1_fall0[0]} = 4'h9;
assign {idel_pat1_rise1[3], idel_pat1_rise1[2],
idel_pat1_rise1[1], idel_pat1_rise1[0]} = 4'h3;
assign {idel_pat1_fall1[3], idel_pat1_fall1[2],
idel_pat1_fall1[1], idel_pat1_fall1[0]} = 4'h7;
assign {idel_pat1_rise2[3], idel_pat1_rise2[2],
idel_pat1_rise2[1], idel_pat1_rise2[0]} = 4'hE;
assign {idel_pat1_fall2[3], idel_pat1_fall2[2],
idel_pat1_fall2[1], idel_pat1_fall2[0]} = 4'hC;
assign {idel_pat1_rise3[3], idel_pat1_rise3[2],
idel_pat1_rise3[1], idel_pat1_rise3[0]} = 4'h9;
assign {idel_pat1_fall3[3], idel_pat1_fall3[2],
idel_pat1_fall3[1], idel_pat1_fall3[0]} = 4'h2;
// Correct data valid window for "early write"
assign {pat0_rise0[3], pat0_rise0[2],
pat0_rise0[1], pat0_rise0[0]} = 4'h7;
assign {pat0_fall0[3], pat0_fall0[2],
pat0_fall0[1], pat0_fall0[0]} = 4'hE;
assign {pat0_rise1[3], pat0_rise1[2],
pat0_rise1[1], pat0_rise1[0]} = 4'hC;
assign {pat0_fall1[3], pat0_fall1[2],
pat0_fall1[1], pat0_fall1[0]} = 4'h9;
assign {pat0_rise2[3], pat0_rise2[2],
pat0_rise2[1], pat0_rise2[0]} = 4'h2;
assign {pat0_fall2[3], pat0_fall2[2],
pat0_fall2[1], pat0_fall2[0]} = 4'h4;
assign {pat0_rise3[3], pat0_rise3[2],
pat0_rise3[1], pat0_rise3[0]} = 4'hB;
assign {pat0_fall3[3], pat0_fall3[2],
pat0_fall3[1], pat0_fall3[0]} = 4'h1;
// Correct data valid window for "on-time write"
assign {pat1_rise0[3], pat1_rise0[2],
pat1_rise0[1], pat1_rise0[0]} = 4'h9;
assign {pat1_fall0[3], pat1_fall0[2],
pat1_fall0[1], pat1_fall0[0]} = 4'h3;
assign {pat1_rise1[3], pat1_rise1[2],
pat1_rise1[1], pat1_rise1[0]} = 4'h7;
assign {pat1_fall1[3], pat1_fall1[2],
pat1_fall1[1], pat1_fall1[0]} = 4'hE;
assign {pat1_rise2[3], pat1_rise2[2],
pat1_rise2[1], pat1_rise2[0]} = 4'hC;
assign {pat1_fall2[3], pat1_fall2[2],
pat1_fall2[1], pat1_fall2[0]} = 4'h9;
assign {pat1_rise3[3], pat1_rise3[2],
pat1_rise3[1], pat1_rise3[0]} = 4'h2;
assign {pat1_fall3[3], pat1_fall3[2],
pat1_fall3[1], pat1_fall3[0]} = 4'h4;
end else if (nCK_PER_CLK == 2) begin: gen_pat_div2
// Pattern for DQ IDELAY increment
// Target pattern for "early write"
assign idel_pat0_rise0[3] = 2'b01;
assign idel_pat0_fall0[3] = 2'b00;
assign idel_pat0_rise1[3] = 2'b10;
assign idel_pat0_fall1[3] = 2'b11;
assign idel_pat0_rise0[2] = 2'b00;
assign idel_pat0_fall0[2] = 2'b10;
assign idel_pat0_rise1[2] = 2'b11;
assign idel_pat0_fall1[2] = 2'b10;
assign idel_pat0_rise0[1] = 2'b00;
assign idel_pat0_fall0[1] = 2'b11;
assign idel_pat0_rise1[1] = 2'b10;
assign idel_pat0_fall1[1] = 2'b01;
assign idel_pat0_rise0[0] = 2'b11;
assign idel_pat0_fall0[0] = 2'b10;
assign idel_pat0_rise1[0] = 2'b00;
assign idel_pat0_fall1[0] = 2'b01;
// Target pattern for "on-time write"
assign idel_pat1_rise0[3] = 2'b01;
assign idel_pat1_fall0[3] = 2'b11;
assign idel_pat1_rise1[3] = 2'b01;
assign idel_pat1_fall1[3] = 2'b00;
assign idel_pat1_rise0[2] = 2'b11;
assign idel_pat1_fall0[2] = 2'b01;
assign idel_pat1_rise1[2] = 2'b00;
assign idel_pat1_fall1[2] = 2'b10;
assign idel_pat1_rise0[1] = 2'b01;
assign idel_pat1_fall0[1] = 2'b00;
assign idel_pat1_rise1[1] = 2'b10;
assign idel_pat1_fall1[1] = 2'b11;
assign idel_pat1_rise0[0] = 2'b00;
assign idel_pat1_fall0[0] = 2'b10;
assign idel_pat1_rise1[0] = 2'b11;
assign idel_pat1_fall1[0] = 2'b10;
// Correct data valid window for "early write"
assign pat0_rise0[3] = 2'b00;
assign pat0_fall0[3] = 2'b10;
assign pat0_rise1[3] = 2'b11;
assign pat0_fall1[3] = 2'b10;
assign pat0_rise0[2] = 2'b10;
assign pat0_fall0[2] = 2'b11;
assign pat0_rise1[2] = 2'b10;
assign pat0_fall1[2] = 2'b00;
assign pat0_rise0[1] = 2'b11;
assign pat0_fall0[1] = 2'b10;
assign pat0_rise1[1] = 2'b01;
assign pat0_fall1[1] = 2'b00;
assign pat0_rise0[0] = 2'b10;
assign pat0_fall0[0] = 2'b00;
assign pat0_rise1[0] = 2'b01;
assign pat0_fall1[0] = 2'b11;
// Correct data valid window for "on-time write"
assign pat1_rise0[3] = 2'b11;
assign pat1_fall0[3] = 2'b01;
assign pat1_rise1[3] = 2'b00;
assign pat1_fall1[3] = 2'b10;
assign pat1_rise0[2] = 2'b01;
assign pat1_fall0[2] = 2'b00;
assign pat1_rise1[2] = 2'b10;
assign pat1_fall1[2] = 2'b11;
assign pat1_rise0[1] = 2'b00;
assign pat1_fall0[1] = 2'b10;
assign pat1_rise1[1] = 2'b11;
assign pat1_fall1[1] = 2'b10;
assign pat1_rise0[0] = 2'b10;
assign pat1_fall0[0] = 2'b11;
assign pat1_rise1[0] = 2'b10;
assign pat1_fall1[0] = 2'b00;
end
endgenerate
// Each bit of each byte is compared to the expected pattern.
// This is done to drastically decrease the chance that invalid data
// clocked in while the DQ bus is tri-stated (along with a combination of
// the correct data) will resemble the expected data pattern.
// A better fix for this is to change the training pattern and/or
// make the pattern longer.
generate
genvar pt_i;
if (nCK_PER_CLK == 4) begin: gen_pat_match_div4
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
// DQ IDELAY pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat0_rise0[pt_i%4])
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat0_fall0[pt_i%4])
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat0_rise1[pt_i%4])
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat0_fall1[pt_i%4])
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == idel_pat0_rise2[pt_i%4])
idel_pat0_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == idel_pat0_fall2[pt_i%4])
idel_pat0_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == idel_pat0_rise3[pt_i%4])
idel_pat0_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == idel_pat0_fall3[pt_i%4])
idel_pat0_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat1_rise0[pt_i%4])
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat1_fall0[pt_i%4])
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat1_rise1[pt_i%4])
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat1_fall1[pt_i%4])
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == idel_pat1_rise2[pt_i%4])
idel_pat1_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == idel_pat1_fall2[pt_i%4])
idel_pat1_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == idel_pat1_rise3[pt_i%4])
idel_pat1_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == idel_pat1_fall3[pt_i%4])
idel_pat1_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
// DQS DVW pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat0_rise0[pt_i%4])
pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat0_fall0[pt_i%4])
pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat0_rise1[pt_i%4])
pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat0_fall1[pt_i%4])
pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat0_rise2[pt_i%4])
pat0_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat0_fall2[pt_i%4])
pat0_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == pat0_rise3[pt_i%4])
pat0_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == pat0_fall3[pt_i%4])
pat0_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat1_rise0[pt_i%4])
pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat1_fall0[pt_i%4])
pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat1_rise1[pt_i%4])
pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat1_fall1[pt_i%4])
pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat1_rise2[pt_i%4])
pat1_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat1_fall2[pt_i%4])
pat1_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == pat1_rise3[pt_i%4])
pat1_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == pat1_fall3[pt_i%4])
pat1_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
end
// Combine pattern match "subterms" for DQ-IDELAY stage
always @(posedge clk) begin
idel_pat0_match_rise0_and_r <= #TCQ &idel_pat0_match_rise0_r;
idel_pat0_match_fall0_and_r <= #TCQ &idel_pat0_match_fall0_r;
idel_pat0_match_rise1_and_r <= #TCQ &idel_pat0_match_rise1_r;
idel_pat0_match_fall1_and_r <= #TCQ &idel_pat0_match_fall1_r;
idel_pat0_match_rise2_and_r <= #TCQ &idel_pat0_match_rise2_r;
idel_pat0_match_fall2_and_r <= #TCQ &idel_pat0_match_fall2_r;
idel_pat0_match_rise3_and_r <= #TCQ &idel_pat0_match_rise3_r;
idel_pat0_match_fall3_and_r <= #TCQ &idel_pat0_match_fall3_r;
idel_pat0_data_match_r <= #TCQ (idel_pat0_match_rise0_and_r &&
idel_pat0_match_fall0_and_r &&
idel_pat0_match_rise1_and_r &&
idel_pat0_match_fall1_and_r &&
idel_pat0_match_rise2_and_r &&
idel_pat0_match_fall2_and_r &&
idel_pat0_match_rise3_and_r &&
idel_pat0_match_fall3_and_r);
end
always @(posedge clk) begin
idel_pat1_match_rise0_and_r <= #TCQ &idel_pat1_match_rise0_r;
idel_pat1_match_fall0_and_r <= #TCQ &idel_pat1_match_fall0_r;
idel_pat1_match_rise1_and_r <= #TCQ &idel_pat1_match_rise1_r;
idel_pat1_match_fall1_and_r <= #TCQ &idel_pat1_match_fall1_r;
idel_pat1_match_rise2_and_r <= #TCQ &idel_pat1_match_rise2_r;
idel_pat1_match_fall2_and_r <= #TCQ &idel_pat1_match_fall2_r;
idel_pat1_match_rise3_and_r <= #TCQ &idel_pat1_match_rise3_r;
idel_pat1_match_fall3_and_r <= #TCQ &idel_pat1_match_fall3_r;
idel_pat1_data_match_r <= #TCQ (idel_pat1_match_rise0_and_r &&
idel_pat1_match_fall0_and_r &&
idel_pat1_match_rise1_and_r &&
idel_pat1_match_fall1_and_r &&
idel_pat1_match_rise2_and_r &&
idel_pat1_match_fall2_and_r &&
idel_pat1_match_rise3_and_r &&
idel_pat1_match_fall3_and_r);
end
always @(*)
idel_pat_data_match <= #TCQ idel_pat0_data_match_r |
idel_pat1_data_match_r;
always @(posedge clk)
idel_pat_data_match_r <= #TCQ idel_pat_data_match;
// Combine pattern match "subterms" for DQS-PHASER_IN stage
always @(posedge clk) begin
pat0_match_rise0_and_r <= #TCQ &pat0_match_rise0_r;
pat0_match_fall0_and_r <= #TCQ &pat0_match_fall0_r;
pat0_match_rise1_and_r <= #TCQ &pat0_match_rise1_r;
pat0_match_fall1_and_r <= #TCQ &pat0_match_fall1_r;
pat0_match_rise2_and_r <= #TCQ &pat0_match_rise2_r;
pat0_match_fall2_and_r <= #TCQ &pat0_match_fall2_r;
pat0_match_rise3_and_r <= #TCQ &pat0_match_rise3_r;
pat0_match_fall3_and_r <= #TCQ &pat0_match_fall3_r;
pat0_data_match_r <= #TCQ (pat0_match_rise0_and_r &&
pat0_match_fall0_and_r &&
pat0_match_rise1_and_r &&
pat0_match_fall1_and_r &&
pat0_match_rise2_and_r &&
pat0_match_fall2_and_r &&
pat0_match_rise3_and_r &&
pat0_match_fall3_and_r);
end
always @(posedge clk) begin
pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r;
pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r;
pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r;
pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r;
pat1_match_rise2_and_r <= #TCQ &pat1_match_rise2_r;
pat1_match_fall2_and_r <= #TCQ &pat1_match_fall2_r;
pat1_match_rise3_and_r <= #TCQ &pat1_match_rise3_r;
pat1_match_fall3_and_r <= #TCQ &pat1_match_fall3_r;
pat1_data_match_r <= #TCQ (pat1_match_rise0_and_r &&
pat1_match_fall0_and_r &&
pat1_match_rise1_and_r &&
pat1_match_fall1_and_r &&
pat1_match_rise2_and_r &&
pat1_match_fall2_and_r &&
pat1_match_rise3_and_r &&
pat1_match_fall3_and_r);
end
assign pat_data_match_r = pat0_data_match_r | pat1_data_match_r;
end else if (nCK_PER_CLK == 2) begin: gen_pat_match_div2
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
// DQ IDELAY pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat0_rise0[pt_i%4])
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat0_fall0[pt_i%4])
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat0_rise1[pt_i%4])
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat0_fall1[pt_i%4])
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == idel_pat1_rise0[pt_i%4])
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == idel_pat1_fall0[pt_i%4])
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == idel_pat1_rise1[pt_i%4])
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == idel_pat1_fall1[pt_i%4])
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
// DQS DVW pattern detection
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat0_rise0[pt_i%4])
pat0_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat0_fall0[pt_i%4])
pat0_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat0_rise1[pt_i%4])
pat0_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat0_fall1[pt_i%4])
pat0_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat0_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat1_rise0[pt_i%4])
pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat1_fall0[pt_i%4])
pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat1_rise1[pt_i%4])
pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat1_fall1[pt_i%4])
pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
end
// Combine pattern match "subterms" for DQ-IDELAY stage
always @(posedge clk) begin
idel_pat0_match_rise0_and_r <= #TCQ &idel_pat0_match_rise0_r;
idel_pat0_match_fall0_and_r <= #TCQ &idel_pat0_match_fall0_r;
idel_pat0_match_rise1_and_r <= #TCQ &idel_pat0_match_rise1_r;
idel_pat0_match_fall1_and_r <= #TCQ &idel_pat0_match_fall1_r;
idel_pat0_data_match_r <= #TCQ (idel_pat0_match_rise0_and_r &&
idel_pat0_match_fall0_and_r &&
idel_pat0_match_rise1_and_r &&
idel_pat0_match_fall1_and_r);
end
always @(posedge clk) begin
idel_pat1_match_rise0_and_r <= #TCQ &idel_pat1_match_rise0_r;
idel_pat1_match_fall0_and_r <= #TCQ &idel_pat1_match_fall0_r;
idel_pat1_match_rise1_and_r <= #TCQ &idel_pat1_match_rise1_r;
idel_pat1_match_fall1_and_r <= #TCQ &idel_pat1_match_fall1_r;
idel_pat1_data_match_r <= #TCQ (idel_pat1_match_rise0_and_r &&
idel_pat1_match_fall0_and_r &&
idel_pat1_match_rise1_and_r &&
idel_pat1_match_fall1_and_r);
end
always @(posedge clk) begin
if (sr_valid_r2)
idel_pat_data_match <= #TCQ idel_pat0_data_match_r |
idel_pat1_data_match_r;
end
//assign idel_pat_data_match = idel_pat0_data_match_r |
// idel_pat1_data_match_r;
always @(posedge clk)
idel_pat_data_match_r <= #TCQ idel_pat_data_match;
// Combine pattern match "subterms" for DQS-PHASER_IN stage
always @(posedge clk) begin
pat0_match_rise0_and_r <= #TCQ &pat0_match_rise0_r;
pat0_match_fall0_and_r <= #TCQ &pat0_match_fall0_r;
pat0_match_rise1_and_r <= #TCQ &pat0_match_rise1_r;
pat0_match_fall1_and_r <= #TCQ &pat0_match_fall1_r;
pat0_data_match_r <= #TCQ (pat0_match_rise0_and_r &&
pat0_match_fall0_and_r &&
pat0_match_rise1_and_r &&
pat0_match_fall1_and_r);
end
always @(posedge clk) begin
pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r;
pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r;
pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r;
pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r;
pat1_data_match_r <= #TCQ (pat1_match_rise0_and_r &&
pat1_match_fall0_and_r &&
pat1_match_rise1_and_r &&
pat1_match_fall1_and_r);
end
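  // The aggregate DQS-stage match below is asserted if the read data lines
  // up with either stored pattern variant (pat0 or pat1); the DQ-IDELAY-stage
  // match above (idel_pat0/idel_pat1) is formed the same way.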
assign pat_data_match_r = pat0_data_match_r | pat1_data_match_r;
end
endgenerate
always @(posedge clk) begin
rdlvl_stg1_start_r <= #TCQ rdlvl_stg1_start;
mpr_rdlvl_done_r1 <= #TCQ mpr_rdlvl_done_r;
mpr_rdlvl_done_r2 <= #TCQ mpr_rdlvl_done_r1;
mpr_rdlvl_start_r <= #TCQ mpr_rdlvl_start;
end
//***************************************************************************
// First stage calibration: Capture clock
//***************************************************************************
//*****************************************************************
  // Keep track of how many samples have been written to the shift registers.
  // Every time RD_SHIFT_LEN samples have been written, a full read training
  // pattern has been loaded into the sr_* registers. Then assert
  // sr_valid_r to indicate that: (1) comparison between the sr_* and
  // old_sr_* and prev_sr_* registers can take place, and (2) transfer of
  // the contents of sr_* to the old_sr_* and prev_sr_* registers can also
  // take place
//*****************************************************************
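  // For example, with a pattern length of RD_SHIFT_LEN, sr_valid_r pulses for
  // one clock out of every RD_SHIFT_LEN valid mux_rd_* cycles once
  // rdlvl_stg1_start is asserted.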
// verilint STARC-2.2.3.3 off
always @(posedge clk)
if (rst || (mpr_rdlvl_done_r && ~rdlvl_stg1_start)) begin
cnt_shift_r <= #TCQ 'b1;
sr_valid_r <= #TCQ 1'b0;
mpr_valid_r <= #TCQ 1'b0;
end else begin
if (mux_rd_valid_r && mpr_rdlvl_start && ~mpr_rdlvl_done_r) begin
if (cnt_shift_r == 'b0)
mpr_valid_r <= #TCQ 1'b1;
else begin
mpr_valid_r <= #TCQ 1'b0;
cnt_shift_r <= #TCQ cnt_shift_r + 1;
end
end else
mpr_valid_r <= #TCQ 1'b0;
if (mux_rd_valid_r && rdlvl_stg1_start) begin
if (cnt_shift_r == RD_SHIFT_LEN-1) begin
sr_valid_r <= #TCQ 1'b1;
cnt_shift_r <= #TCQ 'b0;
end else begin
sr_valid_r <= #TCQ 1'b0;
cnt_shift_r <= #TCQ cnt_shift_r + 1;
end
end else
// When the current mux_rd_* contents are not valid, then
// retain the current value of cnt_shift_r, and make sure
// that sr_valid_r = 0 to prevent any downstream loads or
// comparisons
sr_valid_r <= #TCQ 1'b0;
end
// verilint STARC-2.2.3.3 on
//*****************************************************************
  // Logic to determine when either edge of the data eye is encountered.
  // The pre- and post-IDELAY-update data patterns are compared; if they
  // differ, then an edge has been encountered. Currently no attempt is
  // made to determine if the data pattern itself is "correct", only
  // whether it changes after incrementing the IDELAY (possible
  // future enhancement)
//*****************************************************************
  // One-way control ensuring that the state machine's request to store the
  // current read data into the OLD SR shift register occurs only on a
  // valid clock cycle. The FSM provides a one-cycle request pulse.
// It is the responsibility of the FSM to wait the worst-case time
// before relying on any downstream results of this load.
always @(posedge clk)
if (rst)
store_sr_r <= #TCQ 1'b0;
else begin
if (store_sr_req_r)
store_sr_r <= #TCQ 1'b1;
else if ((sr_valid_r || mpr_valid_r) && store_sr_r)
store_sr_r <= #TCQ 1'b0;
end
// Transfer current data to old data, prior to incrementing delay
// Also store data from current sampling window - so that we can detect
// if the current delay tap yields data that is "jittery"
generate
if (nCK_PER_CLK == 4) begin: gen_old_sr_div4
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_old_sr
always @(posedge clk) begin
if (sr_valid_r || mpr_valid_r) begin
// Load last sample (i.e. from current sampling interval)
prev_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
prev_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
prev_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
prev_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
prev_sr_rise2_r[z] <= #TCQ sr_rise2_r[z];
prev_sr_fall2_r[z] <= #TCQ sr_fall2_r[z];
prev_sr_rise3_r[z] <= #TCQ sr_rise3_r[z];
prev_sr_fall3_r[z] <= #TCQ sr_fall3_r[z];
end
if ((sr_valid_r || mpr_valid_r) && store_sr_r) begin
old_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
old_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
old_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
old_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
old_sr_rise2_r[z] <= #TCQ sr_rise2_r[z];
old_sr_fall2_r[z] <= #TCQ sr_fall2_r[z];
old_sr_rise3_r[z] <= #TCQ sr_rise3_r[z];
old_sr_fall3_r[z] <= #TCQ sr_fall3_r[z];
end
end
end
end else if (nCK_PER_CLK == 2) begin: gen_old_sr_div2
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_old_sr
always @(posedge clk) begin
if (sr_valid_r || mpr_valid_r) begin
prev_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
prev_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
prev_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
prev_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
end
if ((sr_valid_r || mpr_valid_r) && store_sr_r) begin
old_sr_rise0_r[z] <= #TCQ sr_rise0_r[z];
old_sr_fall0_r[z] <= #TCQ sr_fall0_r[z];
old_sr_rise1_r[z] <= #TCQ sr_rise1_r[z];
old_sr_fall1_r[z] <= #TCQ sr_fall1_r[z];
end
end
end
end
endgenerate
//*******************************************************
// Match determination occurs over 3 cycles - pipelined for better timing
//*******************************************************
// Match valid with # of cycles of pipelining in match determination
always @(posedge clk) begin
sr_valid_r1 <= #TCQ sr_valid_r;
sr_valid_r2 <= #TCQ sr_valid_r1;
mpr_valid_r1 <= #TCQ mpr_valid_r;
mpr_valid_r2 <= #TCQ mpr_valid_r1;
end
generate
if (nCK_PER_CLK == 4) begin: gen_sr_match_div4
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_sr_match
always @(posedge clk) begin
          // CYCLE1: Compare all bits in the DQS group, generating a separate
          //  term for each bit over eight bit times. For example, if there
          //  are 8 bits per DQS group, 64 terms are generated on cycle 1 for
          //  each of the old_sr_* and prev_sr_* comparisons
// NOTE: Structure HDL such that X on data bus will result in a
// mismatch. This is required for memory models that can drive the
// bus with X's to model uncertainty regions (e.g. Denali)
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == old_sr_rise0_r[z]))
old_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise0_r[z] <= #TCQ old_sr_match_rise0_r[z];
else
old_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == old_sr_fall0_r[z]))
old_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall0_r[z] <= #TCQ old_sr_match_fall0_r[z];
else
old_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == old_sr_rise1_r[z]))
old_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise1_r[z] <= #TCQ old_sr_match_rise1_r[z];
else
old_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == old_sr_fall1_r[z]))
old_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall1_r[z] <= #TCQ old_sr_match_fall1_r[z];
else
old_sr_match_fall1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise2_r[z] == old_sr_rise2_r[z]))
old_sr_match_rise2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise2_r[z] <= #TCQ old_sr_match_rise2_r[z];
else
old_sr_match_rise2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall2_r[z] == old_sr_fall2_r[z]))
old_sr_match_fall2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall2_r[z] <= #TCQ old_sr_match_fall2_r[z];
else
old_sr_match_fall2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise3_r[z] == old_sr_rise3_r[z]))
old_sr_match_rise3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise3_r[z] <= #TCQ old_sr_match_rise3_r[z];
else
old_sr_match_rise3_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall3_r[z] == old_sr_fall3_r[z]))
old_sr_match_fall3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall3_r[z] <= #TCQ old_sr_match_fall3_r[z];
else
old_sr_match_fall3_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == prev_sr_rise0_r[z]))
prev_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise0_r[z] <= #TCQ prev_sr_match_rise0_r[z];
else
prev_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == prev_sr_fall0_r[z]))
prev_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall0_r[z] <= #TCQ prev_sr_match_fall0_r[z];
else
prev_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == prev_sr_rise1_r[z]))
prev_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise1_r[z] <= #TCQ prev_sr_match_rise1_r[z];
else
prev_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == prev_sr_fall1_r[z]))
prev_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall1_r[z] <= #TCQ prev_sr_match_fall1_r[z];
else
prev_sr_match_fall1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise2_r[z] == prev_sr_rise2_r[z]))
prev_sr_match_rise2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise2_r[z] <= #TCQ prev_sr_match_rise2_r[z];
else
prev_sr_match_rise2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall2_r[z] == prev_sr_fall2_r[z]))
prev_sr_match_fall2_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall2_r[z] <= #TCQ prev_sr_match_fall2_r[z];
else
prev_sr_match_fall2_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise3_r[z] == prev_sr_rise3_r[z]))
prev_sr_match_rise3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise3_r[z] <= #TCQ prev_sr_match_rise3_r[z];
else
prev_sr_match_rise3_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall3_r[z] == prev_sr_fall3_r[z]))
prev_sr_match_fall3_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall3_r[z] <= #TCQ prev_sr_match_fall3_r[z];
else
prev_sr_match_fall3_r[z] <= #TCQ 1'b0;
          // CYCLE2: Combine all the comparisons for every 8 words (rise0/fall0
          //  through rise3/fall3) in the calibration sequence. Now we're down
          //  to DRAM_WIDTH terms
old_sr_match_cyc2_r[z] <= #TCQ
old_sr_match_rise0_r[z] &
old_sr_match_fall0_r[z] &
old_sr_match_rise1_r[z] &
old_sr_match_fall1_r[z] &
old_sr_match_rise2_r[z] &
old_sr_match_fall2_r[z] &
old_sr_match_rise3_r[z] &
old_sr_match_fall3_r[z];
prev_sr_match_cyc2_r[z] <= #TCQ
prev_sr_match_rise0_r[z] &
prev_sr_match_fall0_r[z] &
prev_sr_match_rise1_r[z] &
prev_sr_match_fall1_r[z] &
prev_sr_match_rise2_r[z] &
prev_sr_match_fall2_r[z] &
prev_sr_match_rise3_r[z] &
prev_sr_match_fall3_r[z];
          // CYCLE3: Invert value (i.e. assert when a DIFFERENCE in value is
          //  seen), and qualify with the pipelined valid signal - probably
          //  don't need a full cycle just to do this....
if (sr_valid_r2 || mpr_valid_r2) begin
old_sr_diff_r[z] <= #TCQ ~old_sr_match_cyc2_r[z];
prev_sr_diff_r[z] <= #TCQ ~prev_sr_match_cyc2_r[z];
end else begin
old_sr_diff_r[z] <= #TCQ 'b0;
prev_sr_diff_r[z] <= #TCQ 'b0;
end
end
end
    end else if (nCK_PER_CLK == 2) begin: gen_sr_match_div2
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_sr_match
always @(posedge clk) begin
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == old_sr_rise0_r[z]))
old_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise0_r[z] <= #TCQ old_sr_match_rise0_r[z];
else
old_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == old_sr_fall0_r[z]))
old_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall0_r[z] <= #TCQ old_sr_match_fall0_r[z];
else
old_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == old_sr_rise1_r[z]))
old_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_rise1_r[z] <= #TCQ old_sr_match_rise1_r[z];
else
old_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == old_sr_fall1_r[z]))
old_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
old_sr_match_fall1_r[z] <= #TCQ old_sr_match_fall1_r[z];
else
old_sr_match_fall1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == prev_sr_rise0_r[z]))
prev_sr_match_rise0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise0_r[z] <= #TCQ prev_sr_match_rise0_r[z];
else
prev_sr_match_rise0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == prev_sr_fall0_r[z]))
prev_sr_match_fall0_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall0_r[z] <= #TCQ prev_sr_match_fall0_r[z];
else
prev_sr_match_fall0_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == prev_sr_rise1_r[z]))
prev_sr_match_rise1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_rise1_r[z] <= #TCQ prev_sr_match_rise1_r[z];
else
prev_sr_match_rise1_r[z] <= #TCQ 1'b0;
if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == prev_sr_fall1_r[z]))
prev_sr_match_fall1_r[z] <= #TCQ 1'b1;
else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r)
prev_sr_match_fall1_r[z] <= #TCQ prev_sr_match_fall1_r[z];
else
prev_sr_match_fall1_r[z] <= #TCQ 1'b0;
old_sr_match_cyc2_r[z] <= #TCQ
old_sr_match_rise0_r[z] &
old_sr_match_fall0_r[z] &
old_sr_match_rise1_r[z] &
old_sr_match_fall1_r[z];
prev_sr_match_cyc2_r[z] <= #TCQ
prev_sr_match_rise0_r[z] &
prev_sr_match_fall0_r[z] &
prev_sr_match_rise1_r[z] &
prev_sr_match_fall1_r[z];
          // CYCLE3: Invert value (i.e. assert when a DIFFERENCE in value is
          //  seen), and qualify with the pipelined valid signal - probably
          //  don't need a full cycle just to do this....
if (sr_valid_r2 || mpr_valid_r2) begin
old_sr_diff_r[z] <= #TCQ ~old_sr_match_cyc2_r[z];
prev_sr_diff_r[z] <= #TCQ ~prev_sr_match_cyc2_r[z];
end else begin
old_sr_diff_r[z] <= #TCQ 'b0;
prev_sr_diff_r[z] <= #TCQ 'b0;
end
end
end
end
endgenerate
//***************************************************************************
// First stage calibration: DQS Capture
//***************************************************************************
//*******************************************************
// Counters for tracking # of samples compared
  // For each comparison point (i.e. to determine if an edge has
  // occurred after each IODELAY increment when read leveling),
  // multiple samples are compared in order to average out the effects
  // of jitter. If any one of these samples differs from the "old"
  // sample corresponding to the previous IODELAY value, then an edge
  // is declared to be detected.
//*******************************************************
  // Two cascaded counters are used to keep track of the # of samples
  // compared, in order to make it easier to meet timing on these paths.
  // Once the optimal sampling interval is determined, it may be possible
  // to remove the second counter
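  // In hardware, the sampling interval therefore spans roughly
  // DETECT_EDGE_SAMPLE_CNT0 x DETECT_EDGE_SAMPLE_CNT1 compared samples before
  // samp_cnt_done_r asserts; the FAST_CAL / FAST_WIN_DETECT simulation options
  // instead use the much shorter SR_VALID_DELAY count.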
always @(posedge clk)
samp_edge_cnt0_en_r <= #TCQ
(cal1_state_r == CAL1_PAT_DETECT) ||
(cal1_state_r == CAL1_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE_DQ);
// First counter counts # of samples compared
always @(posedge clk)
if (rst)
samp_edge_cnt0_r <= #TCQ 'b0;
else begin
if (!samp_edge_cnt0_en_r)
// Reset sample counter when not in any of the "sampling" states
samp_edge_cnt0_r <= #TCQ 'b0;
else if (sr_valid_r2 || mpr_valid_r2)
// Otherwise, count # of samples compared
samp_edge_cnt0_r <= #TCQ samp_edge_cnt0_r + 1;
end
// Counter #2 enable generation
always @(posedge clk)
if (rst)
samp_edge_cnt1_en_r <= #TCQ 1'b0;
else begin
// Assert pulse when correct number of samples compared
if ((samp_edge_cnt0_r == DETECT_EDGE_SAMPLE_CNT0) &&
(sr_valid_r2 || mpr_valid_r2))
samp_edge_cnt1_en_r <= #TCQ 1'b1;
else
samp_edge_cnt1_en_r <= #TCQ 1'b0;
end
// Counter #2
always @(posedge clk)
if (rst)
samp_edge_cnt1_r <= #TCQ 'b0;
else
if (!samp_edge_cnt0_en_r)
samp_edge_cnt1_r <= #TCQ 'b0;
else if (samp_edge_cnt1_en_r)
samp_edge_cnt1_r <= #TCQ samp_edge_cnt1_r + 1;
always @(posedge clk)
if (rst)
samp_cnt_done_r <= #TCQ 1'b0;
else begin
if (!samp_edge_cnt0_en_r)
samp_cnt_done_r <= #TCQ 'b0;
else if ((SIM_CAL_OPTION == "FAST_CAL") ||
(SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin
if (samp_edge_cnt0_r == SR_VALID_DELAY-1)
// For simulation only, stay in edge detection mode a minimum
// amount of time - just enough for two data compares to finish
samp_cnt_done_r <= #TCQ 1'b1;
end else begin
if (samp_edge_cnt1_r == DETECT_EDGE_SAMPLE_CNT1)
samp_cnt_done_r <= #TCQ 1'b1;
end
end
//*****************************************************************
// Logic to keep track of (on per-bit basis):
// 1. When a region of stability preceded by a known edge occurs
// 2. If for the current tap, the read data jitters
  //  3. If an edge occurred between the current and previous tap
// 4. When the current edge detection/sampling interval can end
// Essentially, these are a series of status bits - the stage 1
// calibration FSM monitors these to determine when an edge is
// found. Additional information is provided to help the FSM
// determine if a left or right edge has been found.
//****************************************************************
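  // Flag mapping: pb_found_stable_eye_r tracks item 1, pb_last_tap_jitter_r
  // item 2, pb_found_edge_r item 3, and pb_detect_edge_done_r item 4.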
assign pb_detect_edge_setup
= (cal1_state_r == CAL1_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_PB_STORE_FIRST_WAIT) ||
(cal1_state_r == CAL1_PB_DEC_CPT_LEFT_WAIT);
assign pb_detect_edge
= (cal1_state_r == CAL1_PAT_DETECT) ||
(cal1_state_r == CAL1_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE) ||
(cal1_state_r == CAL1_PB_DETECT_EDGE_DQ);
generate
for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_track_left_edge
always @(posedge clk) begin
if (pb_detect_edge_setup) begin
// Reset eye size, stable eye marker, and jitter marker before
// starting new edge detection iteration
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_detect_edge_done_r[z] <= #TCQ 1'b0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_last_tap_jitter_r[z] <= #TCQ 1'b0;
pb_found_edge_last_r[z] <= #TCQ 1'b0;
pb_found_edge_r[z] <= #TCQ 1'b0;
pb_found_first_edge_r[z] <= #TCQ 1'b0;
end else if (pb_detect_edge) begin
// Save information on which DQ bits are already out of the
// data valid window - those DQ bits will later not have their
// IDELAY tap value incremented
pb_found_edge_last_r[z] <= #TCQ pb_found_edge_r[z];
if (!pb_detect_edge_done_r[z]) begin
if (samp_cnt_done_r) begin
            // We've reached the end of the sampling interval, and no jitter
            // on the current tap has been found (although an edge could have
            // been found between the current and previous taps). Increment
            // the stable eye counter if no edge was found, and always clear
            // the jitter flag in preparation for the next tap.
pb_last_tap_jitter_r[z] <= #TCQ 1'b0;
pb_detect_edge_done_r[z] <= #TCQ 1'b1;
if (!pb_found_edge_r[z] && !pb_last_tap_jitter_r[z]) begin
// If the data was completely stable during this tap and
// no edge was found between this and the previous tap
// then increment the stable eye counter "as appropriate"
if (pb_cnt_eye_size_r[z] != MIN_EYE_SIZE-1)
pb_cnt_eye_size_r[z] <= #TCQ pb_cnt_eye_size_r[z] + 1;
else //if (pb_found_first_edge_r[z])
// We've reached minimum stable eye width
pb_found_stable_eye_r[z] <= #TCQ 1'b1;
end else begin
            // Otherwise, an edge was found, either because of a
            // difference between this and the previous tap's read
            // data, and/or because the previous tap's data jittered
            // (but not the current tap's data). In that case just set
            // the edge-found flag and restart the stable eye counter
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_found_edge_r[z] <= #TCQ 1'b1;
pb_detect_edge_done_r[z] <= #TCQ 1'b1;
end
end else if (prev_sr_diff_r[z]) begin
// If we find that the current tap read data jitters, then
// set edge and jitter found flags, "enable" the eye size
// counter, and stop sampling interval for this bit
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_last_tap_jitter_r[z] <= #TCQ 1'b1;
pb_found_edge_r[z] <= #TCQ 1'b1;
pb_found_first_edge_r[z] <= #TCQ 1'b1;
pb_detect_edge_done_r[z] <= #TCQ 1'b1;
end else if (old_sr_diff_r[z] || pb_last_tap_jitter_r[z]) begin
// If either an edge was found (i.e. difference between
// current tap and previous tap read data), or the previous
// tap exhibited jitter (which means by definition that the
// current tap cannot match the previous tap because the
// previous tap gave unstable data), then set the edge found
// flag, and "enable" eye size counter. But do not stop
// sampling interval - we still need to check if the current
// tap exhibits jitter
pb_cnt_eye_size_r[z] <= #TCQ 5'd0;
pb_found_stable_eye_r[z] <= #TCQ 1'b0;
pb_found_edge_r[z] <= #TCQ 1'b1;
pb_found_first_edge_r[z] <= #TCQ 1'b1;
end
end
end else begin
// Before every edge detection interval, reset "intra-tap" flags
pb_found_edge_r[z] <= #TCQ 1'b0;
pb_detect_edge_done_r[z] <= #TCQ 1'b0;
end
end
end
endgenerate
// Combine the above per-bit status flags into combined terms when
// performing deskew on the aggregate data window
always @(posedge clk) begin
detect_edge_done_r <= #TCQ &pb_detect_edge_done_r;
found_edge_r <= #TCQ |pb_found_edge_r;
found_edge_all_r <= #TCQ &pb_found_edge_r;
found_stable_eye_r <= #TCQ &pb_found_stable_eye_r;
end
// last IODELAY "stable eye" indicator is updated only after
// detect_edge_done_r is asserted - so that when we do find the "right edge"
// of the data valid window, found_edge_r = 1, AND found_stable_eye_r = 1
// when detect_edge_done_r = 1 (otherwise, if found_stable_eye_r updates
  // immediately, then it is never possible to have found_stable_eye_r = 1
// when we detect an edge - and we'll never know whether we've found
// a "right edge")
always @(posedge clk)
if (pb_detect_edge_setup)
found_stable_eye_last_r <= #TCQ 1'b0;
else if (detect_edge_done_r)
found_stable_eye_last_r <= #TCQ found_stable_eye_r;
//*****************************************************************
// Keep track of DQ IDELAYE2 taps used
//*****************************************************************
// Added additional register stage to improve timing
always @(posedge clk)
if (rst)
idelay_tap_cnt_slice_r <= 5'h0;
else
idelay_tap_cnt_slice_r <= idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing];
always @(posedge clk)
if (rst || (SIM_CAL_OPTION == "SKIP_CAL")) begin //|| new_cnt_cpt_r
for (s = 0; s < RANKS; s = s + 1) begin
for (t = 0; t < DQS_WIDTH; t = t + 1) begin
idelay_tap_cnt_r[s][t] <= #TCQ idelaye2_init_val;
end
end
end else if (SIM_CAL_OPTION == "FAST_CAL") begin
for (u = 0; u < RANKS; u = u + 1) begin
for (w = 0; w < DQS_WIDTH; w = w + 1) begin
if (cal1_dq_idel_ce) begin
if (cal1_dq_idel_inc)
idelay_tap_cnt_r[u][w] <= #TCQ idelay_tap_cnt_r[u][w] + 1;
else
idelay_tap_cnt_r[u][w] <= #TCQ idelay_tap_cnt_r[u][w] - 1;
end
end
end
end else if ((rnk_cnt_r == RANKS-1) && (RANKS == 2) &&
rdlvl_rank_done_r && (cal1_state_r == CAL1_IDLE)) begin
for (f = 0; f < DQS_WIDTH; f = f + 1) begin
idelay_tap_cnt_r[rnk_cnt_r][f] <= #TCQ idelay_tap_cnt_r[(rnk_cnt_r-1)][f];
end
end else if (cal1_dq_idel_ce) begin
if (cal1_dq_idel_inc)
idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] <= #TCQ idelay_tap_cnt_slice_r + 5'h1;
else
idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] <= #TCQ idelay_tap_cnt_slice_r - 5'h1;
end else if (idelay_ld)
idelay_tap_cnt_r[0][wrcal_cnt] <= #TCQ 5'b00000;
always @(posedge clk)
if (rst || new_cnt_cpt_r)
idelay_tap_limit_r <= #TCQ 1'b0;
else if (idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_r] == 'd31)
idelay_tap_limit_r <= #TCQ 1'b1;
//*****************************************************************
// keep track of edge tap counts found, and current capture clock
// tap count
//*****************************************************************
always @(posedge clk)
if (rst || new_cnt_cpt_r ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
tap_cnt_cpt_r <= #TCQ 'b0;
else if (cal1_dlyce_cpt_r) begin
if (cal1_dlyinc_cpt_r)
tap_cnt_cpt_r <= #TCQ tap_cnt_cpt_r + 1;
else if (tap_cnt_cpt_r != 'd0)
tap_cnt_cpt_r <= #TCQ tap_cnt_cpt_r - 1;
end
always @(posedge clk)
if (rst || new_cnt_cpt_r ||
(cal1_state_r1 == CAL1_DQ_IDEL_TAP_INC) ||
(mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2))
tap_limit_cpt_r <= #TCQ 1'b0;
else if (tap_cnt_cpt_r == 6'd63)
tap_limit_cpt_r <= #TCQ 1'b1;
always @(posedge clk)
cal1_cnt_cpt_timing_r <= #TCQ cal1_cnt_cpt_r;
assign cal1_cnt_cpt_timing = {2'b00, cal1_cnt_cpt_r};
// Storing DQS tap values at the end of each DQS read leveling
always @(posedge clk) begin
if (rst) begin
for (a = 0; a < RANKS; a = a + 1) begin: rst_rdlvl_dqs_tap_count_loop
for (b = 0; b < DQS_WIDTH; b = b + 1)
rdlvl_dqs_tap_cnt_r[a][b] <= #TCQ 'b0;
end
end else if ((SIM_CAL_OPTION == "FAST_CAL") & (cal1_state_r1 == CAL1_NEXT_DQS)) begin
for (p = 0; p < RANKS; p = p +1) begin: rdlvl_dqs_tap_rank_cnt
for(q = 0; q < DQS_WIDTH; q = q +1) begin: rdlvl_dqs_tap_cnt
rdlvl_dqs_tap_cnt_r[p][q] <= #TCQ tap_cnt_cpt_r;
end
end
end else if (SIM_CAL_OPTION == "SKIP_CAL") begin
for (j = 0; j < RANKS; j = j +1) begin: rdlvl_dqs_tap_rnk_cnt
for(i = 0; i < DQS_WIDTH; i = i +1) begin: rdlvl_dqs_cnt
rdlvl_dqs_tap_cnt_r[j][i] <= #TCQ 6'd31;
end
end
end else if (cal1_state_r1 == CAL1_NEXT_DQS) begin
rdlvl_dqs_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing_r] <= #TCQ tap_cnt_cpt_r;
end
end
// Counter to track maximum DQ IODELAY tap usage during the per-bit
// deskew portion of stage 1 calibration
always @(posedge clk)
if (rst) begin
idel_tap_cnt_dq_pb_r <= #TCQ 'b0;
idel_tap_limit_dq_pb_r <= #TCQ 1'b0;
end else
if (new_cnt_cpt_r) begin
idel_tap_cnt_dq_pb_r <= #TCQ 'b0;
idel_tap_limit_dq_pb_r <= #TCQ 1'b0;
end else if (|cal1_dlyce_dq_r) begin
if (cal1_dlyinc_dq_r)
idel_tap_cnt_dq_pb_r <= #TCQ idel_tap_cnt_dq_pb_r + 1;
else
idel_tap_cnt_dq_pb_r <= #TCQ idel_tap_cnt_dq_pb_r - 1;
if (idel_tap_cnt_dq_pb_r == 31)
idel_tap_limit_dq_pb_r <= #TCQ 1'b1;
else
idel_tap_limit_dq_pb_r <= #TCQ 1'b0;
end
//*****************************************************************
always @(posedge clk)
cal1_state_r1 <= #TCQ cal1_state_r;
always @(posedge clk)
if (rst) begin
cal1_cnt_cpt_r <= #TCQ 'b0;
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
cal1_prech_req_r <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_IDLE;
cnt_idel_dec_cpt_r <= #TCQ 6'bxxxxxx;
found_first_edge_r <= #TCQ 1'b0;
found_second_edge_r <= #TCQ 1'b0;
right_edge_taps_r <= #TCQ 6'bxxxxxx;
first_edge_taps_r <= #TCQ 6'bxxxxxx;
new_cnt_cpt_r <= #TCQ 1'b0;
rdlvl_stg1_done <= #TCQ 1'b0;
rdlvl_stg1_err <= #TCQ 1'b0;
second_edge_taps_r <= #TCQ 6'bxxxxxx;
store_sr_req_pulsed_r <= #TCQ 1'b0;
store_sr_req_r <= #TCQ 1'b0;
rnk_cnt_r <= #TCQ 2'b00;
rdlvl_rank_done_r <= #TCQ 1'b0;
idel_dec_cnt <= #TCQ 'd0;
rdlvl_last_byte_done <= #TCQ 1'b0;
idel_pat_detect_valid_r <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
idel_adj_inc <= #TCQ 1'b0;
if (OCAL_EN == "ON")
mpr_rdlvl_done_r <= #TCQ 1'b0;
else
mpr_rdlvl_done_r <= #TCQ 1'b1;
mpr_dec_cpt_r <= #TCQ 1'b0;
end else begin
// default (inactive) states for all "pulse" outputs
// verilint STARC-2.2.3.3 off
cal1_prech_req_r <= #TCQ 1'b0;
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
new_cnt_cpt_r <= #TCQ 1'b0;
store_sr_req_pulsed_r <= #TCQ 1'b0;
store_sr_req_r <= #TCQ 1'b0;
case (cal1_state_r)
CAL1_IDLE: begin
rdlvl_rank_done_r <= #TCQ 1'b0;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
if (mpr_rdlvl_start && ~mpr_rdlvl_start_r) begin
cal1_state_r <= #TCQ CAL1_MPR_NEW_DQS_WAIT;
end else
if (rdlvl_stg1_start && ~rdlvl_stg1_start_r) begin
if (SIM_CAL_OPTION == "SKIP_CAL")
cal1_state_r <= #TCQ CAL1_REGL_LOAD;
else if (SIM_CAL_OPTION == "FAST_CAL")
cal1_state_r <= #TCQ CAL1_NEXT_DQS;
else begin
new_cnt_cpt_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_NEW_DQS_WAIT;
end
end
end
CAL1_MPR_NEW_DQS_WAIT: begin
cal1_prech_req_r <= #TCQ 1'b0;
if (!cal1_wait_r && mpr_valid_r)
cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT;
end
        // Wait for the newly selected DQS group to take effect; this
        // also gives time for the read data IN_FIFO to
        // output the updated data for the new DQS group
CAL1_NEW_DQS_WAIT: begin
rdlvl_rank_done_r <= #TCQ 1'b0;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
cal1_prech_req_r <= #TCQ 1'b0;
if (|pi_counter_read_val) begin //VK_REVIEW
mpr_dec_cpt_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT;
cnt_idel_dec_cpt_r <= #TCQ pi_counter_read_val;
end else if (!cal1_wait_r) begin
//if (!cal1_wait_r) begin
// Store "previous tap" read data. Technically there is no
// "previous" read data, since we are starting a new DQS
// group, so we'll never find an edge at tap 0 unless the
// data is fluctuating/jittering
store_sr_req_r <= #TCQ 1'b1;
// If per-bit deskew is disabled, then skip the first
// portion of stage 1 calibration
if (PER_BIT_DESKEW == "OFF")
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
else if (PER_BIT_DESKEW == "ON")
cal1_state_r <= #TCQ CAL1_PB_STORE_FIRST_WAIT;
end
end
//*****************************************************************
// Per-bit deskew states
//*****************************************************************
// Wait state following storage of initial read data
CAL1_PB_STORE_FIRST_WAIT:
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE;
// Look for an edge on all DQ bits in current DQS group
CAL1_PB_DETECT_EDGE:
if (detect_edge_done_r) begin
if (found_stable_eye_r) begin
// If we've found the left edge for all bits (or more precisely,
// we've found the left edge, and then part of the stable
// window thereafter), then proceed to positioning the CPT clock
// right before the left margin
cnt_idel_dec_cpt_r <= #TCQ MIN_EYE_SIZE + 1;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_LEFT;
end else begin
// If we've reached the end of the sampling time, and haven't
// yet found the left margin of all the DQ bits, then:
if (!tap_limit_cpt_r) begin
// If we still have taps left to use, then store current value
// of read data, increment the capture clock, and continue to
// look for (left) edges
store_sr_req_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_PB_INC_CPT;
end else begin
              // If we ran out of taps moving the capture clock, and we
              // haven't finished edge detection, then reset the capture
              // clock taps to 0 (gradually, one tap at a time), then exit
              // the per-bit portion of the algorithm - i.e. proceed to
              // adjust the capture clock and DQ IODELAYs in the remaining
              // (non-per-bit) portion of stage 1 calibration
cnt_idel_dec_cpt_r <= #TCQ 6'd63;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT;
end
end
end
// Increment delay for DQS
CAL1_PB_INC_CPT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_PB_INC_CPT_WAIT;
end
// Wait for IODELAY for both capture and internal nodes within
// ISERDES to settle, before checking again for an edge
CAL1_PB_INC_CPT_WAIT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE;
end
        // We've found the left edges of the windows for all DQ bits
        // (actually, we found them MIN_EYE_SIZE taps ago). Decrement the
        // capture clock IDELAY to position just outside the left edge of
        // the data window
CAL1_PB_DEC_CPT_LEFT:
if (cnt_idel_dec_cpt_r == 6'b000000)
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_LEFT_WAIT;
else begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1;
end
CAL1_PB_DEC_CPT_LEFT_WAIT:
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE_DQ;
// If there is skew between individual DQ bits, then after we've
// positioned the CPT clock, we will be "in the window" for some
// DQ bits ("early" DQ bits), and "out of the window" for others
// ("late" DQ bits). Increase DQ taps until we are out of the
// window for all DQ bits
CAL1_PB_DETECT_EDGE_DQ:
if (detect_edge_done_r)
if (found_edge_all_r) begin
// We're out of the window for all DQ bits in this DQS group
// We're done with per-bit deskew for this group - now decr
// capture clock IODELAY tap count back to 0, and proceed
// with the rest of stage 1 calibration for this DQS group
cnt_idel_dec_cpt_r <= #TCQ tap_cnt_cpt_r;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT;
end else
if (!idel_tap_limit_dq_pb_r)
// If we still have DQ taps available for deskew, keep
// incrementing IODELAY tap count for the appropriate DQ bits
cal1_state_r <= #TCQ CAL1_PB_INC_DQ;
else begin
// Otherwise, stop immediately (we've done the best we can)
// and proceed with rest of stage 1 calibration
cnt_idel_dec_cpt_r <= #TCQ tap_cnt_cpt_r;
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT;
end
CAL1_PB_INC_DQ: begin
// Increment only those DQ for which an edge hasn't been found yet
cal1_dlyce_dq_r <= #TCQ ~pb_found_edge_last_r;
cal1_dlyinc_dq_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_PB_INC_DQ_WAIT;
end
CAL1_PB_INC_DQ_WAIT:
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE_DQ;
// Decrement capture clock taps back to initial value
CAL1_PB_DEC_CPT:
if (cnt_idel_dec_cpt_r == 6'b000000)
cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_WAIT;
else begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1;
end
// Wait for capture clock to settle, then proceed to rest of
        // stage 1 calibration for this DQS group
CAL1_PB_DEC_CPT_WAIT:
if (!cal1_wait_r) begin
store_sr_req_r <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
end
// When first starting calibration for a DQS group, save the
// current value of the read data shift register, and use this
// as a reference. Note that for the first iteration of the
// edge detection loop, we will in effect be checking for an edge
// at IODELAY taps = 0 - normally, we are comparing the read data
// for IODELAY taps = N, with the read data for IODELAY taps = N-1
// An edge can only be found at IODELAY taps = 0 if the read data
// is changing during this time (possible due to jitter)
CAL1_STORE_FIRST_WAIT: begin
mpr_dec_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_PAT_DETECT;
end
CAL1_VALID_WAIT: begin
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT;
end
CAL1_MPR_PAT_DETECT: begin
// MPR read leveling for centering DQS in valid window before
// OCLKDELAYED calibration begins in order to eliminate read issues
if (idel_pat_detect_valid_r == 1'b0) begin
cal1_state_r <= #TCQ CAL1_VALID_WAIT;
idel_pat_detect_valid_r <= #TCQ 1'b1;
end else if (idel_pat_detect_valid_r && idel_mpr_pat_detect_r) begin
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
idel_dec_cnt <= #TCQ 'd0;
end else if (!idelay_tap_limit_r)
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC;
else
cal1_state_r <= #TCQ CAL1_RDLVL_ERR;
end
CAL1_PAT_DETECT: begin
          // All DQ bits associated with a DQS are pushed to the right one
          // IDELAY tap at a time until the first rising DQS edge is in the
          // tri-state region before the first rising-edge window.
          // The detect_edge_done_r condition is included to support
          // averaging during IDELAY tap increments
if (detect_edge_done_r) begin
if (idel_pat_data_match) begin
case (idelay_adj)
2'b01: begin
                  cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC;
idel_dec_cnt <= #TCQ 1'b0;
idel_adj_inc <= #TCQ 1'b1;
end
2'b10: begin //DEC by 1
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC ;
idel_dec_cnt <= #TCQ 1'b1;
idel_adj_inc <= #TCQ 1'b0;
end
default: begin
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
idel_dec_cnt <= #TCQ 1'b0;
idel_adj_inc <= #TCQ 1'b0;
end
endcase
end else if (!idelay_tap_limit_r) begin
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC;
end else begin
cal1_state_r <= #TCQ CAL1_RDLVL_ERR;
end
end
end
// Increment IDELAY tap by 1 for DQ bits in the byte being calibrated
// until left edge of valid window detected
CAL1_DQ_IDEL_TAP_INC: begin
cal1_dq_idel_ce <= #TCQ 1'b1;
cal1_dq_idel_inc <= #TCQ 1'b1;
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC_WAIT;
idel_pat_detect_valid_r <= #TCQ 1'b0;
end
CAL1_DQ_IDEL_TAP_INC_WAIT: begin
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
if (!cal1_wait_r) begin
idel_adj_inc <= #TCQ 1'b0;
if (idel_adj_inc)
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
else if (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3"))
cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT;
else
cal1_state_r <= #TCQ CAL1_PAT_DETECT;
end
end
        // Decrement IDELAY taps once idel_pat_data_match is detected;
        // idel_dec_cnt sets how many taps are removed
CAL1_DQ_IDEL_TAP_DEC: begin
cal1_dq_idel_inc <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC_WAIT;
if (idel_dec_cnt >= 'd0)
cal1_dq_idel_ce <= #TCQ 1'b1;
else
cal1_dq_idel_ce <= #TCQ 1'b0;
if (idel_dec_cnt > 'd0)
idel_dec_cnt <= #TCQ idel_dec_cnt - 1;
else
idel_dec_cnt <= #TCQ idel_dec_cnt;
end
CAL1_DQ_IDEL_TAP_DEC_WAIT: begin
cal1_dq_idel_ce <= #TCQ 1'b0;
cal1_dq_idel_inc <= #TCQ 1'b0;
if (!cal1_wait_r) begin
if ((idel_dec_cnt > 'd0) || (pi_rdval_cnt > 'd0))
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC;
else if (mpr_dec_cpt_r)
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
else
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
end
end
// Check for presence of data eye edge. During this state, we
// sample the read data multiple times, and look for changes
// in the read data, specifically:
// 1. A change in the read data compared with the value of
// read data from the previous delay tap. This indicates
// that the most recent tap delay increment has moved us
// into either a new window, or moved/kept us in the
// transition/jitter region between windows. Note that this
// condition only needs to be checked for once, and for
// logistical purposes, we check this soon after entering
// this state (see comment in CAL1_DETECT_EDGE below for
// why this is done)
// 2. A change in the read data while we are in this state
// (i.e. in the absence of a tap delay increment). This
// indicates that we're close enough to a window edge that
// jitter will cause the read data to change even in the
// absence of a tap delay change
CAL1_DETECT_EDGE: begin
          // Essentially wait for the first comparison to finish, then
          // store the current data into the "old" data register. This
          // store happens now, rather than later (e.g. after we have
          // already left this state), in order to avoid the situation
          // where the data stored as "old" data has not been used in an
          // "active" comparison - i.e. data is stored after the last comparison
// of this state. In this case, we can miss an edge if the
// following sequence occurs:
// 1. Comparison completes in this state - no edge found
// 2. "Momentary jitter" occurs which "pushes" the data out the
// equivalent of one delay tap
// 3. We store this jittered data as the "old" data
// 4. "Jitter" no longer present
// 5. We increment the delay tap by one
// 6. Now we compare the current with the "old" data - they're
// the same, and no edge is detected
// NOTE: Given the large # of comparisons done in this state, it's
// highly unlikely the above sequence will occur in actual H/W
// Wait for the first load of read data into the comparison
// shift register to finish, then load the current read data
// into the "old" data register. This allows us to do one
          // initial comparison between the current read data, and
// stored data corresponding to the previous delay tap
idel_pat_detect_valid_r <= #TCQ 1'b0;
if (!store_sr_req_pulsed_r) begin
// Pulse store_sr_req_r only once in this state
store_sr_req_r <= #TCQ 1'b1;
store_sr_req_pulsed_r <= #TCQ 1'b1;
end else begin
store_sr_req_r <= #TCQ 1'b0;
store_sr_req_pulsed_r <= #TCQ 1'b1;
end
// Continue to sample read data and look for edges until the
// appropriate time interval (shorter for simulation-only,
// much, much longer for actual h/w) has elapsed
if (detect_edge_done_r) begin
if (tap_limit_cpt_r)
              // Only one edge was detected and we ran out of taps, since
              // only one bit time's worth of taps is available for window
              // detection. This can happen if at tap 0 DQS is in the
              // previous window, which results in only the left edge being
              // detected; or if at tap 0 DQS is in the current window,
              // resulting in only the right edge being detected.
              // Depending on the frequency, this case can also happen if at
              // tap 0 DQS is in the left noise region, resulting in only
              // the left edge being detected.
cal1_state_r <= #TCQ CAL1_CALC_IDEL;
else if (found_edge_r) begin
              // Sticky bit - asserted after we encounter an edge. Although
              // the current edge may not be considered the "first edge",
              // this just means we found at least one edge
found_first_edge_r <= #TCQ 1'b1;
// Only the right edge of the data valid window is found
// Record the inner right edge tap value
if (!found_first_edge_r && found_stable_eye_last_r) begin
if (tap_cnt_cpt_r == 'd0)
right_edge_taps_r <= #TCQ 'd0;
else
right_edge_taps_r <= #TCQ tap_cnt_cpt_r;
end
              // Both edges of data valid window found:
              // If we've found a second edge after a region of stability,
              // then we must have just passed the second ("right") edge of
              // the window. Record second_edge_taps = current tap - 1,
              // because we're one past the actual second edge tap, where
              // the edge taps represent the extremes of the data valid
              // window (i.e. smallest & largest taps where data is still valid)
if (found_first_edge_r && found_stable_eye_last_r) begin
found_second_edge_r <= #TCQ 1'b1;
second_edge_taps_r <= #TCQ tap_cnt_cpt_r - 1;
cal1_state_r <= #TCQ CAL1_CALC_IDEL;
end else begin
                // Otherwise, an edge was found (just not the "second" edge).
                // Assuming DQS is in the correct window at tap 0 of the
                // Phaser_IN fine taps, the first edge found is the right
                // edge of the valid window and marks the beginning of the
                // jitter region - hence we are done!
first_edge_taps_r <= #TCQ tap_cnt_cpt_r;
cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT;
end
end else
// Otherwise, if we haven't found an edge....
// If we still have taps left to use, then keep incrementing
cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT;
end
end
// Increment Phaser_IN delay for DQS
CAL1_IDEL_INC_CPT: begin
cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT_WAIT;
if (~tap_limit_cpt_r) begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b1;
end else begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
end
end
// Wait for Phaser_In to settle, before checking again for an edge
CAL1_IDEL_INC_CPT_WAIT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_DETECT_EDGE;
end
// Calculate final value of Phaser_IN taps. At this point, one or both
// edges of data eye have been found, and/or all taps have been
// exhausted looking for the edges
// NOTE: We're calculating the amount to decrement by, not the
// absolute setting for DQS.
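        // Worked example with hypothetical tap values: if first_edge_taps_r
        // = 10 and second_edge_taps_r = 40, the capture tap is currently 41
        // (one past the second edge), and cnt_idel_dec_cpt_r =
        // ((40 - 10) >> 1) + 1 = 16, so the decrement that follows leaves
        // the capture clock at tap 25, the center of the detected window.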
CAL1_CALC_IDEL: begin
// CASE1: If 2 edges found.
if (found_second_edge_r)
cnt_idel_dec_cpt_r
<= #TCQ ((second_edge_taps_r -
first_edge_taps_r)>>1) + 1;
else if (right_edge_taps_r > 6'd0)
// Only right edge detected
// right_edge_taps_r is the inner right edge tap value
// hence used for calculation
cnt_idel_dec_cpt_r
<= #TCQ (tap_cnt_cpt_r - (right_edge_taps_r>>1));
else if (found_first_edge_r)
// Only left edge detected
cnt_idel_dec_cpt_r
<= #TCQ ((tap_cnt_cpt_r - first_edge_taps_r)>>1);
else
cnt_idel_dec_cpt_r
<= #TCQ (tap_cnt_cpt_r>>1);
// Now use the value we just calculated to decrement CPT taps
// to the desired calibration point
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT;
end
        // Decrement capture clock for final adjustment - center the
        // capture clock in the middle of the data eye. This adjustment
        // will occur only when both edges are found using CPT taps. Must do this
// incrementally to avoid clock glitching (since CPT drives clock
// divider within each ISERDES)
CAL1_IDEL_DEC_CPT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b1;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
// once adjustment is complete, we're done with calibration for
// this DQS, repeat for next DQS
cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1;
if (cnt_idel_dec_cpt_r == 6'b000001) begin
if (mpr_dec_cpt_r) begin
if (|idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing]) begin
idel_dec_cnt <= #TCQ idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing];
cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC;
end else
cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT;
end else
cal1_state_r <= #TCQ CAL1_NEXT_DQS;
end else
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT_WAIT;
end
CAL1_IDEL_DEC_CPT_WAIT: begin
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
if (!cal1_wait_r)
cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT;
end
// Determine whether we're done, or have more DQS's to calibrate
// Also request precharge after every byte, as appropriate
CAL1_NEXT_DQS: begin
//if (mpr_rdlvl_done_r || (DRAM_TYPE == "DDR2"))
cal1_prech_req_r <= #TCQ 1'b1;
//else
// cal1_prech_req_r <= #TCQ 1'b0;
cal1_dlyce_cpt_r <= #TCQ 1'b0;
cal1_dlyinc_cpt_r <= #TCQ 1'b0;
// Prepare for another iteration with next DQS group
found_first_edge_r <= #TCQ 1'b0;
found_second_edge_r <= #TCQ 1'b0;
first_edge_taps_r <= #TCQ 'd0;
second_edge_taps_r <= #TCQ 'd0;
if ((SIM_CAL_OPTION == "FAST_CAL") ||
(cal1_cnt_cpt_r >= DQS_WIDTH-1)) begin
if (mpr_rdlvl_done_r) begin
rdlvl_last_byte_done <= #TCQ 1'b1;
mpr_last_byte_done <= #TCQ 1'b0;
end else begin
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b1;
end
end
// Wait until precharge that occurs in between calibration of
// DQS groups is finished
if (prech_done) begin // || (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3"))) begin
if (SIM_CAL_OPTION == "FAST_CAL") begin
//rdlvl_rank_done_r <= #TCQ 1'b1;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_DONE; //CAL1_REGL_LOAD;
end else if (cal1_cnt_cpt_r >= DQS_WIDTH-1) begin
if (~mpr_rdlvl_done_r) begin
mpr_rank_done_r <= #TCQ 1'b1;
// if (rnk_cnt_r == RANKS-1) begin
// All DQS groups in all ranks done
cal1_state_r <= #TCQ CAL1_DONE;
cal1_cnt_cpt_r <= #TCQ 'b0;
// end else begin
// // Process DQS groups in next rank
// rnk_cnt_r <= #TCQ rnk_cnt_r + 1;
// new_cnt_cpt_r <= #TCQ 1'b1;
// cal1_cnt_cpt_r <= #TCQ 'b0;
// cal1_state_r <= #TCQ CAL1_IDLE;
// end
end else begin
// All DQS groups in a rank done
rdlvl_rank_done_r <= #TCQ 1'b1;
if (rnk_cnt_r == RANKS-1) begin
// All DQS groups in all ranks done
cal1_state_r <= #TCQ CAL1_REGL_LOAD;
end else begin
// Process DQS groups in next rank
rnk_cnt_r <= #TCQ rnk_cnt_r + 1;
new_cnt_cpt_r <= #TCQ 1'b1;
cal1_cnt_cpt_r <= #TCQ 'b0;
cal1_state_r <= #TCQ CAL1_IDLE;
end
end
end else begin
// Process next DQS group
new_cnt_cpt_r <= #TCQ 1'b1;
cal1_cnt_cpt_r <= #TCQ cal1_cnt_cpt_r + 1;
cal1_state_r <= #TCQ CAL1_NEW_DQS_PREWAIT;
end
end
end
CAL1_NEW_DQS_PREWAIT: begin
if (!cal1_wait_r) begin
if (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3"))
cal1_state_r <= #TCQ CAL1_MPR_NEW_DQS_WAIT;
else
cal1_state_r <= #TCQ CAL1_NEW_DQS_WAIT;
end
end
// Load rank registers in Phaser_IN
CAL1_REGL_LOAD: begin
rdlvl_rank_done_r <= #TCQ 1'b0;
mpr_rank_done_r <= #TCQ 1'b0;
cal1_prech_req_r <= #TCQ 1'b0;
cal1_cnt_cpt_r <= #TCQ 'b0;
rnk_cnt_r <= #TCQ 2'b00;
if ((regl_rank_cnt == RANKS-1) &&
((regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1))) begin
cal1_state_r <= #TCQ CAL1_DONE;
rdlvl_last_byte_done <= #TCQ 1'b0;
mpr_last_byte_done <= #TCQ 1'b0;
end else
cal1_state_r <= #TCQ CAL1_REGL_LOAD;
end
CAL1_RDLVL_ERR: begin
rdlvl_stg1_err <= #TCQ 1'b1;
end
// Done with this stage of calibration
// if used, allow DEBUG_PORT to control taps
CAL1_DONE: begin
mpr_rdlvl_done_r <= #TCQ 1'b1;
cal1_prech_req_r <= #TCQ 1'b0;
if (~mpr_rdlvl_done_r && (OCAL_EN=="ON") && (DRAM_TYPE == "DDR3")) begin
rdlvl_stg1_done <= #TCQ 1'b0;
cal1_state_r <= #TCQ CAL1_IDLE;
end else
rdlvl_stg1_done <= #TCQ 1'b1;
end
endcase
end
// verilint STARC-2.2.3.3 on
endmodule
|
module mig_7series_v2_3_ddr_phy_ocd_data #
(parameter TCQ = 100,
parameter nCK_PER_CLK = 4,
parameter DQS_CNT_WIDTH = 3,
parameter DQ_WIDTH = 64)
(/*AUTOARG*/
// Outputs
match,
// Inputs
clk, rst, complex_oclkdelay_calib_start, phy_rddata, prbs_o,
oclkdelay_calib_cnt, prbs_ignore_first_byte, prbs_ignore_last_bytes,
phy_rddata_en_1
);
localparam [7:0] OCAL_DQ_MASK = 8'b0000_0000;
input clk;
input rst;
input complex_oclkdelay_calib_start;
input [2*nCK_PER_CLK*DQ_WIDTH-1:0] phy_rddata;
input [2*nCK_PER_CLK*DQ_WIDTH-1:0] prbs_o;
input [DQS_CNT_WIDTH:0] oclkdelay_calib_cnt;
reg [DQ_WIDTH-1:0] word, word_shifted;
reg [63:0] data_bytes_ns, data_bytes_r, data_bytes_r1, data_bytes_r2, prbs_bytes_ns, prbs_bytes_r;
always @(posedge clk) data_bytes_r <= #TCQ data_bytes_ns;
always @(posedge clk) data_bytes_r1 <= #TCQ data_bytes_r;
always @(posedge clk) data_bytes_r2 <= #TCQ data_bytes_r1;
always @(posedge clk) prbs_bytes_r <= #TCQ prbs_bytes_ns;
input prbs_ignore_first_byte, prbs_ignore_last_bytes;
reg prbs_ignore_first_byte_r, prbs_ignore_last_bytes_r;
always @(posedge clk) prbs_ignore_first_byte_r <= #TCQ prbs_ignore_first_byte;
always @(posedge clk) prbs_ignore_last_bytes_r <= #TCQ prbs_ignore_last_bytes;
input phy_rddata_en_1;
reg [7:0] last_byte_r;
wire [63:0] data_bytes = complex_oclkdelay_calib_start ? data_bytes_r2 : data_bytes_r;
wire [7:0] last_byte_ns;
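  // last_byte_r retains the final byte of the previous cycle's read data so
  // that the one-byte-shifted PRBS comparison (prbs180[0] below) can reference
  // data that arrived in the prior clock.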
generate if (nCK_PER_CLK == 4) begin
assign last_byte_ns = phy_rddata_en_1 ? data_bytes[63:56] : last_byte_r;
end else begin
assign last_byte_ns = phy_rddata_en_1 ? data_bytes[31:24] : last_byte_r;
end endgenerate
always @(posedge clk) last_byte_r <= #TCQ last_byte_ns;
reg second_half_ns, second_half_r;
always @(posedge clk) second_half_r <= #TCQ second_half_ns;
always @(*) begin
second_half_ns = second_half_r;
if (rst) second_half_ns = 1'b0;
else second_half_ns = phy_rddata_en_1 ^ second_half_r;
end
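  // second_half_r toggles on each valid read-data cycle and, for
  // nCK_PER_CLK == 2, indicates which half of the burst is present so the
  // "ignore first byte" / "ignore last bytes" qualifiers below are applied
  // to the correct half.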
reg [7:0] comp0, comp180, prbs0, prbs180;
integer ii;
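  // comp0/comp180 check each selected byte against the simple toggling
  // 00/FF training pattern in its two possible bit-time alignments, while
  // prbs0/prbs180 check it against the registered PRBS reference, with
  // prbs180 using the one-byte-shifted alignment (last_byte_r supplies
  // byte 0). The match outputs select between the two pairs based on
  // complex_oclkdelay_calib_start.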
always @(*) begin
comp0 = 8'hff;
comp180 = 8'hff;
prbs0 = 8'hff;
prbs180 = 8'hff;
data_bytes_ns = 64'b0;
prbs_bytes_ns = 64'b0;
for (ii=0; ii<2*nCK_PER_CLK; ii=ii+1)
begin
word = phy_rddata[ii*DQ_WIDTH+:DQ_WIDTH];
word_shifted = word >> oclkdelay_calib_cnt*8;
data_bytes_ns[ii*8+:8] = word_shifted[7:0];
word = prbs_o[ii*DQ_WIDTH+:DQ_WIDTH];
word_shifted = word >> oclkdelay_calib_cnt*8;
prbs_bytes_ns[ii*8+:8] = word_shifted[7:0];
comp0[ii] = data_bytes[ii*8+:8] == (ii%2 ? 8'hff : 8'h00);
comp180[ii] = data_bytes[ii*8+:8] == (ii%2 ? 8'h00 : 8'hff);
prbs0[ii] = data_bytes[ii*8+:8] == prbs_bytes_r[ii*8+:8];
end // for (ii=0; ii<2*nCK_PER_CLK; ii=ii+1)
prbs180[0] = last_byte_r == prbs_bytes_r[7:0];
for (ii=1; ii<2*nCK_PER_CLK; ii=ii+1)
prbs180[ii] = data_bytes[(ii-1)*8+:8] == prbs_bytes_r[ii*8+:8];
if (nCK_PER_CLK == 4) begin
if (prbs_ignore_last_bytes_r) begin
prbs0[7:6] = 2'b11;
prbs180[7] = 1'b1;
end
if (prbs_ignore_first_byte_r) prbs180[0] = 1'b1;
end else begin
if (second_half_r) begin
if (prbs_ignore_last_bytes_r) begin
prbs0[3:2] = 2'b11;
prbs180[3] = 1'b1;
end
end else if (prbs_ignore_first_byte_r) prbs180[0] = 1'b1;
end // else: !if(nCK_PER_CLK == 4)
end // always @ (*)
wire [7:0] comp0_masked = comp0 | OCAL_DQ_MASK;
wire [7:0] comp180_masked = comp180 | OCAL_DQ_MASK;
wire [7:0] prbs0_masked = prbs0 | OCAL_DQ_MASK;
wire [7:0] prbs180_masked = prbs180 | OCAL_DQ_MASK;
output [1:0] match;
assign match = complex_oclkdelay_calib_start ? {&prbs180_masked, &prbs0_masked} : {&comp180_masked , &comp0_masked};
endmodule
|
module
generate
if(CKE_ODT_AUX == "TRUE") begin
assign aux_out_map = ((DRAM_TYPE == "DDR2") && (RANKS == 1)) ?
{mux_aux_out[1],mux_aux_out[1],mux_aux_out[1],mux_aux_out[0]} :
mux_aux_out;
end else begin
assign aux_out_map = 4'b0000 ;
end
endgenerate
assign init_calib_complete = phy_init_data_sel;
assign phy_mc_ctl_full = phy_ctl_full;
assign phy_mc_cmd_full = phy_cmd_full;
assign phy_mc_data_full = phy_pre_data_a_full;
//***************************************************************************
// Generate parity for DDR3 RDIMM.
//***************************************************************************
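  // Each parity bit is the XOR of the address, bank, and command
  // (RAS#/CAS#/WE#) bits for one command slot, presumably matching the
  // even-parity input expected by the register on a DDR3 RDIMM.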
generate
if ((DRAM_TYPE == "DDR3") && (REG_CTRL == "ON")) begin: gen_ddr3_parity
if (nCK_PER_CLK == 4) begin
always @(posedge clk) begin
parity[0] <= #TCQ (^{mux_address[(ROW_WIDTH*4)-1:ROW_WIDTH*3],
mux_bank[(BANK_WIDTH*4)-1:BANK_WIDTH*3],
mux_cas_n[3], mux_ras_n[3], mux_we_n[3]});
end
always @(*) begin
parity[1] = (^{mux_address[ROW_WIDTH-1:0], mux_bank[BANK_WIDTH-1:0],
mux_cas_n[0],mux_ras_n[0], mux_we_n[0]});
parity[2] = (^{mux_address[(ROW_WIDTH*2)-1:ROW_WIDTH],
mux_bank[(BANK_WIDTH*2)-1:BANK_WIDTH],
mux_cas_n[1], mux_ras_n[1], mux_we_n[1]});
parity[3] = (^{mux_address[(ROW_WIDTH*3)-1:ROW_WIDTH*2],
mux_bank[(BANK_WIDTH*3)-1:BANK_WIDTH*2],
mux_cas_n[2],mux_ras_n[2], mux_we_n[2]});
end
end else begin
always @(posedge clk) begin
parity[0] <= #TCQ(^{mux_address[(ROW_WIDTH*2)-1:ROW_WIDTH],
mux_bank[(BANK_WIDTH*2)-1:BANK_WIDTH],
mux_cas_n[1], mux_ras_n[1], mux_we_n[1]});
end
always @(*) begin
parity[1] = (^{mux_address[ROW_WIDTH-1:0],
mux_bank[BANK_WIDTH-1:0],
mux_cas_n[0], mux_ras_n[0], mux_we_n[0]});
end
end
end else begin: gen_ddr3_noparity
if (nCK_PER_CLK == 4) begin
always @(posedge clk) begin
parity[0] <= #TCQ 1'b0;
parity[1] <= #TCQ 1'b0;
parity[2] <= #TCQ 1'b0;
parity[3] <= #TCQ 1'b0;
end
end else begin
always @(posedge clk) begin
parity[0] <= #TCQ 1'b0;
parity[1] <= #TCQ 1'b0;
end
end
end
endgenerate
//***************************************************************************
// Code for optional register stage in read path to MC for timing
//***************************************************************************
generate
if(RD_PATH_REG == 1)begin:RD_REG_TIMING
always @(posedge clk)begin
rddata_valid_reg <= #TCQ phy_rddata_valid_w;
rd_data_reg <= #TCQ rd_data_map;
end // always @ (posedge clk)
end else begin : RD_REG_NO_TIMING // block: RD_REG_TIMING
always @(phy_rddata_valid_w or rd_data_map)begin
rddata_valid_reg = phy_rddata_valid_w;
rd_data_reg = rd_data_map;
end
end
endgenerate
assign phy_rddata_valid = rddata_valid_reg;
assign phy_rd_data = rd_data_reg;
//***************************************************************************
// Hard PHY and accompanying bit mapping logic
//***************************************************************************
mig_7series_v2_3_ddr_mc_phy_wrapper #
(
.TCQ (TCQ),
.tCK (tCK),
.BANK_TYPE (BANK_TYPE),
.DATA_IO_PRIM_TYPE (DATA_IO_PRIM_TYPE),
.DATA_IO_IDLE_PWRDWN(DATA_IO_IDLE_PWRDWN),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.nCK_PER_CLK (nCK_PER_CLK),
.nCS_PER_RANK (nCS_PER_RANK),
.BANK_WIDTH (BANK_WIDTH),
.CKE_WIDTH (CKE_WIDTH),
.CS_WIDTH (CS_WIDTH),
.CK_WIDTH (CK_WIDTH),
.LP_DDR_CK_WIDTH (LP_DDR_CK_WIDTH),
.DDR2_DQSN_ENABLE (DDR2_DQSN_ENABLE),
.CWL (CWL),
.DM_WIDTH (DM_WIDTH),
.DQ_WIDTH (DQ_WIDTH),
.DQS_CNT_WIDTH (DQS_CNT_WIDTH),
.DQS_WIDTH (DQS_WIDTH),
.DRAM_TYPE (DRAM_TYPE),
.RANKS (RANKS),
.ODT_WIDTH (ODT_WIDTH),
.REG_CTRL (REG_CTRL),
.ROW_WIDTH (ROW_WIDTH),
.USE_CS_PORT (USE_CS_PORT),
.USE_DM_PORT (USE_DM_PORT),
.USE_ODT_PORT (USE_ODT_PORT),
.IBUF_LPWR_MODE (IBUF_LPWR_MODE),
.PHYCTL_CMD_FIFO (PHYCTL_CMD_FIFO),
.DATA_CTL_B0 (DATA_CTL_B0),
.DATA_CTL_B1 (DATA_CTL_B1),
.DATA_CTL_B2 (DATA_CTL_B2),
.DATA_CTL_B3 (DATA_CTL_B3),
.DATA_CTL_B4 (DATA_CTL_B4),
.BYTE_LANES_B0 (BYTE_LANES_B0),
.BYTE_LANES_B1 (BYTE_LANES_B1),
.BYTE_LANES_B2 (BYTE_LANES_B2),
.BYTE_LANES_B3 (BYTE_LANES_B3),
.BYTE_LANES_B4 (BYTE_LANES_B4),
.PHY_0_BITLANES (PHY_0_BITLANES),
.PHY_1_BITLANES (PHY_1_BITLANES),
.PHY_2_BITLANES (PHY_2_BITLANES),
.HIGHEST_BANK (HIGHEST_BANK),
.HIGHEST_LANE (HIGHEST_LANE),
.CK_BYTE_MAP (CK_BYTE_MAP),
.ADDR_MAP (ADDR_MAP),
.BANK_MAP (BANK_MAP),
.CAS_MAP (CAS_MAP),
.CKE_ODT_BYTE_MAP (CKE_ODT_BYTE_MAP),
.CKE_MAP (CKE_MAP),
.ODT_MAP (ODT_MAP),
.CKE_ODT_AUX (CKE_ODT_AUX),
.CS_MAP (CS_MAP),
.PARITY_MAP (PARITY_MAP),
.RAS_MAP (RAS_MAP),
.WE_MAP (WE_MAP),
.DQS_BYTE_MAP (DQS_BYTE_MAP),
.DATA0_MAP (DATA0_MAP),
.DATA1_MAP (DATA1_MAP),
.DATA2_MAP (DATA2_MAP),
.DATA3_MAP (DATA3_MAP),
.DATA4_MAP (DATA4_MAP),
.DATA5_MAP (DATA5_MAP),
.DATA6_MAP (DATA6_MAP),
.DATA7_MAP (DATA7_MAP),
.DATA8_MAP (DATA8_MAP),
.DATA9_MAP (DATA9_MAP),
.DATA10_MAP (DATA10_MAP),
.DATA11_MAP (DATA11_MAP),
.DATA12_MAP (DATA12_MAP),
.DATA13_MAP (DATA13_MAP),
.DATA14_MAP (DATA14_MAP),
.DATA15_MAP (DATA15_MAP),
.DATA16_MAP (DATA16_MAP),
.DATA17_MAP (DATA17_MAP),
.MASK0_MAP (MASK0_MAP),
.MASK1_MAP (MASK1_MAP),
.SIM_CAL_OPTION (SIM_CAL_OPTION),
.MASTER_PHY_CTL (MASTER_PHY_CTL),
.DRAM_WIDTH (DRAM_WIDTH),
.POC_USE_METASTABLE_SAMP (POC_USE_METASTABLE_SAMP)
)
u_ddr_mc_phy_wrapper
(
.rst (rst),
.iddr_rst (iddr_rst),
.clk (clk),
      // For memory frequencies between 400~1066 MHz, freq_refclk = mem_refclk.
      // For memory frequencies below 400 MHz, mem_refclk is used as-is and
      // freq_refclk = 2x or 4x mem_refclk such that it remains in the
      // 400~1066 MHz range
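      // Illustrative example (not a setting from this design): a 303 MHz
      // memory clock would be doubled so that freq_refclk = 606 MHz, which
      // falls inside the 400~1066 MHz window described above.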
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.mmcm_ps_clk (mmcm_ps_clk),
.pll_lock (pll_lock),
.sync_pulse (sync_pulse),
.idelayctrl_refclk (clk_ref),
.phy_cmd_wr_en (mux_cmd_wren),
.phy_data_wr_en (mux_wrdata_en),
// phy_ctl_wd = {ACTPRE[31:30],EventDelay[29:25],seq[24:23],
// DataOffset[22:17],HiIndex[16:15],LowIndex[14:12],
// AuxOut[11:8],ControlOffset[7:3],PHYCmd[2:0]}
// The fields ACTPRE, and BankCount are only used
// when the hard PHY counters are used by the MC.
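      // Reading the concatenation below MSB-to-LSB against the field map
      // above (field widths assumed from that map): 5'd0 and mux_cas_slot
      // fill the ACTPRE/EventDelay bits, calib_seq is seq, mux_data_offset
      // is DataOffset, mux_rank_cnt is HiIndex, 3'd0 is LowIndex,
      // aux_out_map is AuxOut, 5'd0 is ControlOffset, and mux_cmd is PHYCmd.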
.phy_ctl_wd ({5'd0, mux_cas_slot, calib_seq, mux_data_offset,
mux_rank_cnt, 3'd0, aux_out_map,
5'd0, mux_cmd}),
.phy_ctl_wr (mux_ctl_wren),
.phy_if_empty_def (phy_if_empty_def),
.phy_if_reset (phy_if_reset),
.data_offset_1 (mux_data_offset_1),
.data_offset_2 (mux_data_offset_2),
.aux_in_1 (aux_out_map),
.aux_in_2 (aux_out_map),
.idelaye2_init_val (idelaye2_init_val),
.oclkdelay_init_val (oclkdelay_init_val),
.if_empty (if_empty),
.phy_ctl_full (phy_ctl_full),
.phy_cmd_full (phy_cmd_full),
.phy_data_full (phy_data_full),
.phy_pre_data_a_full (phy_pre_data_a_full),
.ddr_clk (ddr_clk),
.phy_mc_go (phy_mc_go),
.phy_write_calib (phy_write_calib),
.phy_read_calib (phy_read_calib),
.po_fine_enable (po_enstg2_f),
.po_coarse_enable (po_enstg2_c),
.po_fine_inc (po_stg2_fincdec),
.po_coarse_inc (po_stg2_cincdec),
.po_counter_load_en (po_counter_load_en),
.po_counter_read_en (1'b1),
.po_sel_fine_oclk_delay (po_sel_stg2stg3),
.po_counter_load_val (),
.po_counter_read_val (po_counter_read_val),
.pi_rst_dqs_find (rst_stg1_cal),
.pi_fine_enable (pi_enstg2_f),
.pi_fine_inc (pi_stg2_fincdec),
.pi_counter_load_en (pi_stg2_load),
.pi_counter_load_val (pi_stg2_reg_l),
.pi_counter_read_val (pi_counter_read_val),
.idelay_ce (idelay_ce),
.idelay_inc (idelay_inc),
.idelay_ld (idelay_ld),
.pi_phase_locked (pi_phase_locked),
.pi_phase_locked_all (pi_phase_locked_all),
.pi_dqs_found (pi_found_dqs),
.pi_dqs_found_all (pi_dqs_found_all),
// Currently not being used. May be used in future if periodic reads
// become a requirement. This output could also be used to signal a
// catastrophic failure in read capture and the need for re-cal
.pi_dqs_out_of_range (pi_dqs_out_of_range),
.phy_init_data_sel (phy_init_data_sel),
.calib_sel (calib_sel),
.calib_in_common (calib_in_common),
.calib_zero_inputs (calib_zero_inputs),
.calib_zero_ctrl (calib_zero_ctrl),
.mux_address (mux_address),
.mux_bank (mux_bank),
.mux_cs_n (mux_cs_n),
.mux_ras_n (mux_ras_n),
.mux_cas_n (mux_cas_n),
.mux_we_n (mux_we_n),
.mux_reset_n (mux_reset_n),
.parity_in (parity),
.mux_wrdata (mux_wrdata),
.mux_wrdata_mask (mux_wrdata_mask),
.mux_odt (mux_odt),
.mux_cke (mux_cke),
.idle (idle),
.rd_data (rd_data_map),
.ddr_addr (ddr_addr),
.ddr_ba (ddr_ba),
.ddr_cas_n (ddr_cas_n),
.ddr_cke (ddr_cke),
.ddr_cs_n (ddr_cs_n),
.ddr_dm (ddr_dm),
.ddr_odt (ddr_odt),
.ddr_parity (ddr_parity),
.ddr_ras_n (ddr_ras_n),
.ddr_we_n (ddr_we_n),
.ddr_dq (ddr_dq),
.ddr_dqs (ddr_dqs),
.ddr_dqs_n (ddr_dqs_n),
.ddr_reset_n (ddr_reset_n),
.dbg_pi_counter_read_en (1'b1),
.ref_dll_lock (ref_dll_lock),
.rst_phaser_ref (rst_phaser_ref),
.dbg_pi_phase_locked_phy4lanes (dbg_pi_phase_locked_phy4lanes),
.dbg_pi_dqs_found_lanes_phy4lanes (dbg_pi_dqs_found_lanes_phy4lanes),
.byte_sel_cnt (byte_sel_cnt),
.pd_out (pd_out),
.fine_delay_incdec_pb (fine_delay_incdec_pb),
.fine_delay_sel (fine_delay_sel)
);
//***************************************************************************
// Soft memory initialization and calibration logic
//***************************************************************************
mig_7series_v2_3_ddr_calib_top #
(
.TCQ (TCQ),
.DDR3_VDD_OP_VOLT (DDR3_VDD_OP_VOLT),
.nCK_PER_CLK (nCK_PER_CLK),
.PRE_REV3ES (PRE_REV3ES),
.tCK (tCK),
.CLK_PERIOD (CLK_PERIOD),
.N_CTL_LANES (N_CTL_LANES),
.CTL_BYTE_LANE (CTL_BYTE_LANE),
.CTL_BANK (CTL_BANK),
.DRAM_TYPE (DRAM_TYPE),
.PRBS_WIDTH (8),
.DQS_BYTE_MAP (DQS_BYTE_MAP),
.HIGHEST_BANK (HIGHEST_BANK),
.BANK_TYPE (BANK_TYPE),
.HIGHEST_LANE (HIGHEST_LANE),
.BYTE_LANES_B0 (BYTE_LANES_B0),
.BYTE_LANES_B1 (BYTE_LANES_B1),
.BYTE_LANES_B2 (BYTE_LANES_B2),
.BYTE_LANES_B3 (BYTE_LANES_B3),
.BYTE_LANES_B4 (BYTE_LANES_B4),
.DATA_CTL_B0 (DATA_CTL_B0),
.DATA_CTL_B1 (DATA_CTL_B1),
.DATA_CTL_B2 (DATA_CTL_B2),
.DATA_CTL_B3 (DATA_CTL_B3),
.DATA_CTL_B4 (DATA_CTL_B4),
.SLOT_1_CONFIG (SLOT_1_CONFIG),
.BANK_WIDTH (BANK_WIDTH),
.CA_MIRROR (CA_MIRROR),
.COL_WIDTH (COL_WIDTH),
.CKE_ODT_AUX (CKE_ODT_AUX),
.nCS_PER_RANK (nCS_PER_RANK),
.DQ_WIDTH (DQ_WIDTH),
.DQS_CNT_WIDTH (DQS_CNT_WIDTH),
.DQS_WIDTH (DQS_WIDTH),
.DRAM_WIDTH (DRAM_WIDTH),
.ROW_WIDTH (ROW_WIDTH),
.RANKS (RANKS),
.CS_WIDTH (CS_WIDTH),
.CKE_WIDTH (CKE_WIDTH),
.DDR2_DQSN_ENABLE (DDR2_DQSN_ENABLE),
.PER_BIT_DESKEW ("OFF"),
.CALIB_ROW_ADD (CALIB_ROW_ADD),
.CALIB_COL_ADD (CALIB_COL_ADD),
.CALIB_BA_ADD (CALIB_BA_ADD),
.AL (AL),
.BURST_MODE (BURST_MODE),
.BURST_TYPE (BURST_TYPE),
.nCL (CL),
.nCWL (CWL),
.tRFC (tRFC),
.tREFI (tREFI),
.OUTPUT_DRV (OUTPUT_DRV),
.REG_CTRL (REG_CTRL),
.ADDR_CMD_MODE (ADDR_CMD_MODE),
.RTT_NOM (RTT_NOM),
.RTT_WR (RTT_WR),
.WRLVL (WRLVL_W),
.USE_ODT_PORT (USE_ODT_PORT),
.SIM_INIT_OPTION (SIM_INIT_OPTION),
.SIM_CAL_OPTION (SIM_CAL_OPTION),
.DEBUG_PORT (DEBUG_PORT),
.IDELAY_ADJ (IDELAY_ADJ),
.FINE_PER_BIT (FINE_PER_BIT),
.CENTER_COMP_MODE (CENTER_COMP_MODE),
.PI_VAL_ADJ (PI_VAL_ADJ),
.TAPSPERKCLK (TAPSPERKCLK),
.POC_USE_METASTABLE_SAMP (POC_USE_METASTABLE_SAMP)
)
u_ddr_calib_top
(
.clk (clk),
.rst (rst),
.tg_err (error),
.rst_tg_mc (rst_tg_mc),
.slot_0_present (slot_0_present),
.slot_1_present (slot_1_present),
// PHY Control Block and IN_FIFO status
.phy_ctl_ready (phy_mc_go),
.phy_ctl_full (1'b0),
.phy_cmd_full (1'b0),
.phy_data_full (1'b0),
.phy_if_empty (if_empty),
.idelaye2_init_val (idelaye2_init_val),
.oclkdelay_init_val (oclkdelay_init_val),
// From calib logic To data IN_FIFO
// DQ IDELAY tap value from Calib logic
// port to be added to mc_phy by Gary
.dlyval_dq (),
// hard PHY calibration modes
.write_calib (phy_write_calib),
.read_calib (phy_read_calib),
// DQS count and ck/addr/cmd to be mapped to calib_sel
// based on parameter that defines placement of ctl lanes
// and DQS byte groups in each bank. When phy_write_calib
// is de-asserted calib_sel should select CK/addr/cmd/ctl.
.calib_sel (calib_sel),
.calib_in_common (calib_in_common),
.calib_zero_inputs (calib_zero_inputs),
.calib_zero_ctrl (calib_zero_ctrl),
.phy_if_empty_def (phy_if_empty_def),
.phy_if_reset (phy_if_reset),
// Signals from calib logic to be MUXED with MC
// signals before sending to hard PHY
.calib_ctl_wren (calib_ctl_wren),
.calib_cmd_wren (calib_cmd_wren),
.calib_seq (calib_seq),
.calib_aux_out (calib_aux_out),
.calib_odt (calib_odt),
.calib_cke (calib_cke),
.calib_cmd (calib_cmd),
.calib_wrdata_en (calib_wrdata_en),
.calib_rank_cnt (calib_rank_cnt),
.calib_cas_slot (calib_cas_slot),
.calib_data_offset_0 (calib_data_offset_0),
.calib_data_offset_1 (calib_data_offset_1),
.calib_data_offset_2 (calib_data_offset_2),
.phy_reset_n (phy_reset_n),
.phy_address (phy_address),
.phy_bank (phy_bank),
.phy_cs_n (phy_cs_n),
.phy_ras_n (phy_ras_n),
.phy_cas_n (phy_cas_n),
.phy_we_n (phy_we_n),
.phy_wrdata (phy_wrdata),
// DQS Phaser_IN calibration/status signals
.pi_phaselocked (pi_phase_locked),
.pi_phase_locked_all (pi_phase_locked_all),
.pi_found_dqs (pi_found_dqs),
.pi_dqs_found_all (pi_dqs_found_all),
.pi_dqs_found_lanes (dbg_pi_dqs_found_lanes_phy4lanes),
.pi_rst_stg1_cal (rst_stg1_cal),
.pi_en_stg2_f (pi_enstg2_f),
.pi_stg2_f_incdec (pi_stg2_fincdec),
.pi_stg2_load (pi_stg2_load),
.pi_stg2_reg_l (pi_stg2_reg_l),
.pi_counter_read_val (pi_counter_read_val),
.device_temp (device_temp),
.tempmon_sample_en (tempmon_sample_en),
// IDELAY tap enable and inc signals
.idelay_ce (idelay_ce),
.idelay_inc (idelay_inc),
.idelay_ld (idelay_ld),
// DQS Phaser_OUT calibration/status signals
.po_sel_stg2stg3 (po_sel_stg2stg3),
.po_stg2_c_incdec (po_stg2_cincdec),
.po_en_stg2_c (po_enstg2_c),
.po_stg2_f_incdec (po_stg2_fincdec),
.po_en_stg2_f (po_enstg2_f),
.po_counter_load_en (po_counter_load_en),
.po_counter_read_val (po_counter_read_val),
// From data IN_FIFO To Calib logic and MC/UI
.phy_rddata (rd_data_map),
// From calib logic To MC
.phy_rddata_valid (phy_rddata_valid_w),
.calib_rd_data_offset_0 (calib_rd_data_offset_0),
.calib_rd_data_offset_1 (calib_rd_data_offset_1),
.calib_rd_data_offset_2 (calib_rd_data_offset_2),
.calib_writes (),
// Mem Init and Calibration status To MC
.init_calib_complete (phy_init_data_sel),
.init_wrcal_complete (init_wrcal_complete),
// Debug Error signals
.pi_phase_locked_err (dbg_pi_phaselock_err),
.pi_dqsfound_err (dbg_pi_dqsfound_err),
.wrcal_err (dbg_wrcal_err),
//used for oclk stg3 centering
.pd_out (pd_out),
.psen (psen),
.psincdec (psincdec),
.psdone (psdone),
.poc_sample_pd (poc_sample_pd),
// Debug Signals
.dbg_pi_phaselock_start (dbg_pi_phaselock_start),
.dbg_pi_dqsfound_start (dbg_pi_dqsfound_start),
.dbg_pi_dqsfound_done (dbg_pi_dqsfound_done),
.dbg_wrlvl_start (dbg_wrlvl_start),
.dbg_wrlvl_done (dbg_wrlvl_done),
.dbg_wrlvl_err (dbg_wrlvl_err),
.dbg_wrlvl_fine_tap_cnt (dbg_wrlvl_fine_tap_cnt),
.dbg_wrlvl_coarse_tap_cnt (dbg_wrlvl_coarse_tap_cnt),
.dbg_phy_wrlvl (dbg_phy_wrlvl),
.dbg_tap_cnt_during_wrlvl (dbg_tap_cnt_during_wrlvl),
.dbg_wl_edge_detect_valid (dbg_wl_edge_detect_valid),
.dbg_rd_data_edge_detect (dbg_rd_data_edge_detect),
.dbg_wrcal_start (dbg_wrcal_start),
.dbg_wrcal_done (dbg_wrcal_done),
.dbg_phy_wrcal (dbg_phy_wrcal),
.dbg_final_po_fine_tap_cnt (dbg_final_po_fine_tap_cnt),
.dbg_final_po_coarse_tap_cnt (dbg_final_po_coarse_tap_cnt),
.dbg_rdlvl_start (dbg_rdlvl_start),
.dbg_rdlvl_done (dbg_rdlvl_done),
.dbg_rdlvl_err (dbg_rdlvl_err),
.dbg_cpt_first_edge_cnt (dbg_cpt_first_edge_cnt),
.dbg_cpt_second_edge_cnt (dbg_cpt_second_edge_cnt),
.dbg_cpt_tap_cnt (dbg_cpt_tap_cnt),
.dbg_dq_idelay_tap_cnt (dbg_dq_idelay_tap_cnt),
.dbg_sel_pi_incdec (dbg_sel_pi_incdec),
.dbg_sel_po_incdec (dbg_sel_po_incdec),
.dbg_byte_sel (dbg_byte_sel),
.dbg_pi_f_inc (dbg_pi_f_inc),
.dbg_pi_f_dec (dbg_pi_f_dec),
.dbg_po_f_inc (dbg_po_f_inc),
.dbg_po_f_stg23_sel (dbg_po_f_stg23_sel),
.dbg_po_f_dec (dbg_po_f_dec),
.dbg_idel_up_all (dbg_idel_up_all),
.dbg_idel_down_all (dbg_idel_down_all),
.dbg_idel_up_cpt (dbg_idel_up_cpt),
.dbg_idel_down_cpt (dbg_idel_down_cpt),
.dbg_sel_idel_cpt (dbg_sel_idel_cpt),
.dbg_sel_all_idel_cpt (dbg_sel_all_idel_cpt),
.dbg_phy_rdlvl (dbg_phy_rdlvl),
.dbg_calib_top (dbg_calib_top),
.dbg_phy_init (dbg_phy_init),
.dbg_prbs_rdlvl (dbg_prbs_rdlvl),
.dbg_dqs_found_cal (dbg_dqs_found_cal),
.dbg_phy_oclkdelay_cal (dbg_phy_oclkdelay_cal),
.dbg_oclkdelay_rd_data (dbg_oclkdelay_rd_data),
.dbg_oclkdelay_calib_start (dbg_oclkdelay_calib_start),
.dbg_oclkdelay_calib_done (dbg_oclkdelay_calib_done),
.prbs_final_dqs_tap_cnt_r (prbs_final_dqs_tap_cnt_r),
.dbg_prbs_first_edge_taps (dbg_prbs_first_edge_taps),
.dbg_prbs_second_edge_taps (dbg_prbs_second_edge_taps),
.byte_sel_cnt (byte_sel_cnt),
.fine_delay_incdec_pb (fine_delay_incdec_pb),
.fine_delay_sel (fine_delay_sel)
);
endmodule
|
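// Summary (descriptive, inferred from the code below): this module builds one
// I/O-bank slice of the PHY. It instantiates up to four byte lanes (A through
// D, gated by BYTE_LANES) plus a shared PHY_CONTROL block, and registers/muxes
// the per-lane Phaser_IN/Phaser_OUT calibration and status signals based on
// calib_sel.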
module mig_7series_v2_3_ddr_phy_4lanes #(
parameter GENERATE_IDELAYCTRL = "TRUE",
parameter IODELAY_GRP = "IODELAY_MIG",
parameter FPGA_SPEED_GRADE = 1,
parameter BANK_TYPE = "HP_IO", // # = "HP_IO", "HPL_IO", "HR_IO", "HRL_IO"
parameter BYTELANES_DDR_CK = 24'b0010_0010_0010_0010_0010_0010,
parameter NUM_DDR_CK = 1,
// next three parameter fields correspond to byte lanes for lane order DCBA
parameter BYTE_LANES = 4'b1111, // lane existence, one per lane
parameter DATA_CTL_N = 4'b1111, // data or control, per lane
parameter BITLANES = 48'hffff_ffff_ffff,
parameter BITLANES_OUTONLY = 48'h0000_0000_0000,
parameter LANE_REMAP = 16'h3210,// 4-bit index
// used to rewire to one of four
                                    // input/output bus lanes
// example: 0321 remaps lanes as:
// D->A
// C->D
// B->C
// A->B
parameter LAST_BANK = "FALSE",
parameter USE_PRE_POST_FIFO = "FALSE",
parameter RCLK_SELECT_LANE = "B",
parameter real TCK = 0.00,
parameter SYNTHESIS = "FALSE",
parameter PO_CTL_COARSE_BYPASS = "FALSE",
parameter PO_FINE_DELAY = 0,
parameter PI_SEL_CLK_OFFSET = 0,
     // phy_control parameter used in other parameters
parameter PC_CLK_RATIO = 4,
//phaser_in parameters
parameter A_PI_FREQ_REF_DIV = "NONE",
parameter A_PI_CLKOUT_DIV = 2,
parameter A_PI_BURST_MODE = "TRUE",
parameter A_PI_OUTPUT_CLK_SRC = "DELAYED_REF" , //"DELAYED_REF",
parameter A_PI_FINE_DELAY = 60,
parameter A_PI_SYNC_IN_DIV_RST = "TRUE",
parameter B_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter B_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter B_PI_BURST_MODE = A_PI_BURST_MODE,
parameter B_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter B_PI_FINE_DELAY = A_PI_FINE_DELAY,
parameter B_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
parameter C_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter C_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter C_PI_BURST_MODE = A_PI_BURST_MODE,
parameter C_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter C_PI_FINE_DELAY = 0,
parameter C_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
parameter D_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter D_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter D_PI_BURST_MODE = A_PI_BURST_MODE,
parameter D_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter D_PI_FINE_DELAY = 0,
parameter D_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
//phaser_out parameters
parameter A_PO_CLKOUT_DIV = (DATA_CTL_N[0] == 0) ? PC_CLK_RATIO : 2,
parameter A_PO_FINE_DELAY = PO_FINE_DELAY,
parameter A_PO_COARSE_DELAY = 0,
parameter A_PO_OCLK_DELAY = 0,
parameter A_PO_OCLKDELAY_INV = "FALSE",
parameter A_PO_OUTPUT_CLK_SRC = "DELAYED_REF",
parameter A_PO_SYNC_IN_DIV_RST = "TRUE",
//parameter A_PO_SYNC_IN_DIV_RST = "FALSE",
parameter B_PO_CLKOUT_DIV = (DATA_CTL_N[1] == 0) ? PC_CLK_RATIO : 2,
parameter B_PO_FINE_DELAY = PO_FINE_DELAY,
parameter B_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter B_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter B_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter B_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter B_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter C_PO_CLKOUT_DIV = (DATA_CTL_N[2] == 0) ? PC_CLK_RATIO : 2,
parameter C_PO_FINE_DELAY = PO_FINE_DELAY,
parameter C_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter C_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter C_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter C_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter C_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter D_PO_CLKOUT_DIV = (DATA_CTL_N[3] == 0) ? PC_CLK_RATIO : 2,
parameter D_PO_FINE_DELAY = PO_FINE_DELAY,
parameter D_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter D_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter D_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter D_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter D_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter A_IDELAYE2_IDELAY_TYPE = "VARIABLE",
parameter A_IDELAYE2_IDELAY_VALUE = 00,
parameter B_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter B_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
parameter C_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter C_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
parameter D_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter D_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
// phy_control parameters
parameter PC_BURST_MODE = "TRUE",
parameter PC_DATA_CTL_N = DATA_CTL_N,
parameter PC_CMD_OFFSET = 0,
parameter PC_RD_CMD_OFFSET_0 = 0,
parameter PC_RD_CMD_OFFSET_1 = 0,
parameter PC_RD_CMD_OFFSET_2 = 0,
parameter PC_RD_CMD_OFFSET_3 = 0,
parameter PC_CO_DURATION = 1,
parameter PC_DI_DURATION = 1,
parameter PC_DO_DURATION = 1,
parameter PC_RD_DURATION_0 = 0,
parameter PC_RD_DURATION_1 = 0,
parameter PC_RD_DURATION_2 = 0,
parameter PC_RD_DURATION_3 = 0,
parameter PC_WR_CMD_OFFSET_0 = 5,
parameter PC_WR_CMD_OFFSET_1 = 5,
parameter PC_WR_CMD_OFFSET_2 = 5,
parameter PC_WR_CMD_OFFSET_3 = 5,
parameter PC_WR_DURATION_0 = 6,
parameter PC_WR_DURATION_1 = 6,
parameter PC_WR_DURATION_2 = 6,
parameter PC_WR_DURATION_3 = 6,
parameter PC_AO_WRLVL_EN = 0,
parameter PC_AO_TOGGLE = 4'b0101, // odd bits are toggle (CKE)
parameter PC_FOUR_WINDOW_CLOCKS = 63,
parameter PC_EVENTS_DELAY = 18,
parameter PC_PHY_COUNT_EN = "TRUE",
parameter PC_SYNC_MODE = "TRUE",
parameter PC_DISABLE_SEQ_MATCH = "TRUE",
parameter PC_MULTI_REGION = "FALSE",
// io fifo parameters
parameter A_OF_ARRAY_MODE = (DATA_CTL_N[0] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter B_OF_ARRAY_MODE = (DATA_CTL_N[1] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter C_OF_ARRAY_MODE = (DATA_CTL_N[2] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter D_OF_ARRAY_MODE = (DATA_CTL_N[3] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter OF_ALMOST_EMPTY_VALUE = 1,
parameter OF_ALMOST_FULL_VALUE = 1,
parameter OF_OUTPUT_DISABLE = "TRUE",
parameter OF_SYNCHRONOUS_MODE = PC_SYNC_MODE,
parameter A_OS_DATA_RATE = "DDR",
parameter A_OS_DATA_WIDTH = 4,
parameter B_OS_DATA_RATE = A_OS_DATA_RATE,
parameter B_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter C_OS_DATA_RATE = A_OS_DATA_RATE,
parameter C_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter D_OS_DATA_RATE = A_OS_DATA_RATE,
parameter D_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter A_IF_ARRAY_MODE = "ARRAY_MODE_4_X_8",
parameter B_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter C_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter D_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter IF_ALMOST_EMPTY_VALUE = 1,
parameter IF_ALMOST_FULL_VALUE = 1,
parameter IF_SYNCHRONOUS_MODE = PC_SYNC_MODE,
// this is used locally, not for external pushdown
   // NOTE: the 0+ is needed in each term to coerce it to an integer for the
   // addition; otherwise four 1-bit values are added, producing a 1-bit result.
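   // Illustrative example of the note above: with all four lane flags set,
   // BYTE_LANES[0]+BYTE_LANES[1]+BYTE_LANES[2]+BYTE_LANES[3] would be computed
   // with 1-bit operands and could wrap to 0, while the 0+ form widens each
   // term so the sum evaluates to 4 as intended.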
parameter HIGHEST_LANE = LAST_BANK == "FALSE" ? 4 : (BYTE_LANES[3] ? 4 : BYTE_LANES[2] ? 3 : BYTE_LANES[1] ? 2 : 1),
parameter N_CTL_LANES = ((0+(!DATA_CTL_N[0]) & BYTE_LANES[0]) + (0+(!DATA_CTL_N[1]) & BYTE_LANES[1]) + (0+(!DATA_CTL_N[2]) & BYTE_LANES[2]) + (0+(!DATA_CTL_N[3]) & BYTE_LANES[3])),
parameter N_BYTE_LANES = (0+BYTE_LANES[0]) + (0+BYTE_LANES[1]) + (0+BYTE_LANES[2]) + (0+BYTE_LANES[3]),
parameter N_DATA_LANES = N_BYTE_LANES - N_CTL_LANES,
   // assume one ODT per rank plus any declared CKEs
parameter AUXOUT_WIDTH = 4,
parameter LP_DDR_CK_WIDTH = 2
,parameter CKE_ODT_AUX = "FALSE"
)
(
//`include "phy.vh"
input rst,
input phy_clk,
input phy_ctl_clk,
input freq_refclk,
input mem_refclk,
input mem_refclk_div4,
input pll_lock,
input sync_pulse,
input idelayctrl_refclk,
input [HIGHEST_LANE*80-1:0] phy_dout,
input phy_cmd_wr_en,
input phy_data_wr_en,
input phy_rd_en,
input phy_ctl_mstr_empty,
input [31:0] phy_ctl_wd,
input [`PC_DATA_OFFSET_RANGE] data_offset,
input phy_ctl_wr,
input if_empty_def,
input phyGo,
input input_sink,
output [(LP_DDR_CK_WIDTH*24)-1:0] ddr_clk, // to memory
output rclk,
output if_a_empty,
output if_empty,
output byte_rd_en,
output if_empty_or,
output if_empty_and,
output of_ctl_a_full,
output of_data_a_full,
output of_ctl_full,
output of_data_full,
output pre_data_a_full,
output [HIGHEST_LANE*80-1:0]phy_din, // assume input bus same size as output bus
output phy_ctl_empty,
output phy_ctl_a_full,
output phy_ctl_full,
output [HIGHEST_LANE*12-1:0]mem_dq_out,
output [HIGHEST_LANE*12-1:0]mem_dq_ts,
input [HIGHEST_LANE*10-1:0]mem_dq_in,
output [HIGHEST_LANE-1:0] mem_dqs_out,
output [HIGHEST_LANE-1:0] mem_dqs_ts,
input [HIGHEST_LANE-1:0] mem_dqs_in,
input [1:0] byte_rd_en_oth_banks,
output [AUXOUT_WIDTH-1:0] aux_out,
output reg rst_out = 0,
output reg mcGo=0,
output phy_ctl_ready,
output ref_dll_lock,
input if_rst,
input phy_read_calib,
input phy_write_calib,
input idelay_inc,
input idelay_ce,
input idelay_ld,
input [2:0] calib_sel,
input calib_zero_ctrl,
input [HIGHEST_LANE-1:0] calib_zero_lanes,
input calib_in_common,
input po_fine_enable,
input po_coarse_enable,
input po_fine_inc,
input po_coarse_inc,
input po_counter_load_en,
input po_counter_read_en,
input [8:0] po_counter_load_val,
input po_sel_fine_oclk_delay,
output reg po_coarse_overflow,
output reg po_fine_overflow,
output reg [8:0] po_counter_read_val,
input pi_rst_dqs_find,
input pi_fine_enable,
input pi_fine_inc,
input pi_counter_load_en,
input pi_counter_read_en,
input [5:0] pi_counter_load_val,
output reg pi_fine_overflow,
output reg [5:0] pi_counter_read_val,
output reg pi_dqs_found,
output pi_dqs_found_all,
output pi_dqs_found_any,
output [HIGHEST_LANE-1:0] pi_phase_locked_lanes,
output [HIGHEST_LANE-1:0] pi_dqs_found_lanes,
output reg pi_dqs_out_of_range,
output reg pi_phase_locked,
output pi_phase_locked_all,
input [29:0] fine_delay,
input fine_delay_sel
);
localparam DATA_CTL_A = (~DATA_CTL_N[0]);
localparam DATA_CTL_B = (~DATA_CTL_N[1]);
localparam DATA_CTL_C = (~DATA_CTL_N[2]);
localparam DATA_CTL_D = (~DATA_CTL_N[3]);
localparam PRESENT_CTL_A = BYTE_LANES[0] && ! DATA_CTL_N[0];
localparam PRESENT_CTL_B = BYTE_LANES[1] && ! DATA_CTL_N[1];
localparam PRESENT_CTL_C = BYTE_LANES[2] && ! DATA_CTL_N[2];
localparam PRESENT_CTL_D = BYTE_LANES[3] && ! DATA_CTL_N[3];
localparam PRESENT_DATA_A = BYTE_LANES[0] && DATA_CTL_N[0];
localparam PRESENT_DATA_B = BYTE_LANES[1] && DATA_CTL_N[1];
localparam PRESENT_DATA_C = BYTE_LANES[2] && DATA_CTL_N[2];
localparam PRESENT_DATA_D = BYTE_LANES[3] && DATA_CTL_N[3];
localparam PC_DATA_CTL_A = (DATA_CTL_A) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_B = (DATA_CTL_B) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_C = (DATA_CTL_C) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_D = (DATA_CTL_D) ? "FALSE" : "TRUE";
localparam A_PO_COARSE_BYPASS = (DATA_CTL_A) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam B_PO_COARSE_BYPASS = (DATA_CTL_B) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam C_PO_COARSE_BYPASS = (DATA_CTL_C) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam D_PO_COARSE_BYPASS = (DATA_CTL_D) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam IO_A_START = 41;
localparam IO_A_END = 40;
localparam IO_B_START = 43;
localparam IO_B_END = 42;
localparam IO_C_START = 45;
localparam IO_C_END = 44;
localparam IO_D_START = 47;
localparam IO_D_END = 46;
localparam IO_A_X_START = (HIGHEST_LANE * 10) + 1;
localparam IO_A_X_END = (IO_A_X_START-1);
localparam IO_B_X_START = (IO_A_X_START + 2);
localparam IO_B_X_END = (IO_B_X_START -1);
localparam IO_C_X_START = (IO_B_X_START + 2);
localparam IO_C_X_END = (IO_C_X_START -1);
localparam IO_D_X_START = (IO_C_X_START + 2);
localparam IO_D_X_END = (IO_D_X_START -1);
localparam MSB_BURST_PEND_PO = 3;
localparam MSB_BURST_PEND_PI = 7;
localparam MSB_RANK_SEL_I = MSB_BURST_PEND_PI + 8;
localparam PHASER_CTL_BUS_WIDTH = MSB_RANK_SEL_I + 1;
wire [1:0] oserdes_dqs;
wire [1:0] oserdes_dqs_ts;
wire [1:0] oserdes_dq_ts;
wire [PHASER_CTL_BUS_WIDTH-1:0] phaser_ctl_bus;
wire [7:0] in_rank;
wire [11:0] IO_A;
wire [11:0] IO_B;
wire [11:0] IO_C;
wire [11:0] IO_D;
wire [319:0] phy_din_remap;
reg A_po_counter_read_en;
wire [8:0] A_po_counter_read_val;
reg A_pi_counter_read_en;
wire [5:0] A_pi_counter_read_val;
wire A_pi_fine_overflow;
wire A_po_coarse_overflow;
wire A_po_fine_overflow;
wire A_pi_dqs_found;
wire A_pi_dqs_out_of_range;
wire A_pi_phase_locked;
wire A_pi_iserdes_rst;
reg A_pi_fine_enable;
reg A_pi_fine_inc;
reg A_pi_counter_load_en;
reg [5:0] A_pi_counter_load_val;
reg A_pi_rst_dqs_find;
reg A_po_fine_enable;
reg A_po_coarse_enable;
reg A_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg A_po_sel_fine_oclk_delay;
reg A_po_coarse_inc;
reg A_po_counter_load_en;
reg [8:0] A_po_counter_load_val;
wire A_rclk;
reg A_idelay_ce;
reg A_idelay_ld;
reg [29:0] A_fine_delay;
reg A_fine_delay_sel;
reg B_po_counter_read_en;
wire [8:0] B_po_counter_read_val;
reg B_pi_counter_read_en;
wire [5:0] B_pi_counter_read_val;
wire B_pi_fine_overflow;
wire B_po_coarse_overflow;
wire B_po_fine_overflow;
wire B_pi_phase_locked;
wire B_pi_iserdes_rst;
wire B_pi_dqs_found;
wire B_pi_dqs_out_of_range;
reg B_pi_fine_enable;
reg B_pi_fine_inc;
reg B_pi_counter_load_en;
reg [5:0] B_pi_counter_load_val;
reg B_pi_rst_dqs_find;
reg B_po_fine_enable;
reg B_po_coarse_enable;
reg B_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg B_po_coarse_inc;
reg B_po_sel_fine_oclk_delay;
reg B_po_counter_load_en;
reg [8:0] B_po_counter_load_val;
wire B_rclk;
reg B_idelay_ce;
reg B_idelay_ld;
reg [29:0] B_fine_delay;
reg B_fine_delay_sel;
reg C_pi_fine_inc;
reg D_pi_fine_inc;
reg C_pi_fine_enable;
reg D_pi_fine_enable;
reg C_po_counter_load_en;
reg D_po_counter_load_en;
reg C_po_coarse_inc;
reg D_po_coarse_inc;
reg C_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg D_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg C_po_sel_fine_oclk_delay;
reg D_po_sel_fine_oclk_delay;
reg [5:0] C_pi_counter_load_val;
reg [5:0] D_pi_counter_load_val;
reg [8:0] C_po_counter_load_val;
reg [8:0] D_po_counter_load_val;
reg C_po_coarse_enable;
reg D_po_coarse_enable;
reg C_po_fine_enable;
reg D_po_fine_enable;
wire C_po_coarse_overflow;
wire D_po_coarse_overflow;
wire C_po_fine_overflow;
wire D_po_fine_overflow;
wire [8:0] C_po_counter_read_val;
wire [8:0] D_po_counter_read_val;
reg C_po_counter_read_en;
reg D_po_counter_read_en;
wire C_pi_dqs_found;
wire D_pi_dqs_found;
wire C_pi_fine_overflow;
wire D_pi_fine_overflow;
reg C_pi_counter_read_en;
reg D_pi_counter_read_en;
reg C_pi_counter_load_en;
reg D_pi_counter_load_en;
wire C_pi_phase_locked;
wire C_pi_iserdes_rst;
wire D_pi_phase_locked;
wire D_pi_iserdes_rst;
wire C_pi_dqs_out_of_range;
wire D_pi_dqs_out_of_range;
wire [5:0] C_pi_counter_read_val;
wire [5:0] D_pi_counter_read_val;
wire C_rclk;
wire D_rclk;
reg C_idelay_ce;
reg D_idelay_ce;
reg C_idelay_ld;
reg D_idelay_ld;
reg C_pi_rst_dqs_find;
reg D_pi_rst_dqs_find;
reg [29:0] C_fine_delay;
reg [29:0] D_fine_delay;
reg C_fine_delay_sel;
reg D_fine_delay_sel;
wire pi_iserdes_rst;
wire A_if_empty;
wire B_if_empty;
wire C_if_empty;
wire D_if_empty;
wire A_byte_rd_en;
wire B_byte_rd_en;
wire C_byte_rd_en;
wire D_byte_rd_en;
wire A_if_a_empty;
wire B_if_a_empty;
wire C_if_a_empty;
wire D_if_a_empty;
//wire A_if_full;
//wire B_if_full;
//wire C_if_full;
//wire D_if_full;
//wire A_of_empty;
//wire B_of_empty;
//wire C_of_empty;
//wire D_of_empty;
wire A_of_full;
wire B_of_full;
wire C_of_full;
wire D_of_full;
wire A_of_ctl_full;
wire B_of_ctl_full;
wire C_of_ctl_full;
wire D_of_ctl_full;
wire A_of_data_full;
wire B_of_data_full;
wire C_of_data_full;
wire D_of_data_full;
wire A_of_a_full;
wire B_of_a_full;
wire C_of_a_full;
wire D_of_a_full;
wire A_pre_fifo_a_full;
wire B_pre_fifo_a_full;
wire C_pre_fifo_a_full;
wire D_pre_fifo_a_full;
wire A_of_ctl_a_full;
wire B_of_ctl_a_full;
wire C_of_ctl_a_full;
wire D_of_ctl_a_full;
wire A_of_data_a_full;
wire B_of_data_a_full;
wire C_of_data_a_full;
wire D_of_data_a_full;
wire A_pre_data_a_full;
wire B_pre_data_a_full;
wire C_pre_data_a_full;
wire D_pre_data_a_full;
wire [LP_DDR_CK_WIDTH*6-1:0] A_ddr_clk; // for generation
wire [LP_DDR_CK_WIDTH*6-1:0] B_ddr_clk; //
wire [LP_DDR_CK_WIDTH*6-1:0] C_ddr_clk; //
wire [LP_DDR_CK_WIDTH*6-1:0] D_ddr_clk; //
wire [3:0] dummy_data;
wire [31:0] _phy_ctl_wd;
wire [1:0] phy_encalib;
assign pi_dqs_found_all =
(! PRESENT_DATA_A | A_pi_dqs_found) &
(! PRESENT_DATA_B | B_pi_dqs_found) &
(! PRESENT_DATA_C | C_pi_dqs_found) &
(! PRESENT_DATA_D | D_pi_dqs_found) ;
assign pi_dqs_found_any =
( PRESENT_DATA_A & A_pi_dqs_found) |
( PRESENT_DATA_B & B_pi_dqs_found) |
( PRESENT_DATA_C & C_pi_dqs_found) |
( PRESENT_DATA_D & D_pi_dqs_found) ;
assign pi_phase_locked_all =
(! PRESENT_DATA_A | A_pi_phase_locked) &
(! PRESENT_DATA_B | B_pi_phase_locked) &
(! PRESENT_DATA_C | C_pi_phase_locked) &
(! PRESENT_DATA_D | D_pi_phase_locked);
 wire dangling_inputs = (& dummy_data) & input_sink & 1'b0; // this reduces all constant 0 values to one signal,
 // which is combined into another signal such that
 // the other signal isn't changed. The purpose
 // is to fake the tools into ignoring dangling inputs.
 // Because it is ANDed with 1'b0, the contributing signals
 // are folded as constants or trimmed.
assign if_empty = !if_empty_def ? (A_if_empty | B_if_empty | C_if_empty | D_if_empty) : (A_if_empty & B_if_empty & C_if_empty & D_if_empty);
assign byte_rd_en = !if_empty_def ? (A_byte_rd_en & B_byte_rd_en & C_byte_rd_en & D_byte_rd_en) :
(A_byte_rd_en | B_byte_rd_en | C_byte_rd_en | D_byte_rd_en);
assign if_empty_or = (A_if_empty | B_if_empty | C_if_empty | D_if_empty);
assign if_empty_and = (A_if_empty & B_if_empty & C_if_empty & D_if_empty);
assign if_a_empty = A_if_a_empty | B_if_a_empty | C_if_a_empty | D_if_a_empty;
//assign if_full = A_if_full | B_if_full | C_if_full | D_if_full ;
//assign of_empty = A_of_empty & B_of_empty & C_of_empty & D_of_empty;
assign of_ctl_full = A_of_ctl_full | B_of_ctl_full | C_of_ctl_full | D_of_ctl_full ;
assign of_data_full = A_of_data_full | B_of_data_full | C_of_data_full | D_of_data_full ;
assign of_ctl_a_full = A_of_ctl_a_full | B_of_ctl_a_full | C_of_ctl_a_full | D_of_ctl_a_full ;
assign of_data_a_full = A_of_data_a_full | B_of_data_a_full | C_of_data_a_full | D_of_data_a_full | dangling_inputs ;
assign pre_data_a_full = A_pre_data_a_full | B_pre_data_a_full | C_pre_data_a_full | D_pre_data_a_full;
function [79:0] part_select_80;
input [319:0] vector;
input [1:0] select;
begin
case (select)
2'b00 : part_select_80[79:0] = vector[1*80-1:0*80];
2'b01 : part_select_80[79:0] = vector[2*80-1:1*80];
2'b10 : part_select_80[79:0] = vector[3*80-1:2*80];
2'b11 : part_select_80[79:0] = vector[4*80-1:3*80];
endcase
end
endfunction
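  // Usage example (illustrative): part_select_80(phy_dout, 2'b10) returns
  // bits [239:160] of phy_dout, i.e. the third 80-bit slice.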
wire [319:0] phy_dout_remap;
reg rst_out_trig = 1'b0;
reg [31:0] rclk_delay;
reg rst_edge1 = 1'b0;
reg rst_edge2 = 1'b0;
reg rst_edge3 = 1'b0;
reg rst_edge_detect = 1'b0;
wire rclk_;
reg rst_out_start = 1'b0 ;
reg rst_primitives=0;
reg A_rst_primitives=0;
reg B_rst_primitives=0;
reg C_rst_primitives=0;
reg D_rst_primitives=0;
`ifdef USE_PHY_CONTROL_TEST
wire [15:0] test_output;
wire [15:0] test_input;
wire [2:0] test_select=0;
wire scan_enable = 0;
`endif
generate
genvar i;
if (RCLK_SELECT_LANE == "A") begin
assign rclk_ = A_rclk;
assign pi_iserdes_rst = A_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "B") begin
assign rclk_ = B_rclk;
assign pi_iserdes_rst = B_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "C") begin
assign rclk_ = C_rclk;
assign pi_iserdes_rst = C_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "D") begin
assign rclk_ = D_rclk;
assign pi_iserdes_rst = D_pi_iserdes_rst;
end
else begin
assign rclk_ = B_rclk; // default
end
endgenerate
assign ddr_clk[LP_DDR_CK_WIDTH*6-1:0] = A_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*12-1:LP_DDR_CK_WIDTH*6] = B_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*18-1:LP_DDR_CK_WIDTH*12] = C_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*24-1:LP_DDR_CK_WIDTH*18] = D_ddr_clk;
assign pi_phase_locked_lanes =
{(! PRESENT_DATA_A[0] | A_pi_phase_locked),
(! PRESENT_DATA_B[0] | B_pi_phase_locked) ,
(! PRESENT_DATA_C[0] | C_pi_phase_locked) ,
(! PRESENT_DATA_D[0] | D_pi_phase_locked)};
assign pi_dqs_found_lanes = {D_pi_dqs_found, C_pi_dqs_found, B_pi_dqs_found, A_pi_dqs_found};
// this block scrubs X from rclk_delay[11]
reg rclk_delay_11;
always @(rclk_delay[11]) begin : rclk_delay_11_blk
if ( rclk_delay[11])
rclk_delay_11 = 1;
else
rclk_delay_11 = 0;
end
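  // Note (inferred from the block above): evaluating "if (rclk_delay[11])"
  // treats X/Z as false, so rclk_delay_11 is always a clean two-state copy of
  // bit 11 for the reset logic below.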
always @(posedge phy_clk or posedge rst ) begin
// scrub 4-state values from rclk_delay[11]
if ( rst) begin
rst_out <= #1 0;
end
else begin
if ( rclk_delay_11)
rst_out <= #1 1;
end
end
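  // Note (inferred): rclk_delay below acts as a 32-bit shift register that
  // fills with 1s once the primitives are out of reset and phyGo is high;
  // rst_out is then set when bit 11 goes high (roughly a dozen phy_clk cycles
  // later) and mcGo follows one cycle after that.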
always @(posedge phy_clk ) begin
// phy_ctl_ready drives reset of the system
rst_primitives <= !phy_ctl_ready ;
A_rst_primitives <= rst_primitives ;
B_rst_primitives <= rst_primitives ;
C_rst_primitives <= rst_primitives ;
D_rst_primitives <= rst_primitives ;
rclk_delay <= #1 (rclk_delay << 1) | (!rst_primitives && phyGo);
mcGo <= #1 rst_out ;
end
generate
if (BYTE_LANES[0]) begin
assign dummy_data[0] = 0;
end
else begin
assign dummy_data[0] = &phy_dout_remap[1*80-1:0*80];
end
if (BYTE_LANES[1]) begin
assign dummy_data[1] = 0;
end
else begin
assign dummy_data[1] = &phy_dout_remap[2*80-1:1*80];
end
if (BYTE_LANES[2]) begin
assign dummy_data[2] = 0;
end
else begin
assign dummy_data[2] = &phy_dout_remap[3*80-1:2*80];
end
if (BYTE_LANES[3]) begin
assign dummy_data[3] = 0;
end
else begin
assign dummy_data[3] = &phy_dout_remap[4*80-1:3*80];
end
if (PRESENT_DATA_A) begin
assign A_of_data_full = A_of_full;
assign A_of_ctl_full = 0;
assign A_of_data_a_full = A_of_a_full;
assign A_of_ctl_a_full = 0;
assign A_pre_data_a_full = A_pre_fifo_a_full;
end
else begin
assign A_of_ctl_full = A_of_full;
assign A_of_data_full = 0;
assign A_of_ctl_a_full = A_of_a_full;
assign A_of_data_a_full = 0;
assign A_pre_data_a_full = 0;
end
if (PRESENT_DATA_B) begin
assign B_of_data_full = B_of_full;
assign B_of_ctl_full = 0;
assign B_of_data_a_full = B_of_a_full;
assign B_of_ctl_a_full = 0;
assign B_pre_data_a_full = B_pre_fifo_a_full;
end
else begin
assign B_of_ctl_full = B_of_full;
assign B_of_data_full = 0;
assign B_of_ctl_a_full = B_of_a_full;
assign B_of_data_a_full = 0;
assign B_pre_data_a_full = 0;
end
if (PRESENT_DATA_C) begin
assign C_of_data_full = C_of_full;
assign C_of_ctl_full = 0;
assign C_of_data_a_full = C_of_a_full;
assign C_of_ctl_a_full = 0;
assign C_pre_data_a_full = C_pre_fifo_a_full;
end
else begin
assign C_of_ctl_full = C_of_full;
assign C_of_data_full = 0;
assign C_of_ctl_a_full = C_of_a_full;
assign C_of_data_a_full = 0;
assign C_pre_data_a_full = 0;
end
if (PRESENT_DATA_D) begin
assign D_of_data_full = D_of_full;
assign D_of_ctl_full = 0;
assign D_of_data_a_full = D_of_a_full;
assign D_of_ctl_a_full = 0;
assign D_pre_data_a_full = D_pre_fifo_a_full;
end
else begin
assign D_of_ctl_full = D_of_full;
assign D_of_data_full = 0;
assign D_of_ctl_a_full = D_of_a_full;
assign D_of_data_a_full = 0;
assign D_pre_data_a_full = 0;
end
  // the byte lane must exist and be a data lane.
if (PRESENT_DATA_A )
case ( LANE_REMAP[1:0] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[79:0];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[79:0];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[79:0];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[79:0];
endcase
else
case ( LANE_REMAP[1:0] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_B )
case ( LANE_REMAP[5:4] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[159:80];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[159:80];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[159:80];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[159:80];
endcase
else
if (HIGHEST_LANE > 1)
case ( LANE_REMAP[5:4] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_C)
case ( LANE_REMAP[9:8] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[239:160];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[239:160];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[239:160];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[239:160];
endcase
else
if (HIGHEST_LANE > 2)
case ( LANE_REMAP[9:8] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_D )
case ( LANE_REMAP[13:12] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[319:240];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[319:240];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[319:240];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[319:240];
endcase
else
if (HIGHEST_LANE > 3)
case ( LANE_REMAP[13:12] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (HIGHEST_LANE > 1)
assign _phy_ctl_wd = {phy_ctl_wd[31:23], data_offset, phy_ctl_wd[16:0]};
if (HIGHEST_LANE == 1)
assign _phy_ctl_wd = phy_ctl_wd;
//BUFR #(.BUFR_DIVIDE ("1")) rclk_buf(.I(rclk_), .O(rclk), .CE (1'b1), .CLR (pi_iserdes_rst));
BUFIO rclk_buf(.I(rclk_), .O(rclk) );
if ( BYTE_LANES[0] ) begin : ddr_byte_lane_A
assign phy_dout_remap[79:0] = part_select_80(phy_dout, (LANE_REMAP[1:0]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("A"),
.PO_DATA_CTL (PC_DATA_CTL_N[0] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[11:0]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[11:0]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (A_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (A_PI_BURST_MODE),
.PI_CLKOUT_DIV (A_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (A_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (A_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (A_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (A_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (A_PO_CLKOUT_DIV),
.PO_FINE_DELAY (A_PO_FINE_DELAY),
.PO_COARSE_BYPASS (A_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (A_PO_COARSE_DELAY),
.PO_OCLK_DELAY (A_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (A_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (A_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (A_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (A_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (A_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (A_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (A_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_A(
.mem_dq_out (mem_dq_out[11:0]),
.mem_dq_ts (mem_dq_ts[11:0]),
.mem_dq_in (mem_dq_in[9:0]),
.mem_dqs_out (mem_dqs_out[0]),
.mem_dqs_ts (mem_dqs_ts[0]),
.mem_dqs_in (mem_dqs_in[0]),
.rst (A_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (A_ddr_clk),
.rclk (A_rclk),
.pi_dqs_found (A_pi_dqs_found),
.dqs_out_of_range (A_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (A_if_a_empty),
.if_empty (A_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*A_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*A_of_empty*/),
.of_a_full (A_of_a_full),
.of_full (A_of_full),
.pre_fifo_a_full (A_pre_fifo_a_full),
.phy_din (phy_din_remap[79:0]),
.phy_dout (phy_dout_remap[79:0]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({B_byte_rd_en,C_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (A_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (A_idelay_ce),
.idelay_ld (A_idelay_ld),
.pi_rst_dqs_find (A_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (A_po_fine_enable),
.po_coarse_enable (A_po_coarse_enable),
.po_fine_inc (A_po_fine_inc),
.po_coarse_inc (A_po_coarse_inc),
.po_counter_load_en (A_po_counter_load_en),
.po_counter_read_en (A_po_counter_read_en),
.po_counter_load_val (A_po_counter_load_val),
.po_coarse_overflow (A_po_coarse_overflow),
.po_fine_overflow (A_po_fine_overflow),
.po_counter_read_val (A_po_counter_read_val),
.po_sel_fine_oclk_delay(A_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (A_pi_fine_enable),
.pi_fine_inc (A_pi_fine_inc),
.pi_counter_load_en (A_pi_counter_load_en),
.pi_counter_read_en (A_pi_counter_read_en),
.pi_counter_load_val (A_pi_counter_load_val),
.pi_fine_overflow (A_pi_fine_overflow),
.pi_counter_read_val (A_pi_counter_read_val),
.pi_iserdes_rst (A_pi_iserdes_rst),
.pi_phase_locked (A_pi_phase_locked),
.fine_delay (A_fine_delay),
.fine_delay_sel (A_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_A
assign A_of_a_full = 1'b0;
assign A_of_full = 1'b0;
assign A_pre_fifo_a_full = 1'b0;
assign A_if_empty = 1'b0;
assign A_byte_rd_en = 1'b1;
assign A_if_a_empty = 1'b0;
assign A_pi_phase_locked = 1;
assign A_pi_dqs_found = 1;
assign A_rclk = 0;
assign A_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign A_pi_counter_read_val = 0;
assign A_po_counter_read_val = 0;
assign A_pi_fine_overflow = 0;
assign A_po_coarse_overflow = 0;
assign A_po_fine_overflow = 0;
end
if ( BYTE_LANES[1] ) begin : ddr_byte_lane_B
assign phy_dout_remap[159:80] = part_select_80(phy_dout, (LANE_REMAP[5:4]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("B"),
.PO_DATA_CTL (PC_DATA_CTL_N[1] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[23:12]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[23:12]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (B_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (B_PI_BURST_MODE),
.PI_CLKOUT_DIV (B_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (B_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (B_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (B_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (B_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (B_PO_CLKOUT_DIV),
.PO_FINE_DELAY (B_PO_FINE_DELAY),
.PO_COARSE_BYPASS (B_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (B_PO_COARSE_DELAY),
.PO_OCLK_DELAY (B_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (B_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (B_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (B_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (B_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (B_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (B_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (B_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_B(
.mem_dq_out (mem_dq_out[23:12]),
.mem_dq_ts (mem_dq_ts[23:12]),
.mem_dq_in (mem_dq_in[19:10]),
.mem_dqs_out (mem_dqs_out[1]),
.mem_dqs_ts (mem_dqs_ts[1]),
.mem_dqs_in (mem_dqs_in[1]),
.rst (B_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (B_ddr_clk),
.rclk (B_rclk),
.pi_dqs_found (B_pi_dqs_found),
.dqs_out_of_range (B_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (B_if_a_empty),
.if_empty (B_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*B_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*B_of_empty*/),
.of_a_full (B_of_a_full),
.of_full (B_of_full),
.pre_fifo_a_full (B_pre_fifo_a_full),
.phy_din (phy_din_remap[159:80]),
.phy_dout (phy_dout_remap[159:80]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,C_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (B_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (B_idelay_ce),
.idelay_ld (B_idelay_ld),
.pi_rst_dqs_find (B_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (B_po_fine_enable),
.po_coarse_enable (B_po_coarse_enable),
.po_fine_inc (B_po_fine_inc),
.po_coarse_inc (B_po_coarse_inc),
.po_counter_load_en (B_po_counter_load_en),
.po_counter_read_en (B_po_counter_read_en),
.po_counter_load_val (B_po_counter_load_val),
.po_coarse_overflow (B_po_coarse_overflow),
.po_fine_overflow (B_po_fine_overflow),
.po_counter_read_val (B_po_counter_read_val),
.po_sel_fine_oclk_delay(B_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (B_pi_fine_enable),
.pi_fine_inc (B_pi_fine_inc),
.pi_counter_load_en (B_pi_counter_load_en),
.pi_counter_read_en (B_pi_counter_read_en),
.pi_counter_load_val (B_pi_counter_load_val),
.pi_fine_overflow (B_pi_fine_overflow),
.pi_counter_read_val (B_pi_counter_read_val),
.pi_iserdes_rst (B_pi_iserdes_rst),
.pi_phase_locked (B_pi_phase_locked),
.fine_delay (B_fine_delay),
.fine_delay_sel (B_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_B
assign B_of_a_full = 1'b0;
assign B_of_full = 1'b0;
assign B_pre_fifo_a_full = 1'b0;
assign B_if_empty = 1'b0;
assign B_if_a_empty = 1'b0;
assign B_byte_rd_en = 1'b1;
assign B_pi_phase_locked = 1;
assign B_pi_dqs_found = 1;
assign B_rclk = 0;
assign B_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign B_pi_counter_read_val = 0;
assign B_po_counter_read_val = 0;
assign B_pi_fine_overflow = 0;
assign B_po_coarse_overflow = 0;
assign B_po_fine_overflow = 0;
end
if ( BYTE_LANES[2] ) begin : ddr_byte_lane_C
assign phy_dout_remap[239:160] = part_select_80(phy_dout, (LANE_REMAP[9:8]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("C"),
.PO_DATA_CTL (PC_DATA_CTL_N[2] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[35:24]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[35:24]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (C_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (C_PI_BURST_MODE),
.PI_CLKOUT_DIV (C_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (C_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (C_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (C_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (C_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (C_PO_CLKOUT_DIV),
.PO_FINE_DELAY (C_PO_FINE_DELAY),
.PO_COARSE_BYPASS (C_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (C_PO_COARSE_DELAY),
.PO_OCLK_DELAY (C_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (C_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (C_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (C_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (C_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (C_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (C_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (C_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_C(
.mem_dq_out (mem_dq_out[35:24]),
.mem_dq_ts (mem_dq_ts[35:24]),
.mem_dq_in (mem_dq_in[29:20]),
.mem_dqs_out (mem_dqs_out[2]),
.mem_dqs_ts (mem_dqs_ts[2]),
.mem_dqs_in (mem_dqs_in[2]),
.rst (C_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (C_ddr_clk),
.rclk (C_rclk),
.pi_dqs_found (C_pi_dqs_found),
.dqs_out_of_range (C_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (C_if_a_empty),
.if_empty (C_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*C_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*C_of_empty*/),
.of_a_full (C_of_a_full),
.of_full (C_of_full),
.pre_fifo_a_full (C_pre_fifo_a_full),
.phy_din (phy_din_remap[239:160]),
.phy_dout (phy_dout_remap[239:160]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,B_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (C_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (C_idelay_ce),
.idelay_ld (C_idelay_ld),
.pi_rst_dqs_find (C_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (C_po_fine_enable),
.po_coarse_enable (C_po_coarse_enable),
.po_fine_inc (C_po_fine_inc),
.po_coarse_inc (C_po_coarse_inc),
.po_counter_load_en (C_po_counter_load_en),
.po_counter_read_en (C_po_counter_read_en),
.po_counter_load_val (C_po_counter_load_val),
.po_coarse_overflow (C_po_coarse_overflow),
.po_fine_overflow (C_po_fine_overflow),
.po_counter_read_val (C_po_counter_read_val),
.po_sel_fine_oclk_delay(C_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (C_pi_fine_enable),
.pi_fine_inc (C_pi_fine_inc),
.pi_counter_load_en (C_pi_counter_load_en),
.pi_counter_read_en (C_pi_counter_read_en),
.pi_counter_load_val (C_pi_counter_load_val),
.pi_fine_overflow (C_pi_fine_overflow),
.pi_counter_read_val (C_pi_counter_read_val),
.pi_iserdes_rst (C_pi_iserdes_rst),
.pi_phase_locked (C_pi_phase_locked),
.fine_delay (C_fine_delay),
.fine_delay_sel (C_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_C
assign C_of_a_full = 1'b0;
assign C_of_full = 1'b0;
assign C_pre_fifo_a_full = 1'b0;
assign C_if_empty = 1'b0;
assign C_byte_rd_en = 1'b1;
assign C_if_a_empty = 1'b0;
assign C_pi_phase_locked = 1;
assign C_pi_dqs_found = 1;
assign C_rclk = 0;
assign C_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign C_pi_counter_read_val = 0;
assign C_po_counter_read_val = 0;
assign C_pi_fine_overflow = 0;
assign C_po_coarse_overflow = 0;
assign C_po_fine_overflow = 0;
end
if ( BYTE_LANES[3] ) begin : ddr_byte_lane_D
assign phy_dout_remap[319:240] = part_select_80(phy_dout, (LANE_REMAP[13:12]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("D"),
.PO_DATA_CTL (PC_DATA_CTL_N[3] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[47:36]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[47:36]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (D_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (D_PI_BURST_MODE),
.PI_CLKOUT_DIV (D_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (D_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (D_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (D_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (D_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (D_PO_CLKOUT_DIV),
.PO_FINE_DELAY (D_PO_FINE_DELAY),
.PO_COARSE_BYPASS (D_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (D_PO_COARSE_DELAY),
.PO_OCLK_DELAY (D_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (D_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (D_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (D_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (D_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (D_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (D_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (D_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_D(
.mem_dq_out (mem_dq_out[47:36]),
.mem_dq_ts (mem_dq_ts[47:36]),
.mem_dq_in (mem_dq_in[39:30]),
.mem_dqs_out (mem_dqs_out[3]),
.mem_dqs_ts (mem_dqs_ts[3]),
.mem_dqs_in (mem_dqs_in[3]),
.rst (D_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (D_ddr_clk),
.rclk (D_rclk),
.pi_dqs_found (D_pi_dqs_found),
.dqs_out_of_range (D_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (D_if_a_empty),
.if_empty (D_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*D_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*D_of_empty*/),
.of_a_full (D_of_a_full),
.of_full (D_of_full),
.pre_fifo_a_full (D_pre_fifo_a_full),
.phy_din (phy_din_remap[319:240]),
.phy_dout (phy_dout_remap[319:240]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.idelay_inc (idelay_inc),
.idelay_ce (D_idelay_ce),
.idelay_ld (D_idelay_ld),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,B_byte_rd_en,C_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (D_byte_rd_en),
// calibration signals
.pi_rst_dqs_find (D_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (D_po_fine_enable),
.po_coarse_enable (D_po_coarse_enable),
.po_fine_inc (D_po_fine_inc),
.po_coarse_inc (D_po_coarse_inc),
.po_counter_load_en (D_po_counter_load_en),
.po_counter_read_en (D_po_counter_read_en),
.po_counter_load_val (D_po_counter_load_val),
.po_coarse_overflow (D_po_coarse_overflow),
.po_fine_overflow (D_po_fine_overflow),
.po_counter_read_val (D_po_counter_read_val),
.po_sel_fine_oclk_delay(D_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (D_pi_fine_enable),
.pi_fine_inc (D_pi_fine_inc),
.pi_counter_load_en (D_pi_counter_load_en),
.pi_counter_read_en (D_pi_counter_read_en),
.pi_counter_load_val (D_pi_counter_load_val),
.pi_fine_overflow (D_pi_fine_overflow),
.pi_counter_read_val (D_pi_counter_read_val),
.pi_iserdes_rst (D_pi_iserdes_rst),
.pi_phase_locked (D_pi_phase_locked),
.fine_delay (D_fine_delay),
.fine_delay_sel (D_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_D
assign D_of_a_full = 1'b0;
assign D_of_full = 1'b0;
assign D_pre_fifo_a_full = 1'b0;
assign D_if_empty = 1'b0;
assign D_byte_rd_en = 1'b1;
assign D_if_a_empty = 1'b0;
assign D_rclk = 0;
assign D_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign D_pi_dqs_found = 1;
assign D_pi_phase_locked = 1;
assign D_pi_counter_read_val = 0;
assign D_po_counter_read_val = 0;
assign D_pi_fine_overflow = 0;
assign D_po_coarse_overflow = 0;
assign D_po_fine_overflow = 0;
end
endgenerate
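  // Note (descriptive): a single PHY_CONTROL primitive below is shared by the
  // byte lanes of this bank; the PC_* parameters configure its command offsets
  // and durations, while phaser_ctl_bus and in_rank carry the per-lane
  // burst-pending and rank information.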
assign phaser_ctl_bus[MSB_RANK_SEL_I : MSB_RANK_SEL_I - 7] = in_rank;
PHY_CONTROL #(
.AO_WRLVL_EN ( PC_AO_WRLVL_EN),
.AO_TOGGLE ( PC_AO_TOGGLE),
.BURST_MODE ( PC_BURST_MODE),
.CO_DURATION ( PC_CO_DURATION ),
.CLK_RATIO ( PC_CLK_RATIO),
.DATA_CTL_A_N ( PC_DATA_CTL_A),
.DATA_CTL_B_N ( PC_DATA_CTL_B),
.DATA_CTL_C_N ( PC_DATA_CTL_C),
.DATA_CTL_D_N ( PC_DATA_CTL_D),
.DI_DURATION ( PC_DI_DURATION ),
.DO_DURATION ( PC_DO_DURATION ),
.EVENTS_DELAY ( PC_EVENTS_DELAY),
.FOUR_WINDOW_CLOCKS ( PC_FOUR_WINDOW_CLOCKS),
.MULTI_REGION ( PC_MULTI_REGION ),
.PHY_COUNT_ENABLE ( PC_PHY_COUNT_EN),
.DISABLE_SEQ_MATCH ( PC_DISABLE_SEQ_MATCH),
.SYNC_MODE ( PC_SYNC_MODE),
.CMD_OFFSET ( PC_CMD_OFFSET),
.RD_CMD_OFFSET_0 ( PC_RD_CMD_OFFSET_0),
.RD_CMD_OFFSET_1 ( PC_RD_CMD_OFFSET_1),
.RD_CMD_OFFSET_2 ( PC_RD_CMD_OFFSET_2),
.RD_CMD_OFFSET_3 ( PC_RD_CMD_OFFSET_3),
.RD_DURATION_0 ( PC_RD_DURATION_0),
.RD_DURATION_1 ( PC_RD_DURATION_1),
.RD_DURATION_2 ( PC_RD_DURATION_2),
.RD_DURATION_3 ( PC_RD_DURATION_3),
.WR_CMD_OFFSET_0 ( PC_WR_CMD_OFFSET_0),
.WR_CMD_OFFSET_1 ( PC_WR_CMD_OFFSET_1),
.WR_CMD_OFFSET_2 ( PC_WR_CMD_OFFSET_2),
.WR_CMD_OFFSET_3 ( PC_WR_CMD_OFFSET_3),
.WR_DURATION_0 ( PC_WR_DURATION_0),
.WR_DURATION_1 ( PC_WR_DURATION_1),
.WR_DURATION_2 ( PC_WR_DURATION_2),
.WR_DURATION_3 ( PC_WR_DURATION_3)
) phy_control_i (
.AUXOUTPUT (aux_out),
.INBURSTPENDING (phaser_ctl_bus[MSB_BURST_PEND_PI:MSB_BURST_PEND_PI-3]),
.INRANKA (in_rank[1:0]),
.INRANKB (in_rank[3:2]),
.INRANKC (in_rank[5:4]),
.INRANKD (in_rank[7:6]),
.OUTBURSTPENDING (phaser_ctl_bus[MSB_BURST_PEND_PO:MSB_BURST_PEND_PO-3]),
.PCENABLECALIB (phy_encalib),
.PHYCTLALMOSTFULL (phy_ctl_a_full),
.PHYCTLEMPTY (phy_ctl_empty),
.PHYCTLFULL (phy_ctl_full),
.PHYCTLREADY (phy_ctl_ready),
.MEMREFCLK (mem_refclk),
.PHYCLK (phy_ctl_clk),
.PHYCTLMSTREMPTY (phy_ctl_mstr_empty),
.PHYCTLWD (_phy_ctl_wd),
.PHYCTLWRENABLE (phy_ctl_wr),
.PLLLOCK (pll_lock),
.REFDLLLOCK (ref_dll_lock), // is reset while !locked
.RESET (rst),
.SYNCIN (sync_pulse),
.READCALIBENABLE (phy_read_calib),
.WRITECALIBENABLE (phy_write_calib)
`ifdef USE_PHY_CONTROL_TEST
, .TESTINPUT (16'b0),
.TESTOUTPUT (test_output),
.TESTSELECT (test_select),
.SCANENABLEN (scan_enable)
`endif
);
// register outputs to give extra slack in timing
always @(posedge phy_clk ) begin
case (calib_sel[1:0])
2'h0: begin
po_coarse_overflow <= #1 A_po_coarse_overflow;
po_fine_overflow <= #1 A_po_fine_overflow;
po_counter_read_val <= #1 A_po_counter_read_val;
pi_fine_overflow <= #1 A_pi_fine_overflow;
pi_counter_read_val<= #1 A_pi_counter_read_val;
pi_phase_locked <= #1 A_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 A_pi_dqs_found;
pi_dqs_out_of_range <= #1 A_pi_dqs_out_of_range;
end
2'h1: begin
po_coarse_overflow <= #1 B_po_coarse_overflow;
po_fine_overflow <= #1 B_po_fine_overflow;
po_counter_read_val <= #1 B_po_counter_read_val;
pi_fine_overflow <= #1 B_pi_fine_overflow;
pi_counter_read_val <= #1 B_pi_counter_read_val;
pi_phase_locked <= #1 B_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 B_pi_dqs_found;
pi_dqs_out_of_range <= #1 B_pi_dqs_out_of_range;
end
2'h2: begin
po_coarse_overflow <= #1 C_po_coarse_overflow;
po_fine_overflow <= #1 C_po_fine_overflow;
po_counter_read_val <= #1 C_po_counter_read_val;
pi_fine_overflow <= #1 C_pi_fine_overflow;
pi_counter_read_val <= #1 C_pi_counter_read_val;
pi_phase_locked <= #1 C_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 C_pi_dqs_found;
pi_dqs_out_of_range <= #1 C_pi_dqs_out_of_range;
end
2'h3: begin
po_coarse_overflow <= #1 D_po_coarse_overflow;
po_fine_overflow <= #1 D_po_fine_overflow;
po_counter_read_val <= #1 D_po_counter_read_val;
pi_fine_overflow <= #1 D_pi_fine_overflow;
pi_counter_read_val <= #1 D_pi_counter_read_val;
pi_phase_locked <= #1 D_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 D_pi_dqs_found;
pi_dqs_out_of_range <= #1 D_pi_dqs_out_of_range;
end
default: begin
po_coarse_overflow <= po_coarse_overflow;
end
endcase
end
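      // note: calib_sel[1:0] picks which lane's status registers drive the
      // shared outputs above; when calib_in_common is set, pi_dqs_found
      // instead reports pi_dqs_found_any (the OR across the present data lanes)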
wire B_mux_ctrl;
wire C_mux_ctrl;
wire D_mux_ctrl;
generate
if (HIGHEST_LANE > 1)
assign B_mux_ctrl = ( !calib_zero_lanes[1] && ( ! calib_zero_ctrl || DATA_CTL_N[1]));
else
assign B_mux_ctrl = 0;
if (HIGHEST_LANE > 2)
assign C_mux_ctrl = ( !calib_zero_lanes[2] && (! calib_zero_ctrl || DATA_CTL_N[2]));
else
assign C_mux_ctrl = 0;
if (HIGHEST_LANE > 3)
assign D_mux_ctrl = ( !calib_zero_lanes[3] && ( ! calib_zero_ctrl || DATA_CTL_N[3]));
else
assign D_mux_ctrl = 0;
endgenerate
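   // a lane's *_mux_ctrl is set when its calib_zero_lanes bit is clear and
   // either calib_zero_ctrl is deasserted or the lane is a data lane
   // (DATA_CTL_N[n] == 1); lanes beyond HIGHEST_LANE are tied to 0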
always @(*) begin
A_pi_fine_enable = 0;
A_pi_fine_inc = 0;
A_pi_counter_load_en = 0;
A_pi_counter_read_en = 0;
A_pi_counter_load_val = 0;
A_pi_rst_dqs_find = 0;
A_po_fine_enable = 0;
A_po_coarse_enable = 0;
A_po_fine_inc = 0;
A_po_coarse_inc = 0;
A_po_counter_load_en = 0;
A_po_counter_read_en = 0;
A_po_counter_load_val = 0;
A_po_sel_fine_oclk_delay = 0;
A_idelay_ce = 0;
A_idelay_ld = 0;
A_fine_delay = 0;
A_fine_delay_sel = 0;
B_pi_fine_enable = 0;
B_pi_fine_inc = 0;
B_pi_counter_load_en = 0;
B_pi_counter_read_en = 0;
B_pi_counter_load_val = 0;
B_pi_rst_dqs_find = 0;
B_po_fine_enable = 0;
B_po_coarse_enable = 0;
B_po_fine_inc = 0;
B_po_coarse_inc = 0;
B_po_counter_load_en = 0;
B_po_counter_read_en = 0;
B_po_counter_load_val = 0;
B_po_sel_fine_oclk_delay = 0;
B_idelay_ce = 0;
B_idelay_ld = 0;
B_fine_delay = 0;
B_fine_delay_sel = 0;
C_pi_fine_enable = 0;
C_pi_fine_inc = 0;
C_pi_counter_load_en = 0;
C_pi_counter_read_en = 0;
C_pi_counter_load_val = 0;
C_pi_rst_dqs_find = 0;
C_po_fine_enable = 0;
C_po_coarse_enable = 0;
C_po_fine_inc = 0;
C_po_coarse_inc = 0;
C_po_counter_load_en = 0;
C_po_counter_read_en = 0;
C_po_counter_load_val = 0;
C_po_sel_fine_oclk_delay = 0;
C_idelay_ce = 0;
C_idelay_ld = 0;
C_fine_delay = 0;
C_fine_delay_sel = 0;
D_pi_fine_enable = 0;
D_pi_fine_inc = 0;
D_pi_counter_load_en = 0;
D_pi_counter_read_en = 0;
D_pi_counter_load_val = 0;
D_pi_rst_dqs_find = 0;
D_po_fine_enable = 0;
D_po_coarse_enable = 0;
D_po_fine_inc = 0;
D_po_coarse_inc = 0;
D_po_counter_load_en = 0;
D_po_counter_read_en = 0;
D_po_counter_load_val = 0;
D_po_sel_fine_oclk_delay = 0;
D_idelay_ce = 0;
D_idelay_ld = 0;
D_fine_delay = 0;
D_fine_delay_sel = 0;
if ( calib_sel[2]) begin
// if this is asserted, all calib signals are deasserted
A_pi_fine_enable = 0;
A_pi_fine_inc = 0;
A_pi_counter_load_en = 0;
A_pi_counter_read_en = 0;
A_pi_counter_load_val = 0;
A_pi_rst_dqs_find = 0;
A_po_fine_enable = 0;
A_po_coarse_enable = 0;
A_po_fine_inc = 0;
A_po_coarse_inc = 0;
A_po_counter_load_en = 0;
A_po_counter_read_en = 0;
A_po_counter_load_val = 0;
A_po_sel_fine_oclk_delay = 0;
A_idelay_ce = 0;
A_idelay_ld = 0;
A_fine_delay = 0;
A_fine_delay_sel = 0;
B_pi_fine_enable = 0;
B_pi_fine_inc = 0;
B_pi_counter_load_en = 0;
B_pi_counter_read_en = 0;
B_pi_counter_load_val = 0;
B_pi_rst_dqs_find = 0;
B_po_fine_enable = 0;
B_po_coarse_enable = 0;
B_po_fine_inc = 0;
B_po_coarse_inc = 0;
B_po_counter_load_en = 0;
B_po_counter_read_en = 0;
B_po_counter_load_val = 0;
B_po_sel_fine_oclk_delay = 0;
B_idelay_ce = 0;
B_idelay_ld = 0;
B_fine_delay = 0;
B_fine_delay_sel = 0;
C_pi_fine_enable = 0;
C_pi_fine_inc = 0;
C_pi_counter_load_en = 0;
C_pi_counter_read_en = 0;
C_pi_counter_load_val = 0;
C_pi_rst_dqs_find = 0;
C_po_fine_enable = 0;
C_po_coarse_enable = 0;
C_po_fine_inc = 0;
C_po_coarse_inc = 0;
C_po_counter_load_en = 0;
C_po_counter_read_en = 0;
C_po_counter_load_val = 0;
C_po_sel_fine_oclk_delay = 0;
C_idelay_ce = 0;
C_idelay_ld = 0;
C_fine_delay = 0;
C_fine_delay_sel = 0;
D_pi_fine_enable = 0;
D_pi_fine_inc = 0;
D_pi_counter_load_en = 0;
D_pi_counter_read_en = 0;
D_pi_counter_load_val = 0;
D_pi_rst_dqs_find = 0;
D_po_fine_enable = 0;
D_po_coarse_enable = 0;
D_po_fine_inc = 0;
D_po_coarse_inc = 0;
D_po_counter_load_en = 0;
D_po_counter_read_en = 0;
D_po_counter_load_val = 0;
D_po_sel_fine_oclk_delay = 0;
D_idelay_ce = 0;
D_idelay_ld = 0;
D_fine_delay = 0;
D_fine_delay_sel = 0;
end else
if (calib_in_common) begin
// if this is asserted, each signal is broadcast to all phasers
// in common
if ( !calib_zero_lanes[0] && (! calib_zero_ctrl || DATA_CTL_N[0])) begin
A_pi_fine_enable = pi_fine_enable;
A_pi_fine_inc = pi_fine_inc;
A_pi_counter_load_en = pi_counter_load_en;
A_pi_counter_read_en = pi_counter_read_en;
A_pi_counter_load_val = pi_counter_load_val;
A_pi_rst_dqs_find = pi_rst_dqs_find;
A_po_fine_enable = po_fine_enable;
A_po_coarse_enable = po_coarse_enable;
A_po_fine_inc = po_fine_inc;
A_po_coarse_inc = po_coarse_inc;
A_po_counter_load_en = po_counter_load_en;
A_po_counter_read_en = po_counter_read_en;
A_po_counter_load_val = po_counter_load_val;
A_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
A_idelay_ce = idelay_ce;
A_idelay_ld = idelay_ld;
A_fine_delay = fine_delay ;
A_fine_delay_sel = fine_delay_sel;
end
if ( B_mux_ctrl) begin
B_pi_fine_enable = pi_fine_enable;
B_pi_fine_inc = pi_fine_inc;
B_pi_counter_load_en = pi_counter_load_en;
B_pi_counter_read_en = pi_counter_read_en;
B_pi_counter_load_val = pi_counter_load_val;
B_pi_rst_dqs_find = pi_rst_dqs_find;
B_po_fine_enable = po_fine_enable;
B_po_coarse_enable = po_coarse_enable;
B_po_fine_inc = po_fine_inc;
B_po_coarse_inc = po_coarse_inc;
B_po_counter_load_en = po_counter_load_en;
B_po_counter_read_en = po_counter_read_en;
B_po_counter_load_val = po_counter_load_val;
B_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
B_idelay_ce = idelay_ce;
B_idelay_ld = idelay_ld;
B_fine_delay = fine_delay ;
B_fine_delay_sel = fine_delay_sel;
end
if ( C_mux_ctrl) begin
C_pi_fine_enable = pi_fine_enable;
C_pi_fine_inc = pi_fine_inc;
C_pi_counter_load_en = pi_counter_load_en;
C_pi_counter_read_en = pi_counter_read_en;
C_pi_counter_load_val = pi_counter_load_val;
C_pi_rst_dqs_find = pi_rst_dqs_find;
C_po_fine_enable = po_fine_enable;
C_po_coarse_enable = po_coarse_enable;
C_po_fine_inc = po_fine_inc;
C_po_coarse_inc = po_coarse_inc;
C_po_counter_load_en = po_counter_load_en;
C_po_counter_read_en = po_counter_read_en;
C_po_counter_load_val = po_counter_load_val;
C_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
C_idelay_ce = idelay_ce;
C_idelay_ld = idelay_ld;
C_fine_delay = fine_delay ;
C_fine_delay_sel = fine_delay_sel;
end
if ( D_mux_ctrl) begin
D_pi_fine_enable = pi_fine_enable;
D_pi_fine_inc = pi_fine_inc;
D_pi_counter_load_en = pi_counter_load_en;
D_pi_counter_read_en = pi_counter_read_en;
D_pi_counter_load_val = pi_counter_load_val;
D_pi_rst_dqs_find = pi_rst_dqs_find;
D_po_fine_enable = po_fine_enable;
D_po_coarse_enable = po_coarse_enable;
D_po_fine_inc = po_fine_inc;
D_po_coarse_inc = po_coarse_inc;
D_po_counter_load_en = po_counter_load_en;
D_po_counter_read_en = po_counter_read_en;
D_po_counter_load_val = po_counter_load_val;
D_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
D_idelay_ce = idelay_ce;
D_idelay_ld = idelay_ld;
D_fine_delay = fine_delay ;
D_fine_delay_sel = fine_delay_sel;
end
end
else begin
// otherwise, only a single phaser is selected
case (calib_sel[1:0])
0: begin
A_pi_fine_enable = pi_fine_enable;
A_pi_fine_inc = pi_fine_inc;
A_pi_counter_load_en = pi_counter_load_en;
A_pi_counter_read_en = pi_counter_read_en;
A_pi_counter_load_val = pi_counter_load_val;
A_pi_rst_dqs_find = pi_rst_dqs_find;
A_po_fine_enable = po_fine_enable;
A_po_coarse_enable = po_coarse_enable;
A_po_fine_inc = po_fine_inc;
A_po_coarse_inc = po_coarse_inc;
A_po_counter_load_en = po_counter_load_en;
A_po_counter_read_en = po_counter_read_en;
A_po_counter_load_val = po_counter_load_val;
A_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
A_idelay_ce = idelay_ce;
A_idelay_ld = idelay_ld;
A_fine_delay = fine_delay ;
A_fine_delay_sel = fine_delay_sel;
end
1: begin
B_pi_fine_enable = pi_fine_enable;
B_pi_fine_inc = pi_fine_inc;
B_pi_counter_load_en = pi_counter_load_en;
B_pi_counter_read_en = pi_counter_read_en;
B_pi_counter_load_val = pi_counter_load_val;
B_pi_rst_dqs_find = pi_rst_dqs_find;
B_po_fine_enable = po_fine_enable;
B_po_coarse_enable = po_coarse_enable;
B_po_fine_inc = po_fine_inc;
B_po_coarse_inc = po_coarse_inc;
B_po_counter_load_en = po_counter_load_en;
B_po_counter_read_en = po_counter_read_en;
B_po_counter_load_val = po_counter_load_val;
B_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
B_idelay_ce = idelay_ce;
B_idelay_ld = idelay_ld;
B_fine_delay = fine_delay ;
B_fine_delay_sel = fine_delay_sel;
end
2: begin
C_pi_fine_enable = pi_fine_enable;
C_pi_fine_inc = pi_fine_inc;
C_pi_counter_load_en = pi_counter_load_en;
C_pi_counter_read_en = pi_counter_read_en;
C_pi_counter_load_val = pi_counter_load_val;
C_pi_rst_dqs_find = pi_rst_dqs_find;
C_po_fine_enable = po_fine_enable;
C_po_coarse_enable = po_coarse_enable;
C_po_fine_inc = po_fine_inc;
C_po_coarse_inc = po_coarse_inc;
C_po_counter_load_en = po_counter_load_en;
C_po_counter_read_en = po_counter_read_en;
C_po_counter_load_val = po_counter_load_val;
C_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
C_idelay_ce = idelay_ce;
C_idelay_ld = idelay_ld;
C_fine_delay = fine_delay ;
C_fine_delay_sel = fine_delay_sel;
end
3: begin
D_pi_fine_enable = pi_fine_enable;
D_pi_fine_inc = pi_fine_inc;
D_pi_counter_load_en = pi_counter_load_en;
D_pi_counter_read_en = pi_counter_read_en;
D_pi_counter_load_val = pi_counter_load_val;
D_pi_rst_dqs_find = pi_rst_dqs_find;
D_po_fine_enable = po_fine_enable;
D_po_coarse_enable = po_coarse_enable;
D_po_fine_inc = po_fine_inc;
D_po_coarse_inc = po_coarse_inc;
D_po_counter_load_en = po_counter_load_en;
           D_po_counter_read_en = po_counter_read_en;
           D_po_counter_load_val = po_counter_load_val;
D_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
D_idelay_ce = idelay_ce;
D_idelay_ld = idelay_ld;
D_fine_delay = fine_delay ;
D_fine_delay_sel = fine_delay_sel;
end
endcase
end
end
  // obligatory PHASER_REF
PHASER_REF phaser_ref_i(
.LOCKED (ref_dll_lock),
.CLKIN (freq_refclk),
.PWRDWN (1'b0),
.RST ( ! pll_lock)
);
// optional idelay_ctrl
generate
if ( GENERATE_IDELAYCTRL == "TRUE")
IDELAYCTRL idelayctrl (
.RDY (/*idelayctrl_rdy*/),
.REFCLK (idelayctrl_refclk),
.RST (rst)
);
endgenerate
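  // note: IDELAYCTRL is instantiated only when GENERATE_IDELAYCTRL == "TRUE";
  // its RDY output is left unconnected here (see the commented-out
  // idelayctrl_rdy name above)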
endmodule
|
module mig_7series_v2_3_ddr_phy_4lanes #(
parameter GENERATE_IDELAYCTRL = "TRUE",
parameter IODELAY_GRP = "IODELAY_MIG",
parameter FPGA_SPEED_GRADE = 1,
parameter BANK_TYPE = "HP_IO", // # = "HP_IO", "HPL_IO", "HR_IO", "HRL_IO"
parameter BYTELANES_DDR_CK = 24'b0010_0010_0010_0010_0010_0010,
parameter NUM_DDR_CK = 1,
// next three parameter fields correspond to byte lanes for lane order DCBA
parameter BYTE_LANES = 4'b1111, // lane existence, one per lane
parameter DATA_CTL_N = 4'b1111, // data or control, per lane
parameter BITLANES = 48'hffff_ffff_ffff,
parameter BITLANES_OUTONLY = 48'h0000_0000_0000,
parameter LANE_REMAP = 16'h3210,// 4-bit index
// used to rewire to one of four
                                        // input/output bus lanes
// example: 0321 remaps lanes as:
// D->A
// C->D
// B->C
// A->B
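                                        // (each 4-bit field selects which 80-bit
                                        //  slice of phy_dout/phy_din a lane uses:
                                        //  [1:0] lane A, [5:4] B, [9:8] C, [13:12] D)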
parameter LAST_BANK = "FALSE",
parameter USE_PRE_POST_FIFO = "FALSE",
parameter RCLK_SELECT_LANE = "B",
parameter real TCK = 0.00,
parameter SYNTHESIS = "FALSE",
parameter PO_CTL_COARSE_BYPASS = "FALSE",
parameter PO_FINE_DELAY = 0,
parameter PI_SEL_CLK_OFFSET = 0,
    // phy_control parameter used in other parameters
parameter PC_CLK_RATIO = 4,
//phaser_in parameters
parameter A_PI_FREQ_REF_DIV = "NONE",
parameter A_PI_CLKOUT_DIV = 2,
parameter A_PI_BURST_MODE = "TRUE",
parameter A_PI_OUTPUT_CLK_SRC = "DELAYED_REF" , //"DELAYED_REF",
parameter A_PI_FINE_DELAY = 60,
parameter A_PI_SYNC_IN_DIV_RST = "TRUE",
parameter B_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter B_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter B_PI_BURST_MODE = A_PI_BURST_MODE,
parameter B_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter B_PI_FINE_DELAY = A_PI_FINE_DELAY,
parameter B_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
parameter C_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter C_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter C_PI_BURST_MODE = A_PI_BURST_MODE,
parameter C_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter C_PI_FINE_DELAY = 0,
parameter C_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
parameter D_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter D_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter D_PI_BURST_MODE = A_PI_BURST_MODE,
parameter D_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter D_PI_FINE_DELAY = 0,
parameter D_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
//phaser_out parameters
parameter A_PO_CLKOUT_DIV = (DATA_CTL_N[0] == 0) ? PC_CLK_RATIO : 2,
parameter A_PO_FINE_DELAY = PO_FINE_DELAY,
parameter A_PO_COARSE_DELAY = 0,
parameter A_PO_OCLK_DELAY = 0,
parameter A_PO_OCLKDELAY_INV = "FALSE",
parameter A_PO_OUTPUT_CLK_SRC = "DELAYED_REF",
parameter A_PO_SYNC_IN_DIV_RST = "TRUE",
//parameter A_PO_SYNC_IN_DIV_RST = "FALSE",
parameter B_PO_CLKOUT_DIV = (DATA_CTL_N[1] == 0) ? PC_CLK_RATIO : 2,
parameter B_PO_FINE_DELAY = PO_FINE_DELAY,
parameter B_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter B_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter B_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter B_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter B_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter C_PO_CLKOUT_DIV = (DATA_CTL_N[2] == 0) ? PC_CLK_RATIO : 2,
parameter C_PO_FINE_DELAY = PO_FINE_DELAY,
parameter C_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter C_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter C_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter C_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter C_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter D_PO_CLKOUT_DIV = (DATA_CTL_N[3] == 0) ? PC_CLK_RATIO : 2,
parameter D_PO_FINE_DELAY = PO_FINE_DELAY,
parameter D_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter D_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter D_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter D_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter D_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter A_IDELAYE2_IDELAY_TYPE = "VARIABLE",
parameter A_IDELAYE2_IDELAY_VALUE = 00,
parameter B_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter B_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
parameter C_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter C_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
parameter D_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter D_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
// phy_control parameters
parameter PC_BURST_MODE = "TRUE",
parameter PC_DATA_CTL_N = DATA_CTL_N,
parameter PC_CMD_OFFSET = 0,
parameter PC_RD_CMD_OFFSET_0 = 0,
parameter PC_RD_CMD_OFFSET_1 = 0,
parameter PC_RD_CMD_OFFSET_2 = 0,
parameter PC_RD_CMD_OFFSET_3 = 0,
parameter PC_CO_DURATION = 1,
parameter PC_DI_DURATION = 1,
parameter PC_DO_DURATION = 1,
parameter PC_RD_DURATION_0 = 0,
parameter PC_RD_DURATION_1 = 0,
parameter PC_RD_DURATION_2 = 0,
parameter PC_RD_DURATION_3 = 0,
parameter PC_WR_CMD_OFFSET_0 = 5,
parameter PC_WR_CMD_OFFSET_1 = 5,
parameter PC_WR_CMD_OFFSET_2 = 5,
parameter PC_WR_CMD_OFFSET_3 = 5,
parameter PC_WR_DURATION_0 = 6,
parameter PC_WR_DURATION_1 = 6,
parameter PC_WR_DURATION_2 = 6,
parameter PC_WR_DURATION_3 = 6,
parameter PC_AO_WRLVL_EN = 0,
parameter PC_AO_TOGGLE = 4'b0101, // odd bits are toggle (CKE)
parameter PC_FOUR_WINDOW_CLOCKS = 63,
parameter PC_EVENTS_DELAY = 18,
parameter PC_PHY_COUNT_EN = "TRUE",
parameter PC_SYNC_MODE = "TRUE",
parameter PC_DISABLE_SEQ_MATCH = "TRUE",
parameter PC_MULTI_REGION = "FALSE",
// io fifo parameters
parameter A_OF_ARRAY_MODE = (DATA_CTL_N[0] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter B_OF_ARRAY_MODE = (DATA_CTL_N[1] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter C_OF_ARRAY_MODE = (DATA_CTL_N[2] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter D_OF_ARRAY_MODE = (DATA_CTL_N[3] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter OF_ALMOST_EMPTY_VALUE = 1,
parameter OF_ALMOST_FULL_VALUE = 1,
parameter OF_OUTPUT_DISABLE = "TRUE",
parameter OF_SYNCHRONOUS_MODE = PC_SYNC_MODE,
parameter A_OS_DATA_RATE = "DDR",
parameter A_OS_DATA_WIDTH = 4,
parameter B_OS_DATA_RATE = A_OS_DATA_RATE,
parameter B_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter C_OS_DATA_RATE = A_OS_DATA_RATE,
parameter C_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter D_OS_DATA_RATE = A_OS_DATA_RATE,
parameter D_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter A_IF_ARRAY_MODE = "ARRAY_MODE_4_X_8",
parameter B_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter C_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter D_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter IF_ALMOST_EMPTY_VALUE = 1,
parameter IF_ALMOST_FULL_VALUE = 1,
parameter IF_SYNCHRONOUS_MODE = PC_SYNC_MODE,
// this is used locally, not for external pushdown
    // NOTE: the 0+ is needed in each term to coerce it to an integer for addition;
    //       otherwise four 1-bit values are added, producing a 1-bit result.
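    //       (e.g. in N_BYTE_LANES below, each (0+BYTE_LANES[n]) term is widened
    //        to an integer so the sum can reach 4 instead of wrapping in 1 bit)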
parameter HIGHEST_LANE = LAST_BANK == "FALSE" ? 4 : (BYTE_LANES[3] ? 4 : BYTE_LANES[2] ? 3 : BYTE_LANES[1] ? 2 : 1),
parameter N_CTL_LANES = ((0+(!DATA_CTL_N[0]) & BYTE_LANES[0]) + (0+(!DATA_CTL_N[1]) & BYTE_LANES[1]) + (0+(!DATA_CTL_N[2]) & BYTE_LANES[2]) + (0+(!DATA_CTL_N[3]) & BYTE_LANES[3])),
parameter N_BYTE_LANES = (0+BYTE_LANES[0]) + (0+BYTE_LANES[1]) + (0+BYTE_LANES[2]) + (0+BYTE_LANES[3]),
parameter N_DATA_LANES = N_BYTE_LANES - N_CTL_LANES,
// assume odt per rank + any declared cke's
parameter AUXOUT_WIDTH = 4,
parameter LP_DDR_CK_WIDTH = 2
,parameter CKE_ODT_AUX = "FALSE"
)
(
//`include "phy.vh"
input rst,
input phy_clk,
input phy_ctl_clk,
input freq_refclk,
input mem_refclk,
input mem_refclk_div4,
input pll_lock,
input sync_pulse,
input idelayctrl_refclk,
input [HIGHEST_LANE*80-1:0] phy_dout,
input phy_cmd_wr_en,
input phy_data_wr_en,
input phy_rd_en,
input phy_ctl_mstr_empty,
input [31:0] phy_ctl_wd,
input [`PC_DATA_OFFSET_RANGE] data_offset,
input phy_ctl_wr,
input if_empty_def,
input phyGo,
input input_sink,
output [(LP_DDR_CK_WIDTH*24)-1:0] ddr_clk, // to memory
output rclk,
output if_a_empty,
output if_empty,
output byte_rd_en,
output if_empty_or,
output if_empty_and,
output of_ctl_a_full,
output of_data_a_full,
output of_ctl_full,
output of_data_full,
output pre_data_a_full,
output [HIGHEST_LANE*80-1:0]phy_din, // assume input bus same size as output bus
output phy_ctl_empty,
output phy_ctl_a_full,
output phy_ctl_full,
output [HIGHEST_LANE*12-1:0]mem_dq_out,
output [HIGHEST_LANE*12-1:0]mem_dq_ts,
input [HIGHEST_LANE*10-1:0]mem_dq_in,
output [HIGHEST_LANE-1:0] mem_dqs_out,
output [HIGHEST_LANE-1:0] mem_dqs_ts,
input [HIGHEST_LANE-1:0] mem_dqs_in,
input [1:0] byte_rd_en_oth_banks,
output [AUXOUT_WIDTH-1:0] aux_out,
output reg rst_out = 0,
output reg mcGo=0,
output phy_ctl_ready,
output ref_dll_lock,
input if_rst,
input phy_read_calib,
input phy_write_calib,
input idelay_inc,
input idelay_ce,
input idelay_ld,
input [2:0] calib_sel,
input calib_zero_ctrl,
input [HIGHEST_LANE-1:0] calib_zero_lanes,
input calib_in_common,
input po_fine_enable,
input po_coarse_enable,
input po_fine_inc,
input po_coarse_inc,
input po_counter_load_en,
input po_counter_read_en,
input [8:0] po_counter_load_val,
input po_sel_fine_oclk_delay,
output reg po_coarse_overflow,
output reg po_fine_overflow,
output reg [8:0] po_counter_read_val,
input pi_rst_dqs_find,
input pi_fine_enable,
input pi_fine_inc,
input pi_counter_load_en,
input pi_counter_read_en,
input [5:0] pi_counter_load_val,
output reg pi_fine_overflow,
output reg [5:0] pi_counter_read_val,
output reg pi_dqs_found,
output pi_dqs_found_all,
output pi_dqs_found_any,
output [HIGHEST_LANE-1:0] pi_phase_locked_lanes,
output [HIGHEST_LANE-1:0] pi_dqs_found_lanes,
output reg pi_dqs_out_of_range,
output reg pi_phase_locked,
output pi_phase_locked_all,
input [29:0] fine_delay,
input fine_delay_sel
);
localparam DATA_CTL_A = (~DATA_CTL_N[0]);
localparam DATA_CTL_B = (~DATA_CTL_N[1]);
localparam DATA_CTL_C = (~DATA_CTL_N[2]);
localparam DATA_CTL_D = (~DATA_CTL_N[3]);
localparam PRESENT_CTL_A = BYTE_LANES[0] && ! DATA_CTL_N[0];
localparam PRESENT_CTL_B = BYTE_LANES[1] && ! DATA_CTL_N[1];
localparam PRESENT_CTL_C = BYTE_LANES[2] && ! DATA_CTL_N[2];
localparam PRESENT_CTL_D = BYTE_LANES[3] && ! DATA_CTL_N[3];
localparam PRESENT_DATA_A = BYTE_LANES[0] && DATA_CTL_N[0];
localparam PRESENT_DATA_B = BYTE_LANES[1] && DATA_CTL_N[1];
localparam PRESENT_DATA_C = BYTE_LANES[2] && DATA_CTL_N[2];
localparam PRESENT_DATA_D = BYTE_LANES[3] && DATA_CTL_N[3];
localparam PC_DATA_CTL_A = (DATA_CTL_A) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_B = (DATA_CTL_B) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_C = (DATA_CTL_C) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_D = (DATA_CTL_D) ? "FALSE" : "TRUE";
localparam A_PO_COARSE_BYPASS = (DATA_CTL_A) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam B_PO_COARSE_BYPASS = (DATA_CTL_B) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam C_PO_COARSE_BYPASS = (DATA_CTL_C) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam D_PO_COARSE_BYPASS = (DATA_CTL_D) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam IO_A_START = 41;
localparam IO_A_END = 40;
localparam IO_B_START = 43;
localparam IO_B_END = 42;
localparam IO_C_START = 45;
localparam IO_C_END = 44;
localparam IO_D_START = 47;
localparam IO_D_END = 46;
localparam IO_A_X_START = (HIGHEST_LANE * 10) + 1;
localparam IO_A_X_END = (IO_A_X_START-1);
localparam IO_B_X_START = (IO_A_X_START + 2);
localparam IO_B_X_END = (IO_B_X_START -1);
localparam IO_C_X_START = (IO_B_X_START + 2);
localparam IO_C_X_END = (IO_C_X_START -1);
localparam IO_D_X_START = (IO_C_X_START + 2);
localparam IO_D_X_END = (IO_D_X_START -1);
localparam MSB_BURST_PEND_PO = 3;
localparam MSB_BURST_PEND_PI = 7;
localparam MSB_RANK_SEL_I = MSB_BURST_PEND_PI + 8;
localparam PHASER_CTL_BUS_WIDTH = MSB_RANK_SEL_I + 1;
wire [1:0] oserdes_dqs;
wire [1:0] oserdes_dqs_ts;
wire [1:0] oserdes_dq_ts;
wire [PHASER_CTL_BUS_WIDTH-1:0] phaser_ctl_bus;
wire [7:0] in_rank;
wire [11:0] IO_A;
wire [11:0] IO_B;
wire [11:0] IO_C;
wire [11:0] IO_D;
wire [319:0] phy_din_remap;
reg A_po_counter_read_en;
wire [8:0] A_po_counter_read_val;
reg A_pi_counter_read_en;
wire [5:0] A_pi_counter_read_val;
wire A_pi_fine_overflow;
wire A_po_coarse_overflow;
wire A_po_fine_overflow;
wire A_pi_dqs_found;
wire A_pi_dqs_out_of_range;
wire A_pi_phase_locked;
wire A_pi_iserdes_rst;
reg A_pi_fine_enable;
reg A_pi_fine_inc;
reg A_pi_counter_load_en;
reg [5:0] A_pi_counter_load_val;
reg A_pi_rst_dqs_find;
reg A_po_fine_enable;
reg A_po_coarse_enable;
reg A_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg A_po_sel_fine_oclk_delay;
reg A_po_coarse_inc;
reg A_po_counter_load_en;
reg [8:0] A_po_counter_load_val;
wire A_rclk;
reg A_idelay_ce;
reg A_idelay_ld;
reg [29:0] A_fine_delay;
reg A_fine_delay_sel;
reg B_po_counter_read_en;
wire [8:0] B_po_counter_read_val;
reg B_pi_counter_read_en;
wire [5:0] B_pi_counter_read_val;
wire B_pi_fine_overflow;
wire B_po_coarse_overflow;
wire B_po_fine_overflow;
wire B_pi_phase_locked;
wire B_pi_iserdes_rst;
wire B_pi_dqs_found;
wire B_pi_dqs_out_of_range;
reg B_pi_fine_enable;
reg B_pi_fine_inc;
reg B_pi_counter_load_en;
reg [5:0] B_pi_counter_load_val;
reg B_pi_rst_dqs_find;
reg B_po_fine_enable;
reg B_po_coarse_enable;
reg B_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg B_po_coarse_inc;
reg B_po_sel_fine_oclk_delay;
reg B_po_counter_load_en;
reg [8:0] B_po_counter_load_val;
wire B_rclk;
reg B_idelay_ce;
reg B_idelay_ld;
reg [29:0] B_fine_delay;
reg B_fine_delay_sel;
reg C_pi_fine_inc;
reg D_pi_fine_inc;
reg C_pi_fine_enable;
reg D_pi_fine_enable;
reg C_po_counter_load_en;
reg D_po_counter_load_en;
reg C_po_coarse_inc;
reg D_po_coarse_inc;
reg C_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg D_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg C_po_sel_fine_oclk_delay;
reg D_po_sel_fine_oclk_delay;
reg [5:0] C_pi_counter_load_val;
reg [5:0] D_pi_counter_load_val;
reg [8:0] C_po_counter_load_val;
reg [8:0] D_po_counter_load_val;
reg C_po_coarse_enable;
reg D_po_coarse_enable;
reg C_po_fine_enable;
reg D_po_fine_enable;
wire C_po_coarse_overflow;
wire D_po_coarse_overflow;
wire C_po_fine_overflow;
wire D_po_fine_overflow;
wire [8:0] C_po_counter_read_val;
wire [8:0] D_po_counter_read_val;
reg C_po_counter_read_en;
reg D_po_counter_read_en;
wire C_pi_dqs_found;
wire D_pi_dqs_found;
wire C_pi_fine_overflow;
wire D_pi_fine_overflow;
reg C_pi_counter_read_en;
reg D_pi_counter_read_en;
reg C_pi_counter_load_en;
reg D_pi_counter_load_en;
wire C_pi_phase_locked;
wire C_pi_iserdes_rst;
wire D_pi_phase_locked;
wire D_pi_iserdes_rst;
wire C_pi_dqs_out_of_range;
wire D_pi_dqs_out_of_range;
wire [5:0] C_pi_counter_read_val;
wire [5:0] D_pi_counter_read_val;
wire C_rclk;
wire D_rclk;
reg C_idelay_ce;
reg D_idelay_ce;
reg C_idelay_ld;
reg D_idelay_ld;
reg C_pi_rst_dqs_find;
reg D_pi_rst_dqs_find;
reg [29:0] C_fine_delay;
reg [29:0] D_fine_delay;
reg C_fine_delay_sel;
reg D_fine_delay_sel;
wire pi_iserdes_rst;
wire A_if_empty;
wire B_if_empty;
wire C_if_empty;
wire D_if_empty;
wire A_byte_rd_en;
wire B_byte_rd_en;
wire C_byte_rd_en;
wire D_byte_rd_en;
wire A_if_a_empty;
wire B_if_a_empty;
wire C_if_a_empty;
wire D_if_a_empty;
//wire A_if_full;
//wire B_if_full;
//wire C_if_full;
//wire D_if_full;
//wire A_of_empty;
//wire B_of_empty;
//wire C_of_empty;
//wire D_of_empty;
wire A_of_full;
wire B_of_full;
wire C_of_full;
wire D_of_full;
wire A_of_ctl_full;
wire B_of_ctl_full;
wire C_of_ctl_full;
wire D_of_ctl_full;
wire A_of_data_full;
wire B_of_data_full;
wire C_of_data_full;
wire D_of_data_full;
wire A_of_a_full;
wire B_of_a_full;
wire C_of_a_full;
wire D_of_a_full;
wire A_pre_fifo_a_full;
wire B_pre_fifo_a_full;
wire C_pre_fifo_a_full;
wire D_pre_fifo_a_full;
wire A_of_ctl_a_full;
wire B_of_ctl_a_full;
wire C_of_ctl_a_full;
wire D_of_ctl_a_full;
wire A_of_data_a_full;
wire B_of_data_a_full;
wire C_of_data_a_full;
wire D_of_data_a_full;
wire A_pre_data_a_full;
wire B_pre_data_a_full;
wire C_pre_data_a_full;
wire D_pre_data_a_full;
wire [LP_DDR_CK_WIDTH*6-1:0] A_ddr_clk; // for generation
wire [LP_DDR_CK_WIDTH*6-1:0] B_ddr_clk; //
wire [LP_DDR_CK_WIDTH*6-1:0] C_ddr_clk; //
wire [LP_DDR_CK_WIDTH*6-1:0] D_ddr_clk; //
wire [3:0] dummy_data;
wire [31:0] _phy_ctl_wd;
wire [1:0] phy_encalib;
assign pi_dqs_found_all =
(! PRESENT_DATA_A | A_pi_dqs_found) &
(! PRESENT_DATA_B | B_pi_dqs_found) &
(! PRESENT_DATA_C | C_pi_dqs_found) &
(! PRESENT_DATA_D | D_pi_dqs_found) ;
assign pi_dqs_found_any =
( PRESENT_DATA_A & A_pi_dqs_found) |
( PRESENT_DATA_B & B_pi_dqs_found) |
( PRESENT_DATA_C & C_pi_dqs_found) |
( PRESENT_DATA_D & D_pi_dqs_found) ;
assign pi_phase_locked_all =
(! PRESENT_DATA_A | A_pi_phase_locked) &
(! PRESENT_DATA_B | B_pi_phase_locked) &
(! PRESENT_DATA_C | C_pi_phase_locked) &
(! PRESENT_DATA_D | D_pi_phase_locked);
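  // byte lanes that are absent or control-only contribute a constant 1 to the
  // _all AND-reductions and a constant 0 to the _any OR-reduction, so they
  // never affect these status flags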
  wire dangling_inputs = (& dummy_data) & input_sink & 1'b0; // this reduces all constant 0 values to one signal,
                         // which is combined with another signal such that
                         // the other signal isn't changed. The purpose
                         // is to trick the tools into ignoring dangling inputs.
                         // Because it is ANDed with 1'b0, the contributing signals
                         // are folded as constants or trimmed.
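                         // (dangling_inputs is ORed into of_data_a_full below;
                         //  since it is forced to 1'b0 it folds away while
                         //  keeping dummy_data/input_sink referenced)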
assign if_empty = !if_empty_def ? (A_if_empty | B_if_empty | C_if_empty | D_if_empty) : (A_if_empty & B_if_empty & C_if_empty & D_if_empty);
assign byte_rd_en = !if_empty_def ? (A_byte_rd_en & B_byte_rd_en & C_byte_rd_en & D_byte_rd_en) :
(A_byte_rd_en | B_byte_rd_en | C_byte_rd_en | D_byte_rd_en);
assign if_empty_or = (A_if_empty | B_if_empty | C_if_empty | D_if_empty);
assign if_empty_and = (A_if_empty & B_if_empty & C_if_empty & D_if_empty);
assign if_a_empty = A_if_a_empty | B_if_a_empty | C_if_a_empty | D_if_a_empty;
//assign if_full = A_if_full | B_if_full | C_if_full | D_if_full ;
//assign of_empty = A_of_empty & B_of_empty & C_of_empty & D_of_empty;
assign of_ctl_full = A_of_ctl_full | B_of_ctl_full | C_of_ctl_full | D_of_ctl_full ;
assign of_data_full = A_of_data_full | B_of_data_full | C_of_data_full | D_of_data_full ;
assign of_ctl_a_full = A_of_ctl_a_full | B_of_ctl_a_full | C_of_ctl_a_full | D_of_ctl_a_full ;
assign of_data_a_full = A_of_data_a_full | B_of_data_a_full | C_of_data_a_full | D_of_data_a_full | dangling_inputs ;
assign pre_data_a_full = A_pre_data_a_full | B_pre_data_a_full | C_pre_data_a_full | D_pre_data_a_full;
function [79:0] part_select_80;
input [319:0] vector;
input [1:0] select;
begin
case (select)
2'b00 : part_select_80[79:0] = vector[1*80-1:0*80];
2'b01 : part_select_80[79:0] = vector[2*80-1:1*80];
2'b10 : part_select_80[79:0] = vector[3*80-1:2*80];
2'b11 : part_select_80[79:0] = vector[4*80-1:3*80];
endcase
end
endfunction
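  // usage sketch (matches the lane generate blocks below), e.g. for lane A:
  //   assign phy_dout_remap[79:0] = part_select_80(phy_dout, (LANE_REMAP[1:0]));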
wire [319:0] phy_dout_remap;
reg rst_out_trig = 1'b0;
reg [31:0] rclk_delay;
reg rst_edge1 = 1'b0;
reg rst_edge2 = 1'b0;
reg rst_edge3 = 1'b0;
reg rst_edge_detect = 1'b0;
wire rclk_;
reg rst_out_start = 1'b0 ;
reg rst_primitives=0;
reg A_rst_primitives=0;
reg B_rst_primitives=0;
reg C_rst_primitives=0;
reg D_rst_primitives=0;
`ifdef USE_PHY_CONTROL_TEST
wire [15:0] test_output;
wire [15:0] test_input;
wire [2:0] test_select=0;
wire scan_enable = 0;
`endif
generate
genvar i;
if (RCLK_SELECT_LANE == "A") begin
assign rclk_ = A_rclk;
assign pi_iserdes_rst = A_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "B") begin
assign rclk_ = B_rclk;
assign pi_iserdes_rst = B_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "C") begin
assign rclk_ = C_rclk;
assign pi_iserdes_rst = C_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "D") begin
assign rclk_ = D_rclk;
assign pi_iserdes_rst = D_pi_iserdes_rst;
end
else begin
assign rclk_ = B_rclk; // default
end
endgenerate
assign ddr_clk[LP_DDR_CK_WIDTH*6-1:0] = A_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*12-1:LP_DDR_CK_WIDTH*6] = B_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*18-1:LP_DDR_CK_WIDTH*12] = C_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*24-1:LP_DDR_CK_WIDTH*18] = D_ddr_clk;
assign pi_phase_locked_lanes =
{(! PRESENT_DATA_A[0] | A_pi_phase_locked),
(! PRESENT_DATA_B[0] | B_pi_phase_locked) ,
(! PRESENT_DATA_C[0] | C_pi_phase_locked) ,
(! PRESENT_DATA_D[0] | D_pi_phase_locked)};
assign pi_dqs_found_lanes = {D_pi_dqs_found, C_pi_dqs_found, B_pi_dqs_found, A_pi_dqs_found};
// this block scrubs X from rclk_delay[11]
reg rclk_delay_11;
always @(rclk_delay[11]) begin : rclk_delay_11_blk
if ( rclk_delay[11])
rclk_delay_11 = 1;
else
rclk_delay_11 = 0;
end
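  // in simulation rclk_delay can power up as X; the if/else above maps X to 0,
  // presumably so rst_out below never goes X before the shifter fills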
always @(posedge phy_clk or posedge rst ) begin
    // assert rst_out once the scrubbed rclk_delay[11] goes high
if ( rst) begin
rst_out <= #1 0;
end
else begin
if ( rclk_delay_11)
rst_out <= #1 1;
end
end
always @(posedge phy_clk ) begin
// phy_ctl_ready drives reset of the system
rst_primitives <= !phy_ctl_ready ;
A_rst_primitives <= rst_primitives ;
B_rst_primitives <= rst_primitives ;
C_rst_primitives <= rst_primitives ;
D_rst_primitives <= rst_primitives ;
rclk_delay <= #1 (rclk_delay << 1) | (!rst_primitives && phyGo);
mcGo <= #1 rst_out ;
end
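  // rough sequence, as coded above: once phy_ctl_ready is high (releasing
  // rst_primitives) and phyGo is asserted, ones shift into rclk_delay; rst_out
  // asserts when bit 11 is reached (about 12 phy_clk cycles) and mcGo follows
  // one cycle later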
generate
if (BYTE_LANES[0]) begin
assign dummy_data[0] = 0;
end
else begin
assign dummy_data[0] = &phy_dout_remap[1*80-1:0*80];
end
if (BYTE_LANES[1]) begin
assign dummy_data[1] = 0;
end
else begin
assign dummy_data[1] = &phy_dout_remap[2*80-1:1*80];
end
if (BYTE_LANES[2]) begin
assign dummy_data[2] = 0;
end
else begin
assign dummy_data[2] = &phy_dout_remap[3*80-1:2*80];
end
if (BYTE_LANES[3]) begin
assign dummy_data[3] = 0;
end
else begin
assign dummy_data[3] = &phy_dout_remap[4*80-1:3*80];
end
if (PRESENT_DATA_A) begin
assign A_of_data_full = A_of_full;
assign A_of_ctl_full = 0;
assign A_of_data_a_full = A_of_a_full;
assign A_of_ctl_a_full = 0;
assign A_pre_data_a_full = A_pre_fifo_a_full;
end
else begin
assign A_of_ctl_full = A_of_full;
assign A_of_data_full = 0;
assign A_of_ctl_a_full = A_of_a_full;
assign A_of_data_a_full = 0;
assign A_pre_data_a_full = 0;
end
if (PRESENT_DATA_B) begin
assign B_of_data_full = B_of_full;
assign B_of_ctl_full = 0;
assign B_of_data_a_full = B_of_a_full;
assign B_of_ctl_a_full = 0;
assign B_pre_data_a_full = B_pre_fifo_a_full;
end
else begin
assign B_of_ctl_full = B_of_full;
assign B_of_data_full = 0;
assign B_of_ctl_a_full = B_of_a_full;
assign B_of_data_a_full = 0;
assign B_pre_data_a_full = 0;
end
if (PRESENT_DATA_C) begin
assign C_of_data_full = C_of_full;
assign C_of_ctl_full = 0;
assign C_of_data_a_full = C_of_a_full;
assign C_of_ctl_a_full = 0;
assign C_pre_data_a_full = C_pre_fifo_a_full;
end
else begin
assign C_of_ctl_full = C_of_full;
assign C_of_data_full = 0;
assign C_of_ctl_a_full = C_of_a_full;
assign C_of_data_a_full = 0;
assign C_pre_data_a_full = 0;
end
if (PRESENT_DATA_D) begin
assign D_of_data_full = D_of_full;
assign D_of_ctl_full = 0;
assign D_of_data_a_full = D_of_a_full;
assign D_of_ctl_a_full = 0;
assign D_pre_data_a_full = D_pre_fifo_a_full;
end
else begin
assign D_of_ctl_full = D_of_full;
assign D_of_data_full = 0;
assign D_of_ctl_a_full = D_of_a_full;
assign D_of_data_a_full = 0;
assign D_pre_data_a_full = 0;
end
// byte lane must exist and be data lane.
if (PRESENT_DATA_A )
case ( LANE_REMAP[1:0] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[79:0];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[79:0];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[79:0];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[79:0];
endcase
else
case ( LANE_REMAP[1:0] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_B )
case ( LANE_REMAP[5:4] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[159:80];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[159:80];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[159:80];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[159:80];
endcase
else
if (HIGHEST_LANE > 1)
case ( LANE_REMAP[5:4] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_C)
case ( LANE_REMAP[9:8] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[239:160];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[239:160];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[239:160];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[239:160];
endcase
else
if (HIGHEST_LANE > 2)
case ( LANE_REMAP[9:8] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_D )
case ( LANE_REMAP[13:12] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[319:240];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[319:240];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[319:240];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[319:240];
endcase
else
if (HIGHEST_LANE > 3)
case ( LANE_REMAP[13:12] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (HIGHEST_LANE > 1)
assign _phy_ctl_wd = {phy_ctl_wd[31:23], data_offset, phy_ctl_wd[16:0]};
if (HIGHEST_LANE == 1)
assign _phy_ctl_wd = phy_ctl_wd;
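  // with more than one lane, the data_offset field of the PHY control word
  // (nominally bits 22:17 for a 6-bit `PC_DATA_OFFSET_RANGE) is overridden by
  // this bank's data_offset; a single-lane bank passes phy_ctl_wd through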
//BUFR #(.BUFR_DIVIDE ("1")) rclk_buf(.I(rclk_), .O(rclk), .CE (1'b1), .CLR (pi_iserdes_rst));
BUFIO rclk_buf(.I(rclk_), .O(rclk) );
if ( BYTE_LANES[0] ) begin : ddr_byte_lane_A
assign phy_dout_remap[79:0] = part_select_80(phy_dout, (LANE_REMAP[1:0]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("A"),
.PO_DATA_CTL (PC_DATA_CTL_N[0] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[11:0]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[11:0]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (A_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (A_PI_BURST_MODE),
.PI_CLKOUT_DIV (A_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (A_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (A_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (A_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (A_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (A_PO_CLKOUT_DIV),
.PO_FINE_DELAY (A_PO_FINE_DELAY),
.PO_COARSE_BYPASS (A_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (A_PO_COARSE_DELAY),
.PO_OCLK_DELAY (A_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (A_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (A_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (A_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (A_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (A_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (A_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (A_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_A(
.mem_dq_out (mem_dq_out[11:0]),
.mem_dq_ts (mem_dq_ts[11:0]),
.mem_dq_in (mem_dq_in[9:0]),
.mem_dqs_out (mem_dqs_out[0]),
.mem_dqs_ts (mem_dqs_ts[0]),
.mem_dqs_in (mem_dqs_in[0]),
.rst (A_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (A_ddr_clk),
.rclk (A_rclk),
.pi_dqs_found (A_pi_dqs_found),
.dqs_out_of_range (A_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (A_if_a_empty),
.if_empty (A_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*A_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*A_of_empty*/),
.of_a_full (A_of_a_full),
.of_full (A_of_full),
.pre_fifo_a_full (A_pre_fifo_a_full),
.phy_din (phy_din_remap[79:0]),
.phy_dout (phy_dout_remap[79:0]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({B_byte_rd_en,C_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (A_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (A_idelay_ce),
.idelay_ld (A_idelay_ld),
.pi_rst_dqs_find (A_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (A_po_fine_enable),
.po_coarse_enable (A_po_coarse_enable),
.po_fine_inc (A_po_fine_inc),
.po_coarse_inc (A_po_coarse_inc),
.po_counter_load_en (A_po_counter_load_en),
.po_counter_read_en (A_po_counter_read_en),
.po_counter_load_val (A_po_counter_load_val),
.po_coarse_overflow (A_po_coarse_overflow),
.po_fine_overflow (A_po_fine_overflow),
.po_counter_read_val (A_po_counter_read_val),
.po_sel_fine_oclk_delay(A_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (A_pi_fine_enable),
.pi_fine_inc (A_pi_fine_inc),
.pi_counter_load_en (A_pi_counter_load_en),
.pi_counter_read_en (A_pi_counter_read_en),
.pi_counter_load_val (A_pi_counter_load_val),
.pi_fine_overflow (A_pi_fine_overflow),
.pi_counter_read_val (A_pi_counter_read_val),
.pi_iserdes_rst (A_pi_iserdes_rst),
.pi_phase_locked (A_pi_phase_locked),
.fine_delay (A_fine_delay),
.fine_delay_sel (A_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_A
assign A_of_a_full = 1'b0;
assign A_of_full = 1'b0;
assign A_pre_fifo_a_full = 1'b0;
assign A_if_empty = 1'b0;
assign A_byte_rd_en = 1'b1;
assign A_if_a_empty = 1'b0;
assign A_pi_phase_locked = 1;
assign A_pi_dqs_found = 1;
assign A_rclk = 0;
assign A_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign A_pi_counter_read_val = 0;
assign A_po_counter_read_val = 0;
assign A_pi_fine_overflow = 0;
assign A_po_coarse_overflow = 0;
assign A_po_fine_overflow = 0;
end
if ( BYTE_LANES[1] ) begin : ddr_byte_lane_B
assign phy_dout_remap[159:80] = part_select_80(phy_dout, (LANE_REMAP[5:4]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("B"),
.PO_DATA_CTL (PC_DATA_CTL_N[1] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[23:12]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[23:12]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (B_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (B_PI_BURST_MODE),
.PI_CLKOUT_DIV (B_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (B_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (B_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (B_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (B_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (B_PO_CLKOUT_DIV),
.PO_FINE_DELAY (B_PO_FINE_DELAY),
.PO_COARSE_BYPASS (B_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (B_PO_COARSE_DELAY),
.PO_OCLK_DELAY (B_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (B_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (B_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (B_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (B_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (B_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (B_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (B_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_B(
.mem_dq_out (mem_dq_out[23:12]),
.mem_dq_ts (mem_dq_ts[23:12]),
.mem_dq_in (mem_dq_in[19:10]),
.mem_dqs_out (mem_dqs_out[1]),
.mem_dqs_ts (mem_dqs_ts[1]),
.mem_dqs_in (mem_dqs_in[1]),
.rst (B_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (B_ddr_clk),
.rclk (B_rclk),
.pi_dqs_found (B_pi_dqs_found),
.dqs_out_of_range (B_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (B_if_a_empty),
.if_empty (B_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*B_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*B_of_empty*/),
.of_a_full (B_of_a_full),
.of_full (B_of_full),
.pre_fifo_a_full (B_pre_fifo_a_full),
.phy_din (phy_din_remap[159:80]),
.phy_dout (phy_dout_remap[159:80]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,C_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (B_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (B_idelay_ce),
.idelay_ld (B_idelay_ld),
.pi_rst_dqs_find (B_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (B_po_fine_enable),
.po_coarse_enable (B_po_coarse_enable),
.po_fine_inc (B_po_fine_inc),
.po_coarse_inc (B_po_coarse_inc),
.po_counter_load_en (B_po_counter_load_en),
.po_counter_read_en (B_po_counter_read_en),
.po_counter_load_val (B_po_counter_load_val),
.po_coarse_overflow (B_po_coarse_overflow),
.po_fine_overflow (B_po_fine_overflow),
.po_counter_read_val (B_po_counter_read_val),
.po_sel_fine_oclk_delay(B_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (B_pi_fine_enable),
.pi_fine_inc (B_pi_fine_inc),
.pi_counter_load_en (B_pi_counter_load_en),
.pi_counter_read_en (B_pi_counter_read_en),
.pi_counter_load_val (B_pi_counter_load_val),
.pi_fine_overflow (B_pi_fine_overflow),
.pi_counter_read_val (B_pi_counter_read_val),
.pi_iserdes_rst (B_pi_iserdes_rst),
.pi_phase_locked (B_pi_phase_locked),
.fine_delay (B_fine_delay),
.fine_delay_sel (B_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_B
assign B_of_a_full = 1'b0;
assign B_of_full = 1'b0;
assign B_pre_fifo_a_full = 1'b0;
assign B_if_empty = 1'b0;
assign B_if_a_empty = 1'b0;
assign B_byte_rd_en = 1'b1;
assign B_pi_phase_locked = 1;
assign B_pi_dqs_found = 1;
assign B_rclk = 0;
assign B_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign B_pi_counter_read_val = 0;
assign B_po_counter_read_val = 0;
assign B_pi_fine_overflow = 0;
assign B_po_coarse_overflow = 0;
assign B_po_fine_overflow = 0;
end
if ( BYTE_LANES[2] ) begin : ddr_byte_lane_C
assign phy_dout_remap[239:160] = part_select_80(phy_dout, (LANE_REMAP[9:8]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("C"),
.PO_DATA_CTL (PC_DATA_CTL_N[2] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[35:24]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[35:24]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (C_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (C_PI_BURST_MODE),
.PI_CLKOUT_DIV (C_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (C_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (C_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (C_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (C_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (C_PO_CLKOUT_DIV),
.PO_FINE_DELAY (C_PO_FINE_DELAY),
.PO_COARSE_BYPASS (C_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (C_PO_COARSE_DELAY),
.PO_OCLK_DELAY (C_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (C_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (C_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (C_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (C_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (C_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (C_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (C_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_C(
.mem_dq_out (mem_dq_out[35:24]),
.mem_dq_ts (mem_dq_ts[35:24]),
.mem_dq_in (mem_dq_in[29:20]),
.mem_dqs_out (mem_dqs_out[2]),
.mem_dqs_ts (mem_dqs_ts[2]),
.mem_dqs_in (mem_dqs_in[2]),
.rst (C_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (C_ddr_clk),
.rclk (C_rclk),
.pi_dqs_found (C_pi_dqs_found),
.dqs_out_of_range (C_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (C_if_a_empty),
.if_empty (C_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*C_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*C_of_empty*/),
.of_a_full (C_of_a_full),
.of_full (C_of_full),
.pre_fifo_a_full (C_pre_fifo_a_full),
.phy_din (phy_din_remap[239:160]),
.phy_dout (phy_dout_remap[239:160]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,B_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (C_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (C_idelay_ce),
.idelay_ld (C_idelay_ld),
.pi_rst_dqs_find (C_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (C_po_fine_enable),
.po_coarse_enable (C_po_coarse_enable),
.po_fine_inc (C_po_fine_inc),
.po_coarse_inc (C_po_coarse_inc),
.po_counter_load_en (C_po_counter_load_en),
.po_counter_read_en (C_po_counter_read_en),
.po_counter_load_val (C_po_counter_load_val),
.po_coarse_overflow (C_po_coarse_overflow),
.po_fine_overflow (C_po_fine_overflow),
.po_counter_read_val (C_po_counter_read_val),
.po_sel_fine_oclk_delay(C_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (C_pi_fine_enable),
.pi_fine_inc (C_pi_fine_inc),
.pi_counter_load_en (C_pi_counter_load_en),
.pi_counter_read_en (C_pi_counter_read_en),
.pi_counter_load_val (C_pi_counter_load_val),
.pi_fine_overflow (C_pi_fine_overflow),
.pi_counter_read_val (C_pi_counter_read_val),
.pi_iserdes_rst (C_pi_iserdes_rst),
.pi_phase_locked (C_pi_phase_locked),
.fine_delay (C_fine_delay),
.fine_delay_sel (C_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_C
assign C_of_a_full = 1'b0;
assign C_of_full = 1'b0;
assign C_pre_fifo_a_full = 1'b0;
assign C_if_empty = 1'b0;
assign C_byte_rd_en = 1'b1;
assign C_if_a_empty = 1'b0;
assign C_pi_phase_locked = 1;
assign C_pi_dqs_found = 1;
assign C_rclk = 0;
assign C_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign C_pi_counter_read_val = 0;
assign C_po_counter_read_val = 0;
assign C_pi_fine_overflow = 0;
assign C_po_coarse_overflow = 0;
assign C_po_fine_overflow = 0;
end
if ( BYTE_LANES[3] ) begin : ddr_byte_lane_D
assign phy_dout_remap[319:240] = part_select_80(phy_dout, (LANE_REMAP[13:12]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("D"),
.PO_DATA_CTL (PC_DATA_CTL_N[3] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[47:36]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[47:36]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (D_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (D_PI_BURST_MODE),
.PI_CLKOUT_DIV (D_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (D_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (D_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (D_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (D_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (D_PO_CLKOUT_DIV),
.PO_FINE_DELAY (D_PO_FINE_DELAY),
.PO_COARSE_BYPASS (D_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (D_PO_COARSE_DELAY),
.PO_OCLK_DELAY (D_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (D_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (D_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (D_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (D_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (D_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (D_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (D_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_D(
.mem_dq_out (mem_dq_out[47:36]),
.mem_dq_ts (mem_dq_ts[47:36]),
.mem_dq_in (mem_dq_in[39:30]),
.mem_dqs_out (mem_dqs_out[3]),
.mem_dqs_ts (mem_dqs_ts[3]),
.mem_dqs_in (mem_dqs_in[3]),
.rst (D_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (D_ddr_clk),
.rclk (D_rclk),
.pi_dqs_found (D_pi_dqs_found),
.dqs_out_of_range (D_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (D_if_a_empty),
.if_empty (D_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*D_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*D_of_empty*/),
.of_a_full (D_of_a_full),
.of_full (D_of_full),
.pre_fifo_a_full (D_pre_fifo_a_full),
.phy_din (phy_din_remap[319:240]),
.phy_dout (phy_dout_remap[319:240]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.idelay_inc (idelay_inc),
.idelay_ce (D_idelay_ce),
.idelay_ld (D_idelay_ld),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,B_byte_rd_en,C_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (D_byte_rd_en),
// calibration signals
.pi_rst_dqs_find (D_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (D_po_fine_enable),
.po_coarse_enable (D_po_coarse_enable),
.po_fine_inc (D_po_fine_inc),
.po_coarse_inc (D_po_coarse_inc),
.po_counter_load_en (D_po_counter_load_en),
.po_counter_read_en (D_po_counter_read_en),
.po_counter_load_val (D_po_counter_load_val),
.po_coarse_overflow (D_po_coarse_overflow),
.po_fine_overflow (D_po_fine_overflow),
.po_counter_read_val (D_po_counter_read_val),
.po_sel_fine_oclk_delay(D_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (D_pi_fine_enable),
.pi_fine_inc (D_pi_fine_inc),
.pi_counter_load_en (D_pi_counter_load_en),
.pi_counter_read_en (D_pi_counter_read_en),
.pi_counter_load_val (D_pi_counter_load_val),
.pi_fine_overflow (D_pi_fine_overflow),
.pi_counter_read_val (D_pi_counter_read_val),
.pi_iserdes_rst (D_pi_iserdes_rst),
.pi_phase_locked (D_pi_phase_locked),
.fine_delay (D_fine_delay),
.fine_delay_sel (D_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_D
assign D_of_a_full = 1'b0;
assign D_of_full = 1'b0;
assign D_pre_fifo_a_full = 1'b0;
assign D_if_empty = 1'b0;
assign D_byte_rd_en = 1'b1;
assign D_if_a_empty = 1'b0;
assign D_rclk = 0;
assign D_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign D_pi_dqs_found = 1;
assign D_pi_phase_locked = 1;
assign D_pi_counter_read_val = 0;
assign D_po_counter_read_val = 0;
assign D_pi_fine_overflow = 0;
assign D_po_coarse_overflow = 0;
assign D_po_fine_overflow = 0;
end
endgenerate
assign phaser_ctl_bus[MSB_RANK_SEL_I : MSB_RANK_SEL_I - 7] = in_rank;
PHY_CONTROL #(
.AO_WRLVL_EN ( PC_AO_WRLVL_EN),
.AO_TOGGLE ( PC_AO_TOGGLE),
.BURST_MODE ( PC_BURST_MODE),
.CO_DURATION ( PC_CO_DURATION ),
.CLK_RATIO ( PC_CLK_RATIO),
.DATA_CTL_A_N ( PC_DATA_CTL_A),
.DATA_CTL_B_N ( PC_DATA_CTL_B),
.DATA_CTL_C_N ( PC_DATA_CTL_C),
.DATA_CTL_D_N ( PC_DATA_CTL_D),
.DI_DURATION ( PC_DI_DURATION ),
.DO_DURATION ( PC_DO_DURATION ),
.EVENTS_DELAY ( PC_EVENTS_DELAY),
.FOUR_WINDOW_CLOCKS ( PC_FOUR_WINDOW_CLOCKS),
.MULTI_REGION ( PC_MULTI_REGION ),
.PHY_COUNT_ENABLE ( PC_PHY_COUNT_EN),
.DISABLE_SEQ_MATCH ( PC_DISABLE_SEQ_MATCH),
.SYNC_MODE ( PC_SYNC_MODE),
.CMD_OFFSET ( PC_CMD_OFFSET),
.RD_CMD_OFFSET_0 ( PC_RD_CMD_OFFSET_0),
.RD_CMD_OFFSET_1 ( PC_RD_CMD_OFFSET_1),
.RD_CMD_OFFSET_2 ( PC_RD_CMD_OFFSET_2),
.RD_CMD_OFFSET_3 ( PC_RD_CMD_OFFSET_3),
.RD_DURATION_0 ( PC_RD_DURATION_0),
.RD_DURATION_1 ( PC_RD_DURATION_1),
.RD_DURATION_2 ( PC_RD_DURATION_2),
.RD_DURATION_3 ( PC_RD_DURATION_3),
.WR_CMD_OFFSET_0 ( PC_WR_CMD_OFFSET_0),
.WR_CMD_OFFSET_1 ( PC_WR_CMD_OFFSET_1),
.WR_CMD_OFFSET_2 ( PC_WR_CMD_OFFSET_2),
.WR_CMD_OFFSET_3 ( PC_WR_CMD_OFFSET_3),
.WR_DURATION_0 ( PC_WR_DURATION_0),
.WR_DURATION_1 ( PC_WR_DURATION_1),
.WR_DURATION_2 ( PC_WR_DURATION_2),
.WR_DURATION_3 ( PC_WR_DURATION_3)
) phy_control_i (
.AUXOUTPUT (aux_out),
.INBURSTPENDING (phaser_ctl_bus[MSB_BURST_PEND_PI:MSB_BURST_PEND_PI-3]),
.INRANKA (in_rank[1:0]),
.INRANKB (in_rank[3:2]),
.INRANKC (in_rank[5:4]),
.INRANKD (in_rank[7:6]),
.OUTBURSTPENDING (phaser_ctl_bus[MSB_BURST_PEND_PO:MSB_BURST_PEND_PO-3]),
.PCENABLECALIB (phy_encalib),
.PHYCTLALMOSTFULL (phy_ctl_a_full),
.PHYCTLEMPTY (phy_ctl_empty),
.PHYCTLFULL (phy_ctl_full),
.PHYCTLREADY (phy_ctl_ready),
.MEMREFCLK (mem_refclk),
.PHYCLK (phy_ctl_clk),
.PHYCTLMSTREMPTY (phy_ctl_mstr_empty),
.PHYCTLWD (_phy_ctl_wd),
.PHYCTLWRENABLE (phy_ctl_wr),
.PLLLOCK (pll_lock),
.REFDLLLOCK (ref_dll_lock), // is reset while !locked
.RESET (rst),
.SYNCIN (sync_pulse),
.READCALIBENABLE (phy_read_calib),
.WRITECALIBENABLE (phy_write_calib)
`ifdef USE_PHY_CONTROL_TEST
, .TESTINPUT (16'b0),
.TESTOUTPUT (test_output),
.TESTSELECT (test_select),
.SCANENABLEN (scan_enable)
`endif
);
// register outputs to give extra slack in timing
always @(posedge phy_clk ) begin
case (calib_sel[1:0])
2'h0: begin
po_coarse_overflow <= #1 A_po_coarse_overflow;
po_fine_overflow <= #1 A_po_fine_overflow;
po_counter_read_val <= #1 A_po_counter_read_val;
pi_fine_overflow <= #1 A_pi_fine_overflow;
pi_counter_read_val<= #1 A_pi_counter_read_val;
pi_phase_locked <= #1 A_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 A_pi_dqs_found;
pi_dqs_out_of_range <= #1 A_pi_dqs_out_of_range;
end
2'h1: begin
po_coarse_overflow <= #1 B_po_coarse_overflow;
po_fine_overflow <= #1 B_po_fine_overflow;
po_counter_read_val <= #1 B_po_counter_read_val;
pi_fine_overflow <= #1 B_pi_fine_overflow;
pi_counter_read_val <= #1 B_pi_counter_read_val;
pi_phase_locked <= #1 B_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 B_pi_dqs_found;
pi_dqs_out_of_range <= #1 B_pi_dqs_out_of_range;
end
2'h2: begin
po_coarse_overflow <= #1 C_po_coarse_overflow;
po_fine_overflow <= #1 C_po_fine_overflow;
po_counter_read_val <= #1 C_po_counter_read_val;
pi_fine_overflow <= #1 C_pi_fine_overflow;
pi_counter_read_val <= #1 C_pi_counter_read_val;
pi_phase_locked <= #1 C_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 C_pi_dqs_found;
pi_dqs_out_of_range <= #1 C_pi_dqs_out_of_range;
end
2'h3: begin
po_coarse_overflow <= #1 D_po_coarse_overflow;
po_fine_overflow <= #1 D_po_fine_overflow;
po_counter_read_val <= #1 D_po_counter_read_val;
pi_fine_overflow <= #1 D_pi_fine_overflow;
pi_counter_read_val <= #1 D_pi_counter_read_val;
pi_phase_locked <= #1 D_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 D_pi_dqs_found;
pi_dqs_out_of_range <= #1 D_pi_dqs_out_of_range;
end
default: begin
po_coarse_overflow <= po_coarse_overflow;
end
endcase
end
wire B_mux_ctrl;
wire C_mux_ctrl;
wire D_mux_ctrl;
generate
if (HIGHEST_LANE > 1)
assign B_mux_ctrl = ( !calib_zero_lanes[1] && ( ! calib_zero_ctrl || DATA_CTL_N[1]));
else
assign B_mux_ctrl = 0;
if (HIGHEST_LANE > 2)
assign C_mux_ctrl = ( !calib_zero_lanes[2] && (! calib_zero_ctrl || DATA_CTL_N[2]));
else
assign C_mux_ctrl = 0;
if (HIGHEST_LANE > 3)
assign D_mux_ctrl = ( !calib_zero_lanes[3] && ( ! calib_zero_ctrl || DATA_CTL_N[3]));
else
assign D_mux_ctrl = 0;
endgenerate
always @(*) begin
A_pi_fine_enable = 0;
A_pi_fine_inc = 0;
A_pi_counter_load_en = 0;
A_pi_counter_read_en = 0;
A_pi_counter_load_val = 0;
A_pi_rst_dqs_find = 0;
A_po_fine_enable = 0;
A_po_coarse_enable = 0;
A_po_fine_inc = 0;
A_po_coarse_inc = 0;
A_po_counter_load_en = 0;
A_po_counter_read_en = 0;
A_po_counter_load_val = 0;
A_po_sel_fine_oclk_delay = 0;
A_idelay_ce = 0;
A_idelay_ld = 0;
A_fine_delay = 0;
A_fine_delay_sel = 0;
B_pi_fine_enable = 0;
B_pi_fine_inc = 0;
B_pi_counter_load_en = 0;
B_pi_counter_read_en = 0;
B_pi_counter_load_val = 0;
B_pi_rst_dqs_find = 0;
B_po_fine_enable = 0;
B_po_coarse_enable = 0;
B_po_fine_inc = 0;
B_po_coarse_inc = 0;
B_po_counter_load_en = 0;
B_po_counter_read_en = 0;
B_po_counter_load_val = 0;
B_po_sel_fine_oclk_delay = 0;
B_idelay_ce = 0;
B_idelay_ld = 0;
B_fine_delay = 0;
B_fine_delay_sel = 0;
C_pi_fine_enable = 0;
C_pi_fine_inc = 0;
C_pi_counter_load_en = 0;
C_pi_counter_read_en = 0;
C_pi_counter_load_val = 0;
C_pi_rst_dqs_find = 0;
C_po_fine_enable = 0;
C_po_coarse_enable = 0;
C_po_fine_inc = 0;
C_po_coarse_inc = 0;
C_po_counter_load_en = 0;
C_po_counter_read_en = 0;
C_po_counter_load_val = 0;
C_po_sel_fine_oclk_delay = 0;
C_idelay_ce = 0;
C_idelay_ld = 0;
C_fine_delay = 0;
C_fine_delay_sel = 0;
D_pi_fine_enable = 0;
D_pi_fine_inc = 0;
D_pi_counter_load_en = 0;
D_pi_counter_read_en = 0;
D_pi_counter_load_val = 0;
D_pi_rst_dqs_find = 0;
D_po_fine_enable = 0;
D_po_coarse_enable = 0;
D_po_fine_inc = 0;
D_po_coarse_inc = 0;
D_po_counter_load_en = 0;
D_po_counter_read_en = 0;
D_po_counter_load_val = 0;
D_po_sel_fine_oclk_delay = 0;
D_idelay_ce = 0;
D_idelay_ld = 0;
D_fine_delay = 0;
D_fine_delay_sel = 0;
if ( calib_sel[2]) begin
// if this is asserted, all calib signals are deasserted
A_pi_fine_enable = 0;
A_pi_fine_inc = 0;
A_pi_counter_load_en = 0;
A_pi_counter_read_en = 0;
A_pi_counter_load_val = 0;
A_pi_rst_dqs_find = 0;
A_po_fine_enable = 0;
A_po_coarse_enable = 0;
A_po_fine_inc = 0;
A_po_coarse_inc = 0;
A_po_counter_load_en = 0;
A_po_counter_read_en = 0;
A_po_counter_load_val = 0;
A_po_sel_fine_oclk_delay = 0;
A_idelay_ce = 0;
A_idelay_ld = 0;
A_fine_delay = 0;
A_fine_delay_sel = 0;
B_pi_fine_enable = 0;
B_pi_fine_inc = 0;
B_pi_counter_load_en = 0;
B_pi_counter_read_en = 0;
B_pi_counter_load_val = 0;
B_pi_rst_dqs_find = 0;
B_po_fine_enable = 0;
B_po_coarse_enable = 0;
B_po_fine_inc = 0;
B_po_coarse_inc = 0;
B_po_counter_load_en = 0;
B_po_counter_read_en = 0;
B_po_counter_load_val = 0;
B_po_sel_fine_oclk_delay = 0;
B_idelay_ce = 0;
B_idelay_ld = 0;
B_fine_delay = 0;
B_fine_delay_sel = 0;
C_pi_fine_enable = 0;
C_pi_fine_inc = 0;
C_pi_counter_load_en = 0;
C_pi_counter_read_en = 0;
C_pi_counter_load_val = 0;
C_pi_rst_dqs_find = 0;
C_po_fine_enable = 0;
C_po_coarse_enable = 0;
C_po_fine_inc = 0;
C_po_coarse_inc = 0;
C_po_counter_load_en = 0;
C_po_counter_read_en = 0;
C_po_counter_load_val = 0;
C_po_sel_fine_oclk_delay = 0;
C_idelay_ce = 0;
C_idelay_ld = 0;
C_fine_delay = 0;
C_fine_delay_sel = 0;
D_pi_fine_enable = 0;
D_pi_fine_inc = 0;
D_pi_counter_load_en = 0;
D_pi_counter_read_en = 0;
D_pi_counter_load_val = 0;
D_pi_rst_dqs_find = 0;
D_po_fine_enable = 0;
D_po_coarse_enable = 0;
D_po_fine_inc = 0;
D_po_coarse_inc = 0;
D_po_counter_load_en = 0;
D_po_counter_read_en = 0;
D_po_counter_load_val = 0;
D_po_sel_fine_oclk_delay = 0;
D_idelay_ce = 0;
D_idelay_ld = 0;
D_fine_delay = 0;
D_fine_delay_sel = 0;
end else
if (calib_in_common) begin
// if this is asserted, each signal is broadcast to all phasers
// in common
if ( !calib_zero_lanes[0] && (! calib_zero_ctrl || DATA_CTL_N[0])) begin
A_pi_fine_enable = pi_fine_enable;
A_pi_fine_inc = pi_fine_inc;
A_pi_counter_load_en = pi_counter_load_en;
A_pi_counter_read_en = pi_counter_read_en;
A_pi_counter_load_val = pi_counter_load_val;
A_pi_rst_dqs_find = pi_rst_dqs_find;
A_po_fine_enable = po_fine_enable;
A_po_coarse_enable = po_coarse_enable;
A_po_fine_inc = po_fine_inc;
A_po_coarse_inc = po_coarse_inc;
A_po_counter_load_en = po_counter_load_en;
A_po_counter_read_en = po_counter_read_en;
A_po_counter_load_val = po_counter_load_val;
A_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
A_idelay_ce = idelay_ce;
A_idelay_ld = idelay_ld;
A_fine_delay = fine_delay ;
A_fine_delay_sel = fine_delay_sel;
end
if ( B_mux_ctrl) begin
B_pi_fine_enable = pi_fine_enable;
B_pi_fine_inc = pi_fine_inc;
B_pi_counter_load_en = pi_counter_load_en;
B_pi_counter_read_en = pi_counter_read_en;
B_pi_counter_load_val = pi_counter_load_val;
B_pi_rst_dqs_find = pi_rst_dqs_find;
B_po_fine_enable = po_fine_enable;
B_po_coarse_enable = po_coarse_enable;
B_po_fine_inc = po_fine_inc;
B_po_coarse_inc = po_coarse_inc;
B_po_counter_load_en = po_counter_load_en;
B_po_counter_read_en = po_counter_read_en;
B_po_counter_load_val = po_counter_load_val;
B_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
B_idelay_ce = idelay_ce;
B_idelay_ld = idelay_ld;
B_fine_delay = fine_delay ;
B_fine_delay_sel = fine_delay_sel;
end
if ( C_mux_ctrl) begin
C_pi_fine_enable = pi_fine_enable;
C_pi_fine_inc = pi_fine_inc;
C_pi_counter_load_en = pi_counter_load_en;
C_pi_counter_read_en = pi_counter_read_en;
C_pi_counter_load_val = pi_counter_load_val;
C_pi_rst_dqs_find = pi_rst_dqs_find;
C_po_fine_enable = po_fine_enable;
C_po_coarse_enable = po_coarse_enable;
C_po_fine_inc = po_fine_inc;
C_po_coarse_inc = po_coarse_inc;
C_po_counter_load_en = po_counter_load_en;
C_po_counter_read_en = po_counter_read_en;
C_po_counter_load_val = po_counter_load_val;
C_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
C_idelay_ce = idelay_ce;
C_idelay_ld = idelay_ld;
C_fine_delay = fine_delay ;
C_fine_delay_sel = fine_delay_sel;
end
if ( D_mux_ctrl) begin
D_pi_fine_enable = pi_fine_enable;
D_pi_fine_inc = pi_fine_inc;
D_pi_counter_load_en = pi_counter_load_en;
D_pi_counter_read_en = pi_counter_read_en;
D_pi_counter_load_val = pi_counter_load_val;
D_pi_rst_dqs_find = pi_rst_dqs_find;
D_po_fine_enable = po_fine_enable;
D_po_coarse_enable = po_coarse_enable;
D_po_fine_inc = po_fine_inc;
D_po_coarse_inc = po_coarse_inc;
D_po_counter_load_en = po_counter_load_en;
D_po_counter_read_en = po_counter_read_en;
D_po_counter_load_val = po_counter_load_val;
D_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
D_idelay_ce = idelay_ce;
D_idelay_ld = idelay_ld;
D_fine_delay = fine_delay ;
D_fine_delay_sel = fine_delay_sel;
end
end
else begin
// otherwise, only a single phaser is selected
case (calib_sel[1:0])
0: begin
A_pi_fine_enable = pi_fine_enable;
A_pi_fine_inc = pi_fine_inc;
A_pi_counter_load_en = pi_counter_load_en;
A_pi_counter_read_en = pi_counter_read_en;
A_pi_counter_load_val = pi_counter_load_val;
A_pi_rst_dqs_find = pi_rst_dqs_find;
A_po_fine_enable = po_fine_enable;
A_po_coarse_enable = po_coarse_enable;
A_po_fine_inc = po_fine_inc;
A_po_coarse_inc = po_coarse_inc;
A_po_counter_load_en = po_counter_load_en;
A_po_counter_read_en = po_counter_read_en;
A_po_counter_load_val = po_counter_load_val;
A_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
A_idelay_ce = idelay_ce;
A_idelay_ld = idelay_ld;
A_fine_delay = fine_delay ;
A_fine_delay_sel = fine_delay_sel;
end
1: begin
B_pi_fine_enable = pi_fine_enable;
B_pi_fine_inc = pi_fine_inc;
B_pi_counter_load_en = pi_counter_load_en;
B_pi_counter_read_en = pi_counter_read_en;
B_pi_counter_load_val = pi_counter_load_val;
B_pi_rst_dqs_find = pi_rst_dqs_find;
B_po_fine_enable = po_fine_enable;
B_po_coarse_enable = po_coarse_enable;
B_po_fine_inc = po_fine_inc;
B_po_coarse_inc = po_coarse_inc;
B_po_counter_load_en = po_counter_load_en;
B_po_counter_read_en = po_counter_read_en;
B_po_counter_load_val = po_counter_load_val;
B_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
B_idelay_ce = idelay_ce;
B_idelay_ld = idelay_ld;
B_fine_delay = fine_delay ;
B_fine_delay_sel = fine_delay_sel;
end
2: begin
C_pi_fine_enable = pi_fine_enable;
C_pi_fine_inc = pi_fine_inc;
C_pi_counter_load_en = pi_counter_load_en;
C_pi_counter_read_en = pi_counter_read_en;
C_pi_counter_load_val = pi_counter_load_val;
C_pi_rst_dqs_find = pi_rst_dqs_find;
C_po_fine_enable = po_fine_enable;
C_po_coarse_enable = po_coarse_enable;
C_po_fine_inc = po_fine_inc;
C_po_coarse_inc = po_coarse_inc;
C_po_counter_load_en = po_counter_load_en;
C_po_counter_read_en = po_counter_read_en;
C_po_counter_load_val = po_counter_load_val;
C_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
C_idelay_ce = idelay_ce;
C_idelay_ld = idelay_ld;
C_fine_delay = fine_delay ;
C_fine_delay_sel = fine_delay_sel;
end
3: begin
D_pi_fine_enable = pi_fine_enable;
D_pi_fine_inc = pi_fine_inc;
D_pi_counter_load_en = pi_counter_load_en;
D_pi_counter_read_en = pi_counter_read_en;
D_pi_counter_load_val = pi_counter_load_val;
D_pi_rst_dqs_find = pi_rst_dqs_find;
D_po_fine_enable = po_fine_enable;
D_po_coarse_enable = po_coarse_enable;
D_po_fine_inc = po_fine_inc;
D_po_coarse_inc = po_coarse_inc;
D_po_counter_load_en = po_counter_load_en;
D_po_counter_read_en = po_counter_read_en;
D_po_counter_load_val = po_counter_load_val;
D_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
D_idelay_ce = idelay_ce;
D_idelay_ld = idelay_ld;
D_fine_delay = fine_delay ;
D_fine_delay_sel = fine_delay_sel;
end
endcase
end
end
// obligatory PHASER_REF
PHASER_REF phaser_ref_i(
.LOCKED (ref_dll_lock),
.CLKIN (freq_refclk),
.PWRDWN (1'b0),
.RST ( ! pll_lock)
);
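// LOCKED from PHASER_REF drives ref_dll_lock, which feeds the REFDLLLOCK input of
// PHY_CONTROL above and is also exported at the module boundary.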
// optional idelay_ctrl
generate
if ( GENERATE_IDELAYCTRL == "TRUE")
IDELAYCTRL idelayctrl (
.RDY (/*idelayctrl_rdy*/),
.REFCLK (idelayctrl_refclk),
.RST (rst)
);
endgenerate
endmodule
|
module mig_7series_v2_3_ddr_phy_4lanes #(
parameter GENERATE_IDELAYCTRL = "TRUE",
parameter IODELAY_GRP = "IODELAY_MIG",
parameter FPGA_SPEED_GRADE = 1,
parameter BANK_TYPE = "HP_IO", // # = "HP_IO", "HPL_IO", "HR_IO", "HRL_IO"
parameter BYTELANES_DDR_CK = 24'b0010_0010_0010_0010_0010_0010,
parameter NUM_DDR_CK = 1,
// next three parameter fields correspond to byte lanes for lane order DCBA
parameter BYTE_LANES = 4'b1111, // lane existence, one per lane
parameter DATA_CTL_N = 4'b1111, // data or control, per lane
parameter BITLANES = 48'hffff_ffff_ffff,
parameter BITLANES_OUTONLY = 48'h0000_0000_0000,
parameter LANE_REMAP = 16'h3210,// 4-bit index
// used to rewire to one of four
// input/output bus lanes
// example: 0321 remaps lanes as:
// D->A
// C->D
// B->C
// A->B
parameter LAST_BANK = "FALSE",
parameter USE_PRE_POST_FIFO = "FALSE",
parameter RCLK_SELECT_LANE = "B",
parameter real TCK = 0.00,
parameter SYNTHESIS = "FALSE",
parameter PO_CTL_COARSE_BYPASS = "FALSE",
parameter PO_FINE_DELAY = 0,
parameter PI_SEL_CLK_OFFSET = 0,
// phy_control parameter used in other parameters
parameter PC_CLK_RATIO = 4,
//phaser_in parameters
parameter A_PI_FREQ_REF_DIV = "NONE",
parameter A_PI_CLKOUT_DIV = 2,
parameter A_PI_BURST_MODE = "TRUE",
parameter A_PI_OUTPUT_CLK_SRC = "DELAYED_REF" , //"DELAYED_REF",
parameter A_PI_FINE_DELAY = 60,
parameter A_PI_SYNC_IN_DIV_RST = "TRUE",
parameter B_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter B_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter B_PI_BURST_MODE = A_PI_BURST_MODE,
parameter B_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter B_PI_FINE_DELAY = A_PI_FINE_DELAY,
parameter B_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
parameter C_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter C_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter C_PI_BURST_MODE = A_PI_BURST_MODE,
parameter C_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter C_PI_FINE_DELAY = 0,
parameter C_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
parameter D_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter D_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter D_PI_BURST_MODE = A_PI_BURST_MODE,
parameter D_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter D_PI_FINE_DELAY = 0,
parameter D_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
//phaser_out parameters
parameter A_PO_CLKOUT_DIV = (DATA_CTL_N[0] == 0) ? PC_CLK_RATIO : 2,
parameter A_PO_FINE_DELAY = PO_FINE_DELAY,
parameter A_PO_COARSE_DELAY = 0,
parameter A_PO_OCLK_DELAY = 0,
parameter A_PO_OCLKDELAY_INV = "FALSE",
parameter A_PO_OUTPUT_CLK_SRC = "DELAYED_REF",
parameter A_PO_SYNC_IN_DIV_RST = "TRUE",
//parameter A_PO_SYNC_IN_DIV_RST = "FALSE",
parameter B_PO_CLKOUT_DIV = (DATA_CTL_N[1] == 0) ? PC_CLK_RATIO : 2,
parameter B_PO_FINE_DELAY = PO_FINE_DELAY,
parameter B_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter B_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter B_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter B_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter B_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter C_PO_CLKOUT_DIV = (DATA_CTL_N[2] == 0) ? PC_CLK_RATIO : 2,
parameter C_PO_FINE_DELAY = PO_FINE_DELAY,
parameter C_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter C_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter C_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter C_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter C_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter D_PO_CLKOUT_DIV = (DATA_CTL_N[3] == 0) ? PC_CLK_RATIO : 2,
parameter D_PO_FINE_DELAY = PO_FINE_DELAY,
parameter D_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter D_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter D_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter D_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter D_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter A_IDELAYE2_IDELAY_TYPE = "VARIABLE",
parameter A_IDELAYE2_IDELAY_VALUE = 00,
parameter B_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter B_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
parameter C_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter C_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
parameter D_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter D_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
// phy_control parameters
parameter PC_BURST_MODE = "TRUE",
parameter PC_DATA_CTL_N = DATA_CTL_N,
parameter PC_CMD_OFFSET = 0,
parameter PC_RD_CMD_OFFSET_0 = 0,
parameter PC_RD_CMD_OFFSET_1 = 0,
parameter PC_RD_CMD_OFFSET_2 = 0,
parameter PC_RD_CMD_OFFSET_3 = 0,
parameter PC_CO_DURATION = 1,
parameter PC_DI_DURATION = 1,
parameter PC_DO_DURATION = 1,
parameter PC_RD_DURATION_0 = 0,
parameter PC_RD_DURATION_1 = 0,
parameter PC_RD_DURATION_2 = 0,
parameter PC_RD_DURATION_3 = 0,
parameter PC_WR_CMD_OFFSET_0 = 5,
parameter PC_WR_CMD_OFFSET_1 = 5,
parameter PC_WR_CMD_OFFSET_2 = 5,
parameter PC_WR_CMD_OFFSET_3 = 5,
parameter PC_WR_DURATION_0 = 6,
parameter PC_WR_DURATION_1 = 6,
parameter PC_WR_DURATION_2 = 6,
parameter PC_WR_DURATION_3 = 6,
parameter PC_AO_WRLVL_EN = 0,
parameter PC_AO_TOGGLE = 4'b0101, // odd bits are toggle (CKE)
parameter PC_FOUR_WINDOW_CLOCKS = 63,
parameter PC_EVENTS_DELAY = 18,
parameter PC_PHY_COUNT_EN = "TRUE",
parameter PC_SYNC_MODE = "TRUE",
parameter PC_DISABLE_SEQ_MATCH = "TRUE",
parameter PC_MULTI_REGION = "FALSE",
// io fifo parameters
parameter A_OF_ARRAY_MODE = (DATA_CTL_N[0] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter B_OF_ARRAY_MODE = (DATA_CTL_N[1] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter C_OF_ARRAY_MODE = (DATA_CTL_N[2] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter D_OF_ARRAY_MODE = (DATA_CTL_N[3] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter OF_ALMOST_EMPTY_VALUE = 1,
parameter OF_ALMOST_FULL_VALUE = 1,
parameter OF_OUTPUT_DISABLE = "TRUE",
parameter OF_SYNCHRONOUS_MODE = PC_SYNC_MODE,
parameter A_OS_DATA_RATE = "DDR",
parameter A_OS_DATA_WIDTH = 4,
parameter B_OS_DATA_RATE = A_OS_DATA_RATE,
parameter B_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter C_OS_DATA_RATE = A_OS_DATA_RATE,
parameter C_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter D_OS_DATA_RATE = A_OS_DATA_RATE,
parameter D_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter A_IF_ARRAY_MODE = "ARRAY_MODE_4_X_8",
parameter B_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter C_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter D_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter IF_ALMOST_EMPTY_VALUE = 1,
parameter IF_ALMOST_FULL_VALUE = 1,
parameter IF_SYNCHRONOUS_MODE = PC_SYNC_MODE,
// this is used locally, not for external pushdown
// NOTE: the 0+ is needed in each to coerce to integer for addition.
// otherwise four 1-bit values would be added, producing a 1-bit result.
parameter HIGHEST_LANE = LAST_BANK == "FALSE" ? 4 : (BYTE_LANES[3] ? 4 : BYTE_LANES[2] ? 3 : BYTE_LANES[1] ? 2 : 1),
parameter N_CTL_LANES = ((0+(!DATA_CTL_N[0]) & BYTE_LANES[0]) + (0+(!DATA_CTL_N[1]) & BYTE_LANES[1]) + (0+(!DATA_CTL_N[2]) & BYTE_LANES[2]) + (0+(!DATA_CTL_N[3]) & BYTE_LANES[3])),
parameter N_BYTE_LANES = (0+BYTE_LANES[0]) + (0+BYTE_LANES[1]) + (0+BYTE_LANES[2]) + (0+BYTE_LANES[3]),
parameter N_DATA_LANES = N_BYTE_LANES - N_CTL_LANES,
// assume ODT per rank + any declared CKEs
parameter AUXOUT_WIDTH = 4,
parameter LP_DDR_CK_WIDTH = 2
,parameter CKE_ODT_AUX = "FALSE"
)
(
//`include "phy.vh"
input rst,
input phy_clk,
input phy_ctl_clk,
input freq_refclk,
input mem_refclk,
input mem_refclk_div4,
input pll_lock,
input sync_pulse,
input idelayctrl_refclk,
input [HIGHEST_LANE*80-1:0] phy_dout,
input phy_cmd_wr_en,
input phy_data_wr_en,
input phy_rd_en,
input phy_ctl_mstr_empty,
input [31:0] phy_ctl_wd,
input [`PC_DATA_OFFSET_RANGE] data_offset,
input phy_ctl_wr,
input if_empty_def,
input phyGo,
input input_sink,
output [(LP_DDR_CK_WIDTH*24)-1:0] ddr_clk, // to memory
output rclk,
output if_a_empty,
output if_empty,
output byte_rd_en,
output if_empty_or,
output if_empty_and,
output of_ctl_a_full,
output of_data_a_full,
output of_ctl_full,
output of_data_full,
output pre_data_a_full,
output [HIGHEST_LANE*80-1:0]phy_din, // assume input bus same size as output bus
output phy_ctl_empty,
output phy_ctl_a_full,
output phy_ctl_full,
output [HIGHEST_LANE*12-1:0]mem_dq_out,
output [HIGHEST_LANE*12-1:0]mem_dq_ts,
input [HIGHEST_LANE*10-1:0]mem_dq_in,
output [HIGHEST_LANE-1:0] mem_dqs_out,
output [HIGHEST_LANE-1:0] mem_dqs_ts,
input [HIGHEST_LANE-1:0] mem_dqs_in,
input [1:0] byte_rd_en_oth_banks,
output [AUXOUT_WIDTH-1:0] aux_out,
output reg rst_out = 0,
output reg mcGo=0,
output phy_ctl_ready,
output ref_dll_lock,
input if_rst,
input phy_read_calib,
input phy_write_calib,
input idelay_inc,
input idelay_ce,
input idelay_ld,
input [2:0] calib_sel,
input calib_zero_ctrl,
input [HIGHEST_LANE-1:0] calib_zero_lanes,
input calib_in_common,
input po_fine_enable,
input po_coarse_enable,
input po_fine_inc,
input po_coarse_inc,
input po_counter_load_en,
input po_counter_read_en,
input [8:0] po_counter_load_val,
input po_sel_fine_oclk_delay,
output reg po_coarse_overflow,
output reg po_fine_overflow,
output reg [8:0] po_counter_read_val,
input pi_rst_dqs_find,
input pi_fine_enable,
input pi_fine_inc,
input pi_counter_load_en,
input pi_counter_read_en,
input [5:0] pi_counter_load_val,
output reg pi_fine_overflow,
output reg [5:0] pi_counter_read_val,
output reg pi_dqs_found,
output pi_dqs_found_all,
output pi_dqs_found_any,
output [HIGHEST_LANE-1:0] pi_phase_locked_lanes,
output [HIGHEST_LANE-1:0] pi_dqs_found_lanes,
output reg pi_dqs_out_of_range,
output reg pi_phase_locked,
output pi_phase_locked_all,
input [29:0] fine_delay,
input fine_delay_sel
);
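// Derived lane-type flags: DATA_CTL_x is set for control (address/command) lanes
// (DATA_CTL_N bit low); PRESENT_CTL_x / PRESENT_DATA_x mark byte lanes that both
// exist and carry control or data respectively.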
localparam DATA_CTL_A = (~DATA_CTL_N[0]);
localparam DATA_CTL_B = (~DATA_CTL_N[1]);
localparam DATA_CTL_C = (~DATA_CTL_N[2]);
localparam DATA_CTL_D = (~DATA_CTL_N[3]);
localparam PRESENT_CTL_A = BYTE_LANES[0] && ! DATA_CTL_N[0];
localparam PRESENT_CTL_B = BYTE_LANES[1] && ! DATA_CTL_N[1];
localparam PRESENT_CTL_C = BYTE_LANES[2] && ! DATA_CTL_N[2];
localparam PRESENT_CTL_D = BYTE_LANES[3] && ! DATA_CTL_N[3];
localparam PRESENT_DATA_A = BYTE_LANES[0] && DATA_CTL_N[0];
localparam PRESENT_DATA_B = BYTE_LANES[1] && DATA_CTL_N[1];
localparam PRESENT_DATA_C = BYTE_LANES[2] && DATA_CTL_N[2];
localparam PRESENT_DATA_D = BYTE_LANES[3] && DATA_CTL_N[3];
localparam PC_DATA_CTL_A = (DATA_CTL_A) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_B = (DATA_CTL_B) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_C = (DATA_CTL_C) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_D = (DATA_CTL_D) ? "FALSE" : "TRUE";
localparam A_PO_COARSE_BYPASS = (DATA_CTL_A) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam B_PO_COARSE_BYPASS = (DATA_CTL_B) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam C_PO_COARSE_BYPASS = (DATA_CTL_C) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam D_PO_COARSE_BYPASS = (DATA_CTL_D) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam IO_A_START = 41;
localparam IO_A_END = 40;
localparam IO_B_START = 43;
localparam IO_B_END = 42;
localparam IO_C_START = 45;
localparam IO_C_END = 44;
localparam IO_D_START = 47;
localparam IO_D_END = 46;
localparam IO_A_X_START = (HIGHEST_LANE * 10) + 1;
localparam IO_A_X_END = (IO_A_X_START-1);
localparam IO_B_X_START = (IO_A_X_START + 2);
localparam IO_B_X_END = (IO_B_X_START -1);
localparam IO_C_X_START = (IO_B_X_START + 2);
localparam IO_C_X_END = (IO_C_X_START -1);
localparam IO_D_X_START = (IO_C_X_START + 2);
localparam IO_D_X_END = (IO_D_X_START -1);
localparam MSB_BURST_PEND_PO = 3;
localparam MSB_BURST_PEND_PI = 7;
localparam MSB_RANK_SEL_I = MSB_BURST_PEND_PI + 8;
localparam PHASER_CTL_BUS_WIDTH = MSB_RANK_SEL_I + 1;
wire [1:0] oserdes_dqs;
wire [1:0] oserdes_dqs_ts;
wire [1:0] oserdes_dq_ts;
wire [PHASER_CTL_BUS_WIDTH-1:0] phaser_ctl_bus;
wire [7:0] in_rank;
wire [11:0] IO_A;
wire [11:0] IO_B;
wire [11:0] IO_C;
wire [11:0] IO_D;
wire [319:0] phy_din_remap;
reg A_po_counter_read_en;
wire [8:0] A_po_counter_read_val;
reg A_pi_counter_read_en;
wire [5:0] A_pi_counter_read_val;
wire A_pi_fine_overflow;
wire A_po_coarse_overflow;
wire A_po_fine_overflow;
wire A_pi_dqs_found;
wire A_pi_dqs_out_of_range;
wire A_pi_phase_locked;
wire A_pi_iserdes_rst;
reg A_pi_fine_enable;
reg A_pi_fine_inc;
reg A_pi_counter_load_en;
reg [5:0] A_pi_counter_load_val;
reg A_pi_rst_dqs_find;
reg A_po_fine_enable;
reg A_po_coarse_enable;
reg A_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg A_po_sel_fine_oclk_delay;
reg A_po_coarse_inc;
reg A_po_counter_load_en;
reg [8:0] A_po_counter_load_val;
wire A_rclk;
reg A_idelay_ce;
reg A_idelay_ld;
reg [29:0] A_fine_delay;
reg A_fine_delay_sel;
reg B_po_counter_read_en;
wire [8:0] B_po_counter_read_val;
reg B_pi_counter_read_en;
wire [5:0] B_pi_counter_read_val;
wire B_pi_fine_overflow;
wire B_po_coarse_overflow;
wire B_po_fine_overflow;
wire B_pi_phase_locked;
wire B_pi_iserdes_rst;
wire B_pi_dqs_found;
wire B_pi_dqs_out_of_range;
reg B_pi_fine_enable;
reg B_pi_fine_inc;
reg B_pi_counter_load_en;
reg [5:0] B_pi_counter_load_val;
reg B_pi_rst_dqs_find;
reg B_po_fine_enable;
reg B_po_coarse_enable;
reg B_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg B_po_coarse_inc;
reg B_po_sel_fine_oclk_delay;
reg B_po_counter_load_en;
reg [8:0] B_po_counter_load_val;
wire B_rclk;
reg B_idelay_ce;
reg B_idelay_ld;
reg [29:0] B_fine_delay;
reg B_fine_delay_sel;
reg C_pi_fine_inc;
reg D_pi_fine_inc;
reg C_pi_fine_enable;
reg D_pi_fine_enable;
reg C_po_counter_load_en;
reg D_po_counter_load_en;
reg C_po_coarse_inc;
reg D_po_coarse_inc;
reg C_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg D_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg C_po_sel_fine_oclk_delay;
reg D_po_sel_fine_oclk_delay;
reg [5:0] C_pi_counter_load_val;
reg [5:0] D_pi_counter_load_val;
reg [8:0] C_po_counter_load_val;
reg [8:0] D_po_counter_load_val;
reg C_po_coarse_enable;
reg D_po_coarse_enable;
reg C_po_fine_enable;
reg D_po_fine_enable;
wire C_po_coarse_overflow;
wire D_po_coarse_overflow;
wire C_po_fine_overflow;
wire D_po_fine_overflow;
wire [8:0] C_po_counter_read_val;
wire [8:0] D_po_counter_read_val;
reg C_po_counter_read_en;
reg D_po_counter_read_en;
wire C_pi_dqs_found;
wire D_pi_dqs_found;
wire C_pi_fine_overflow;
wire D_pi_fine_overflow;
reg C_pi_counter_read_en;
reg D_pi_counter_read_en;
reg C_pi_counter_load_en;
reg D_pi_counter_load_en;
wire C_pi_phase_locked;
wire C_pi_iserdes_rst;
wire D_pi_phase_locked;
wire D_pi_iserdes_rst;
wire C_pi_dqs_out_of_range;
wire D_pi_dqs_out_of_range;
wire [5:0] C_pi_counter_read_val;
wire [5:0] D_pi_counter_read_val;
wire C_rclk;
wire D_rclk;
reg C_idelay_ce;
reg D_idelay_ce;
reg C_idelay_ld;
reg D_idelay_ld;
reg C_pi_rst_dqs_find;
reg D_pi_rst_dqs_find;
reg [29:0] C_fine_delay;
reg [29:0] D_fine_delay;
reg C_fine_delay_sel;
reg D_fine_delay_sel;
wire pi_iserdes_rst;
wire A_if_empty;
wire B_if_empty;
wire C_if_empty;
wire D_if_empty;
wire A_byte_rd_en;
wire B_byte_rd_en;
wire C_byte_rd_en;
wire D_byte_rd_en;
wire A_if_a_empty;
wire B_if_a_empty;
wire C_if_a_empty;
wire D_if_a_empty;
//wire A_if_full;
//wire B_if_full;
//wire C_if_full;
//wire D_if_full;
//wire A_of_empty;
//wire B_of_empty;
//wire C_of_empty;
//wire D_of_empty;
wire A_of_full;
wire B_of_full;
wire C_of_full;
wire D_of_full;
wire A_of_ctl_full;
wire B_of_ctl_full;
wire C_of_ctl_full;
wire D_of_ctl_full;
wire A_of_data_full;
wire B_of_data_full;
wire C_of_data_full;
wire D_of_data_full;
wire A_of_a_full;
wire B_of_a_full;
wire C_of_a_full;
wire D_of_a_full;
wire A_pre_fifo_a_full;
wire B_pre_fifo_a_full;
wire C_pre_fifo_a_full;
wire D_pre_fifo_a_full;
wire A_of_ctl_a_full;
wire B_of_ctl_a_full;
wire C_of_ctl_a_full;
wire D_of_ctl_a_full;
wire A_of_data_a_full;
wire B_of_data_a_full;
wire C_of_data_a_full;
wire D_of_data_a_full;
wire A_pre_data_a_full;
wire B_pre_data_a_full;
wire C_pre_data_a_full;
wire D_pre_data_a_full;
wire [LP_DDR_CK_WIDTH*6-1:0] A_ddr_clk; // for generation
wire [LP_DDR_CK_WIDTH*6-1:0] B_ddr_clk; //
wire [LP_DDR_CK_WIDTH*6-1:0] C_ddr_clk; //
wire [LP_DDR_CK_WIDTH*6-1:0] D_ddr_clk; //
wire [3:0] dummy_data;
wire [31:0] _phy_ctl_wd;
wire [1:0] phy_encalib;
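// Status aggregation: the ALL/ANY reductions below mask out byte lanes that are
// absent or configured as control lanes, so only present data lanes contribute.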
assign pi_dqs_found_all =
(! PRESENT_DATA_A | A_pi_dqs_found) &
(! PRESENT_DATA_B | B_pi_dqs_found) &
(! PRESENT_DATA_C | C_pi_dqs_found) &
(! PRESENT_DATA_D | D_pi_dqs_found) ;
assign pi_dqs_found_any =
( PRESENT_DATA_A & A_pi_dqs_found) |
( PRESENT_DATA_B & B_pi_dqs_found) |
( PRESENT_DATA_C & C_pi_dqs_found) |
( PRESENT_DATA_D & D_pi_dqs_found) ;
assign pi_phase_locked_all =
(! PRESENT_DATA_A | A_pi_phase_locked) &
(! PRESENT_DATA_B | B_pi_phase_locked) &
(! PRESENT_DATA_C | C_pi_phase_locked) &
(! PRESENT_DATA_D | D_pi_phase_locked);
wire dangling_inputs = (& dummy_data) & input_sink & 1'b0; // this reduces all constant 0 values to one signal
                                                           // which is combined into another signal so that
                                                           // the other signal isn't changed. The purpose
                                                           // is to trick the tools into ignoring dangling inputs.
                                                           // Because it is ANDed with 1'b0, the contributing signals
                                                           // are folded as constants or trimmed.
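// if_empty_def selects how the per-lane input-FIFO flags combine: with it low,
// if_empty is the OR of the lane empties and byte_rd_en the AND of the lane read
// enables; with it high the combining is reversed.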
assign if_empty = !if_empty_def ? (A_if_empty | B_if_empty | C_if_empty | D_if_empty) : (A_if_empty & B_if_empty & C_if_empty & D_if_empty);
assign byte_rd_en = !if_empty_def ? (A_byte_rd_en & B_byte_rd_en & C_byte_rd_en & D_byte_rd_en) :
(A_byte_rd_en | B_byte_rd_en | C_byte_rd_en | D_byte_rd_en);
assign if_empty_or = (A_if_empty | B_if_empty | C_if_empty | D_if_empty);
assign if_empty_and = (A_if_empty & B_if_empty & C_if_empty & D_if_empty);
assign if_a_empty = A_if_a_empty | B_if_a_empty | C_if_a_empty | D_if_a_empty;
//assign if_full = A_if_full | B_if_full | C_if_full | D_if_full ;
//assign of_empty = A_of_empty & B_of_empty & C_of_empty & D_of_empty;
assign of_ctl_full = A_of_ctl_full | B_of_ctl_full | C_of_ctl_full | D_of_ctl_full ;
assign of_data_full = A_of_data_full | B_of_data_full | C_of_data_full | D_of_data_full ;
assign of_ctl_a_full = A_of_ctl_a_full | B_of_ctl_a_full | C_of_ctl_a_full | D_of_ctl_a_full ;
assign of_data_a_full = A_of_data_a_full | B_of_data_a_full | C_of_data_a_full | D_of_data_a_full | dangling_inputs ;
assign pre_data_a_full = A_pre_data_a_full | B_pre_data_a_full | C_pre_data_a_full | D_pre_data_a_full;
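// part_select_80 returns one 80-bit slice of a 320-bit bus, chosen by a 2-bit lane
// index; it is used with the LANE_REMAP fields to re-order phy_dout onto the
// physical byte lanes.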
function [79:0] part_select_80;
input [319:0] vector;
input [1:0] select;
begin
case (select)
2'b00 : part_select_80[79:0] = vector[1*80-1:0*80];
2'b01 : part_select_80[79:0] = vector[2*80-1:1*80];
2'b10 : part_select_80[79:0] = vector[3*80-1:2*80];
2'b11 : part_select_80[79:0] = vector[4*80-1:3*80];
endcase
end
endfunction
wire [319:0] phy_dout_remap;
reg rst_out_trig = 1'b0;
reg [31:0] rclk_delay;
reg rst_edge1 = 1'b0;
reg rst_edge2 = 1'b0;
reg rst_edge3 = 1'b0;
reg rst_edge_detect = 1'b0;
wire rclk_;
reg rst_out_start = 1'b0 ;
reg rst_primitives=0;
reg A_rst_primitives=0;
reg B_rst_primitives=0;
reg C_rst_primitives=0;
reg D_rst_primitives=0;
`ifdef USE_PHY_CONTROL_TEST
wire [15:0] test_output;
wire [15:0] test_input;
wire [2:0] test_select=0;
wire scan_enable = 0;
`endif
generate
genvar i;
if (RCLK_SELECT_LANE == "A") begin
assign rclk_ = A_rclk;
assign pi_iserdes_rst = A_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "B") begin
assign rclk_ = B_rclk;
assign pi_iserdes_rst = B_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "C") begin
assign rclk_ = C_rclk;
assign pi_iserdes_rst = C_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "D") begin
assign rclk_ = D_rclk;
assign pi_iserdes_rst = D_pi_iserdes_rst;
end
else begin
assign rclk_ = B_rclk; // default
end
endgenerate
assign ddr_clk[LP_DDR_CK_WIDTH*6-1:0] = A_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*12-1:LP_DDR_CK_WIDTH*6] = B_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*18-1:LP_DDR_CK_WIDTH*12] = C_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*24-1:LP_DDR_CK_WIDTH*18] = D_ddr_clk;
assign pi_phase_locked_lanes =
{(! PRESENT_DATA_A[0] | A_pi_phase_locked),
(! PRESENT_DATA_B[0] | B_pi_phase_locked) ,
(! PRESENT_DATA_C[0] | C_pi_phase_locked) ,
(! PRESENT_DATA_D[0] | D_pi_phase_locked)};
assign pi_dqs_found_lanes = {D_pi_dqs_found, C_pi_dqs_found, B_pi_dqs_found, A_pi_dqs_found};
// this block scrubs X from rclk_delay[11]
reg rclk_delay_11;
always @(rclk_delay[11]) begin : rclk_delay_11_blk
if ( rclk_delay[11])
rclk_delay_11 = 1;
else
rclk_delay_11 = 0;
end
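// Start-up sequencing: once phy_ctl_ready and phyGo are high, ones shift through
// rclk_delay; when bit 11 is reached rst_out asserts (cleared only by rst) and
// mcGo follows one phy_clk cycle later.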
always @(posedge phy_clk or posedge rst ) begin
// scrub 4-state values from rclk_delay[11]
if ( rst) begin
rst_out <= #1 0;
end
else begin
if ( rclk_delay_11)
rst_out <= #1 1;
end
end
always @(posedge phy_clk ) begin
// phy_ctl_ready drives reset of the system
rst_primitives <= !phy_ctl_ready ;
A_rst_primitives <= rst_primitives ;
B_rst_primitives <= rst_primitives ;
C_rst_primitives <= rst_primitives ;
D_rst_primitives <= rst_primitives ;
rclk_delay <= #1 (rclk_delay << 1) | (!rst_primitives && phyGo);
mcGo <= #1 rst_out ;
end
generate
if (BYTE_LANES[0]) begin
assign dummy_data[0] = 0;
end
else begin
assign dummy_data[0] = &phy_dout_remap[1*80-1:0*80];
end
if (BYTE_LANES[1]) begin
assign dummy_data[1] = 0;
end
else begin
assign dummy_data[1] = &phy_dout_remap[2*80-1:1*80];
end
if (BYTE_LANES[2]) begin
assign dummy_data[2] = 0;
end
else begin
assign dummy_data[2] = &phy_dout_remap[3*80-1:2*80];
end
if (BYTE_LANES[3]) begin
assign dummy_data[3] = 0;
end
else begin
assign dummy_data[3] = &phy_dout_remap[4*80-1:3*80];
end
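// Route each lane's output-FIFO full/almost-full flags to either the data or the
// control aggregate, depending on whether the lane carries data or control.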
if (PRESENT_DATA_A) begin
assign A_of_data_full = A_of_full;
assign A_of_ctl_full = 0;
assign A_of_data_a_full = A_of_a_full;
assign A_of_ctl_a_full = 0;
assign A_pre_data_a_full = A_pre_fifo_a_full;
end
else begin
assign A_of_ctl_full = A_of_full;
assign A_of_data_full = 0;
assign A_of_ctl_a_full = A_of_a_full;
assign A_of_data_a_full = 0;
assign A_pre_data_a_full = 0;
end
if (PRESENT_DATA_B) begin
assign B_of_data_full = B_of_full;
assign B_of_ctl_full = 0;
assign B_of_data_a_full = B_of_a_full;
assign B_of_ctl_a_full = 0;
assign B_pre_data_a_full = B_pre_fifo_a_full;
end
else begin
assign B_of_ctl_full = B_of_full;
assign B_of_data_full = 0;
assign B_of_ctl_a_full = B_of_a_full;
assign B_of_data_a_full = 0;
assign B_pre_data_a_full = 0;
end
if (PRESENT_DATA_C) begin
assign C_of_data_full = C_of_full;
assign C_of_ctl_full = 0;
assign C_of_data_a_full = C_of_a_full;
assign C_of_ctl_a_full = 0;
assign C_pre_data_a_full = C_pre_fifo_a_full;
end
else begin
assign C_of_ctl_full = C_of_full;
assign C_of_data_full = 0;
assign C_of_ctl_a_full = C_of_a_full;
assign C_of_data_a_full = 0;
assign C_pre_data_a_full = 0;
end
if (PRESENT_DATA_D) begin
assign D_of_data_full = D_of_full;
assign D_of_ctl_full = 0;
assign D_of_data_a_full = D_of_a_full;
assign D_of_ctl_a_full = 0;
assign D_pre_data_a_full = D_pre_fifo_a_full;
end
else begin
assign D_of_ctl_full = D_of_full;
assign D_of_data_full = 0;
assign D_of_ctl_a_full = D_of_a_full;
assign D_of_data_a_full = 0;
assign D_pre_data_a_full = 0;
end
// byte lane must exist and be a data lane.
if (PRESENT_DATA_A )
case ( LANE_REMAP[1:0] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[79:0];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[79:0];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[79:0];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[79:0];
endcase
else
case ( LANE_REMAP[1:0] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_B )
case ( LANE_REMAP[5:4] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[159:80];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[159:80];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[159:80];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[159:80];
endcase
else
if (HIGHEST_LANE > 1)
case ( LANE_REMAP[5:4] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_C)
case ( LANE_REMAP[9:8] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[239:160];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[239:160];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[239:160];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[239:160];
endcase
else
if (HIGHEST_LANE > 2)
case ( LANE_REMAP[9:8] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_D )
case ( LANE_REMAP[13:12] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[319:240];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[319:240];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[319:240];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[319:240];
endcase
else
if (HIGHEST_LANE > 3)
case ( LANE_REMAP[13:12] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
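// _phy_ctl_wd splices the data_offset input into the PHY control word when more
// than one lane is present (nominally bits [22:17], assuming the usual 6-bit
// PC_DATA_OFFSET_RANGE); with a single lane the word passes through unchanged.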
if (HIGHEST_LANE > 1)
assign _phy_ctl_wd = {phy_ctl_wd[31:23], data_offset, phy_ctl_wd[16:0]};
if (HIGHEST_LANE == 1)
assign _phy_ctl_wd = phy_ctl_wd;
//BUFR #(.BUFR_DIVIDE ("1")) rclk_buf(.I(rclk_), .O(rclk), .CE (1'b1), .CLR (pi_iserdes_rst));
BUFIO rclk_buf(.I(rclk_), .O(rclk) );
if ( BYTE_LANES[0] ) begin : ddr_byte_lane_A
assign phy_dout_remap[79:0] = part_select_80(phy_dout, (LANE_REMAP[1:0]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("A"),
.PO_DATA_CTL (PC_DATA_CTL_N[0] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[11:0]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[11:0]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (A_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (A_PI_BURST_MODE),
.PI_CLKOUT_DIV (A_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (A_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (A_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (A_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (A_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (A_PO_CLKOUT_DIV),
.PO_FINE_DELAY (A_PO_FINE_DELAY),
.PO_COARSE_BYPASS (A_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (A_PO_COARSE_DELAY),
.PO_OCLK_DELAY (A_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (A_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (A_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (A_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (A_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (A_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (A_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (A_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_A(
.mem_dq_out (mem_dq_out[11:0]),
.mem_dq_ts (mem_dq_ts[11:0]),
.mem_dq_in (mem_dq_in[9:0]),
.mem_dqs_out (mem_dqs_out[0]),
.mem_dqs_ts (mem_dqs_ts[0]),
.mem_dqs_in (mem_dqs_in[0]),
.rst (A_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (A_ddr_clk),
.rclk (A_rclk),
.pi_dqs_found (A_pi_dqs_found),
.dqs_out_of_range (A_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (A_if_a_empty),
.if_empty (A_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*A_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*A_of_empty*/),
.of_a_full (A_of_a_full),
.of_full (A_of_full),
.pre_fifo_a_full (A_pre_fifo_a_full),
.phy_din (phy_din_remap[79:0]),
.phy_dout (phy_dout_remap[79:0]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({B_byte_rd_en,C_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (A_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (A_idelay_ce),
.idelay_ld (A_idelay_ld),
.pi_rst_dqs_find (A_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (A_po_fine_enable),
.po_coarse_enable (A_po_coarse_enable),
.po_fine_inc (A_po_fine_inc),
.po_coarse_inc (A_po_coarse_inc),
.po_counter_load_en (A_po_counter_load_en),
.po_counter_read_en (A_po_counter_read_en),
.po_counter_load_val (A_po_counter_load_val),
.po_coarse_overflow (A_po_coarse_overflow),
.po_fine_overflow (A_po_fine_overflow),
.po_counter_read_val (A_po_counter_read_val),
.po_sel_fine_oclk_delay(A_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (A_pi_fine_enable),
.pi_fine_inc (A_pi_fine_inc),
.pi_counter_load_en (A_pi_counter_load_en),
.pi_counter_read_en (A_pi_counter_read_en),
.pi_counter_load_val (A_pi_counter_load_val),
.pi_fine_overflow (A_pi_fine_overflow),
.pi_counter_read_val (A_pi_counter_read_val),
.pi_iserdes_rst (A_pi_iserdes_rst),
.pi_phase_locked (A_pi_phase_locked),
.fine_delay (A_fine_delay),
.fine_delay_sel (A_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_A
assign A_of_a_full = 1'b0;
assign A_of_full = 1'b0;
assign A_pre_fifo_a_full = 1'b0;
assign A_if_empty = 1'b0;
assign A_byte_rd_en = 1'b1;
assign A_if_a_empty = 1'b0;
assign A_pi_phase_locked = 1;
assign A_pi_dqs_found = 1;
assign A_rclk = 0;
assign A_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign A_pi_counter_read_val = 0;
assign A_po_counter_read_val = 0;
assign A_pi_fine_overflow = 0;
assign A_po_coarse_overflow = 0;
assign A_po_fine_overflow = 0;
end
if ( BYTE_LANES[1] ) begin : ddr_byte_lane_B
assign phy_dout_remap[159:80] = part_select_80(phy_dout, (LANE_REMAP[5:4]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("B"),
.PO_DATA_CTL (PC_DATA_CTL_N[1] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[23:12]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[23:12]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (B_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (B_PI_BURST_MODE),
.PI_CLKOUT_DIV (B_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (B_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (B_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (B_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (B_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (B_PO_CLKOUT_DIV),
.PO_FINE_DELAY (B_PO_FINE_DELAY),
.PO_COARSE_BYPASS (B_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (B_PO_COARSE_DELAY),
.PO_OCLK_DELAY (B_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (B_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (B_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (B_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (B_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (B_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (B_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (B_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_B(
.mem_dq_out (mem_dq_out[23:12]),
.mem_dq_ts (mem_dq_ts[23:12]),
.mem_dq_in (mem_dq_in[19:10]),
.mem_dqs_out (mem_dqs_out[1]),
.mem_dqs_ts (mem_dqs_ts[1]),
.mem_dqs_in (mem_dqs_in[1]),
.rst (B_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (B_ddr_clk),
.rclk (B_rclk),
.pi_dqs_found (B_pi_dqs_found),
.dqs_out_of_range (B_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (B_if_a_empty),
.if_empty (B_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*B_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*B_of_empty*/),
.of_a_full (B_of_a_full),
.of_full (B_of_full),
.pre_fifo_a_full (B_pre_fifo_a_full),
.phy_din (phy_din_remap[159:80]),
.phy_dout (phy_dout_remap[159:80]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,C_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (B_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (B_idelay_ce),
.idelay_ld (B_idelay_ld),
.pi_rst_dqs_find (B_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (B_po_fine_enable),
.po_coarse_enable (B_po_coarse_enable),
.po_fine_inc (B_po_fine_inc),
.po_coarse_inc (B_po_coarse_inc),
.po_counter_load_en (B_po_counter_load_en),
.po_counter_read_en (B_po_counter_read_en),
.po_counter_load_val (B_po_counter_load_val),
.po_coarse_overflow (B_po_coarse_overflow),
.po_fine_overflow (B_po_fine_overflow),
.po_counter_read_val (B_po_counter_read_val),
.po_sel_fine_oclk_delay(B_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (B_pi_fine_enable),
.pi_fine_inc (B_pi_fine_inc),
.pi_counter_load_en (B_pi_counter_load_en),
.pi_counter_read_en (B_pi_counter_read_en),
.pi_counter_load_val (B_pi_counter_load_val),
.pi_fine_overflow (B_pi_fine_overflow),
.pi_counter_read_val (B_pi_counter_read_val),
.pi_iserdes_rst (B_pi_iserdes_rst),
.pi_phase_locked (B_pi_phase_locked),
.fine_delay (B_fine_delay),
.fine_delay_sel (B_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_B
assign B_of_a_full = 1'b0;
assign B_of_full = 1'b0;
assign B_pre_fifo_a_full = 1'b0;
assign B_if_empty = 1'b0;
assign B_if_a_empty = 1'b0;
assign B_byte_rd_en = 1'b1;
assign B_pi_phase_locked = 1;
assign B_pi_dqs_found = 1;
assign B_rclk = 0;
assign B_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign B_pi_counter_read_val = 0;
assign B_po_counter_read_val = 0;
assign B_pi_fine_overflow = 0;
assign B_po_coarse_overflow = 0;
assign B_po_fine_overflow = 0;
end
if ( BYTE_LANES[2] ) begin : ddr_byte_lane_C
assign phy_dout_remap[239:160] = part_select_80(phy_dout, (LANE_REMAP[9:8]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("C"),
.PO_DATA_CTL (PC_DATA_CTL_N[2] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[35:24]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[35:24]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (C_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (C_PI_BURST_MODE),
.PI_CLKOUT_DIV (C_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (C_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (C_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (C_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (C_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (C_PO_CLKOUT_DIV),
.PO_FINE_DELAY (C_PO_FINE_DELAY),
.PO_COARSE_BYPASS (C_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (C_PO_COARSE_DELAY),
.PO_OCLK_DELAY (C_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (C_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (C_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (C_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (C_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (C_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (C_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (C_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_C(
.mem_dq_out (mem_dq_out[35:24]),
.mem_dq_ts (mem_dq_ts[35:24]),
.mem_dq_in (mem_dq_in[29:20]),
.mem_dqs_out (mem_dqs_out[2]),
.mem_dqs_ts (mem_dqs_ts[2]),
.mem_dqs_in (mem_dqs_in[2]),
.rst (C_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (C_ddr_clk),
.rclk (C_rclk),
.pi_dqs_found (C_pi_dqs_found),
.dqs_out_of_range (C_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (C_if_a_empty),
.if_empty (C_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*C_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*C_of_empty*/),
.of_a_full (C_of_a_full),
.of_full (C_of_full),
.pre_fifo_a_full (C_pre_fifo_a_full),
.phy_din (phy_din_remap[239:160]),
.phy_dout (phy_dout_remap[239:160]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,B_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (C_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (C_idelay_ce),
.idelay_ld (C_idelay_ld),
.pi_rst_dqs_find (C_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (C_po_fine_enable),
.po_coarse_enable (C_po_coarse_enable),
.po_fine_inc (C_po_fine_inc),
.po_coarse_inc (C_po_coarse_inc),
.po_counter_load_en (C_po_counter_load_en),
.po_counter_read_en (C_po_counter_read_en),
.po_counter_load_val (C_po_counter_load_val),
.po_coarse_overflow (C_po_coarse_overflow),
.po_fine_overflow (C_po_fine_overflow),
.po_counter_read_val (C_po_counter_read_val),
.po_sel_fine_oclk_delay(C_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (C_pi_fine_enable),
.pi_fine_inc (C_pi_fine_inc),
.pi_counter_load_en (C_pi_counter_load_en),
.pi_counter_read_en (C_pi_counter_read_en),
.pi_counter_load_val (C_pi_counter_load_val),
.pi_fine_overflow (C_pi_fine_overflow),
.pi_counter_read_val (C_pi_counter_read_val),
.pi_iserdes_rst (C_pi_iserdes_rst),
.pi_phase_locked (C_pi_phase_locked),
.fine_delay (C_fine_delay),
.fine_delay_sel (C_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_C
assign C_of_a_full = 1'b0;
assign C_of_full = 1'b0;
assign C_pre_fifo_a_full = 1'b0;
assign C_if_empty = 1'b0;
assign C_byte_rd_en = 1'b1;
assign C_if_a_empty = 1'b0;
assign C_pi_phase_locked = 1;
assign C_pi_dqs_found = 1;
assign C_rclk = 0;
assign C_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign C_pi_counter_read_val = 0;
assign C_po_counter_read_val = 0;
assign C_pi_fine_overflow = 0;
assign C_po_coarse_overflow = 0;
assign C_po_fine_overflow = 0;
end
if ( BYTE_LANES[3] ) begin : ddr_byte_lane_D
assign phy_dout_remap[319:240] = part_select_80(phy_dout, (LANE_REMAP[13:12]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("D"),
.PO_DATA_CTL (PC_DATA_CTL_N[3] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[47:36]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[47:36]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (D_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (D_PI_BURST_MODE),
.PI_CLKOUT_DIV (D_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (D_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (D_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (D_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (D_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (D_PO_CLKOUT_DIV),
.PO_FINE_DELAY (D_PO_FINE_DELAY),
.PO_COARSE_BYPASS (D_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (D_PO_COARSE_DELAY),
.PO_OCLK_DELAY (D_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (D_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (D_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (D_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (D_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (D_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (D_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (D_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_D(
.mem_dq_out (mem_dq_out[47:36]),
.mem_dq_ts (mem_dq_ts[47:36]),
.mem_dq_in (mem_dq_in[39:30]),
.mem_dqs_out (mem_dqs_out[3]),
.mem_dqs_ts (mem_dqs_ts[3]),
.mem_dqs_in (mem_dqs_in[3]),
.rst (D_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (D_ddr_clk),
.rclk (D_rclk),
.pi_dqs_found (D_pi_dqs_found),
.dqs_out_of_range (D_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (D_if_a_empty),
.if_empty (D_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*D_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*D_of_empty*/),
.of_a_full (D_of_a_full),
.of_full (D_of_full),
.pre_fifo_a_full (D_pre_fifo_a_full),
.phy_din (phy_din_remap[319:240]),
.phy_dout (phy_dout_remap[319:240]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.idelay_inc (idelay_inc),
.idelay_ce (D_idelay_ce),
.idelay_ld (D_idelay_ld),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,B_byte_rd_en,C_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (D_byte_rd_en),
// calibration signals
.pi_rst_dqs_find (D_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (D_po_fine_enable),
.po_coarse_enable (D_po_coarse_enable),
.po_fine_inc (D_po_fine_inc),
.po_coarse_inc (D_po_coarse_inc),
.po_counter_load_en (D_po_counter_load_en),
.po_counter_read_en (D_po_counter_read_en),
.po_counter_load_val (D_po_counter_load_val),
.po_coarse_overflow (D_po_coarse_overflow),
.po_fine_overflow (D_po_fine_overflow),
.po_counter_read_val (D_po_counter_read_val),
.po_sel_fine_oclk_delay(D_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (D_pi_fine_enable),
.pi_fine_inc (D_pi_fine_inc),
.pi_counter_load_en (D_pi_counter_load_en),
.pi_counter_read_en (D_pi_counter_read_en),
.pi_counter_load_val (D_pi_counter_load_val),
.pi_fine_overflow (D_pi_fine_overflow),
.pi_counter_read_val (D_pi_counter_read_val),
.pi_iserdes_rst (D_pi_iserdes_rst),
.pi_phase_locked (D_pi_phase_locked),
.fine_delay (D_fine_delay),
.fine_delay_sel (D_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_D
assign D_of_a_full = 1'b0;
assign D_of_full = 1'b0;
assign D_pre_fifo_a_full = 1'b0;
assign D_if_empty = 1'b0;
assign D_byte_rd_en = 1'b1;
assign D_if_a_empty = 1'b0;
assign D_rclk = 0;
assign D_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign D_pi_dqs_found = 1;
assign D_pi_phase_locked = 1;
assign D_pi_counter_read_val = 0;
assign D_po_counter_read_val = 0;
assign D_pi_fine_overflow = 0;
assign D_po_coarse_overflow = 0;
assign D_po_fine_overflow = 0;
end
endgenerate
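// Fold the rank-select bits returned by PHY_CONTROL (in_rank) into the upper field
// of the phaser control bus that is fanned out to every byte lane.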
assign phaser_ctl_bus[MSB_RANK_SEL_I : MSB_RANK_SEL_I - 7] = in_rank;
PHY_CONTROL #(
.AO_WRLVL_EN ( PC_AO_WRLVL_EN),
.AO_TOGGLE ( PC_AO_TOGGLE),
.BURST_MODE ( PC_BURST_MODE),
.CO_DURATION ( PC_CO_DURATION ),
.CLK_RATIO ( PC_CLK_RATIO),
.DATA_CTL_A_N ( PC_DATA_CTL_A),
.DATA_CTL_B_N ( PC_DATA_CTL_B),
.DATA_CTL_C_N ( PC_DATA_CTL_C),
.DATA_CTL_D_N ( PC_DATA_CTL_D),
.DI_DURATION ( PC_DI_DURATION ),
.DO_DURATION ( PC_DO_DURATION ),
.EVENTS_DELAY ( PC_EVENTS_DELAY),
.FOUR_WINDOW_CLOCKS ( PC_FOUR_WINDOW_CLOCKS),
.MULTI_REGION ( PC_MULTI_REGION ),
.PHY_COUNT_ENABLE ( PC_PHY_COUNT_EN),
.DISABLE_SEQ_MATCH ( PC_DISABLE_SEQ_MATCH),
.SYNC_MODE ( PC_SYNC_MODE),
.CMD_OFFSET ( PC_CMD_OFFSET),
.RD_CMD_OFFSET_0 ( PC_RD_CMD_OFFSET_0),
.RD_CMD_OFFSET_1 ( PC_RD_CMD_OFFSET_1),
.RD_CMD_OFFSET_2 ( PC_RD_CMD_OFFSET_2),
.RD_CMD_OFFSET_3 ( PC_RD_CMD_OFFSET_3),
.RD_DURATION_0 ( PC_RD_DURATION_0),
.RD_DURATION_1 ( PC_RD_DURATION_1),
.RD_DURATION_2 ( PC_RD_DURATION_2),
.RD_DURATION_3 ( PC_RD_DURATION_3),
.WR_CMD_OFFSET_0 ( PC_WR_CMD_OFFSET_0),
.WR_CMD_OFFSET_1 ( PC_WR_CMD_OFFSET_1),
.WR_CMD_OFFSET_2 ( PC_WR_CMD_OFFSET_2),
.WR_CMD_OFFSET_3 ( PC_WR_CMD_OFFSET_3),
.WR_DURATION_0 ( PC_WR_DURATION_0),
.WR_DURATION_1 ( PC_WR_DURATION_1),
.WR_DURATION_2 ( PC_WR_DURATION_2),
.WR_DURATION_3 ( PC_WR_DURATION_3)
) phy_control_i (
.AUXOUTPUT (aux_out),
.INBURSTPENDING (phaser_ctl_bus[MSB_BURST_PEND_PI:MSB_BURST_PEND_PI-3]),
.INRANKA (in_rank[1:0]),
.INRANKB (in_rank[3:2]),
.INRANKC (in_rank[5:4]),
.INRANKD (in_rank[7:6]),
.OUTBURSTPENDING (phaser_ctl_bus[MSB_BURST_PEND_PO:MSB_BURST_PEND_PO-3]),
.PCENABLECALIB (phy_encalib),
.PHYCTLALMOSTFULL (phy_ctl_a_full),
.PHYCTLEMPTY (phy_ctl_empty),
.PHYCTLFULL (phy_ctl_full),
.PHYCTLREADY (phy_ctl_ready),
.MEMREFCLK (mem_refclk),
.PHYCLK (phy_ctl_clk),
.PHYCTLMSTREMPTY (phy_ctl_mstr_empty),
.PHYCTLWD (_phy_ctl_wd),
.PHYCTLWRENABLE (phy_ctl_wr),
.PLLLOCK (pll_lock),
.REFDLLLOCK (ref_dll_lock), // is reset while !locked
.RESET (rst),
.SYNCIN (sync_pulse),
.READCALIBENABLE (phy_read_calib),
.WRITECALIBENABLE (phy_write_calib)
`ifdef USE_PHY_CONTROL_TEST
, .TESTINPUT (16'b0),
.TESTOUTPUT (test_output),
.TESTSELECT (test_select),
.SCANENABLEN (scan_enable)
`endif
);
// register outputs to give extra slack in timing
always @(posedge phy_clk ) begin
case (calib_sel[1:0])
2'h0: begin
po_coarse_overflow <= #1 A_po_coarse_overflow;
po_fine_overflow <= #1 A_po_fine_overflow;
po_counter_read_val <= #1 A_po_counter_read_val;
pi_fine_overflow <= #1 A_pi_fine_overflow;
pi_counter_read_val<= #1 A_pi_counter_read_val;
pi_phase_locked <= #1 A_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 A_pi_dqs_found;
pi_dqs_out_of_range <= #1 A_pi_dqs_out_of_range;
end
2'h1: begin
po_coarse_overflow <= #1 B_po_coarse_overflow;
po_fine_overflow <= #1 B_po_fine_overflow;
po_counter_read_val <= #1 B_po_counter_read_val;
pi_fine_overflow <= #1 B_pi_fine_overflow;
pi_counter_read_val <= #1 B_pi_counter_read_val;
pi_phase_locked <= #1 B_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 B_pi_dqs_found;
pi_dqs_out_of_range <= #1 B_pi_dqs_out_of_range;
end
2'h2: begin
po_coarse_overflow <= #1 C_po_coarse_overflow;
po_fine_overflow <= #1 C_po_fine_overflow;
po_counter_read_val <= #1 C_po_counter_read_val;
pi_fine_overflow <= #1 C_pi_fine_overflow;
pi_counter_read_val <= #1 C_pi_counter_read_val;
pi_phase_locked <= #1 C_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 C_pi_dqs_found;
pi_dqs_out_of_range <= #1 C_pi_dqs_out_of_range;
end
2'h3: begin
po_coarse_overflow <= #1 D_po_coarse_overflow;
po_fine_overflow <= #1 D_po_fine_overflow;
po_counter_read_val <= #1 D_po_counter_read_val;
pi_fine_overflow <= #1 D_pi_fine_overflow;
pi_counter_read_val <= #1 D_pi_counter_read_val;
pi_phase_locked <= #1 D_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 D_pi_dqs_found;
pi_dqs_out_of_range <= #1 D_pi_dqs_out_of_range;
end
default: begin
po_coarse_overflow <= po_coarse_overflow;
end
endcase
end
wire B_mux_ctrl;
wire C_mux_ctrl;
wire D_mux_ctrl;
generate
if (HIGHEST_LANE > 1)
assign B_mux_ctrl = ( !calib_zero_lanes[1] && ( ! calib_zero_ctrl || DATA_CTL_N[1]));
else
assign B_mux_ctrl = 0;
if (HIGHEST_LANE > 2)
assign C_mux_ctrl = ( !calib_zero_lanes[2] && (! calib_zero_ctrl || DATA_CTL_N[2]));
else
assign C_mux_ctrl = 0;
if (HIGHEST_LANE > 3)
assign D_mux_ctrl = ( !calib_zero_lanes[3] && ( ! calib_zero_ctrl || DATA_CTL_N[3]));
else
assign D_mux_ctrl = 0;
endgenerate
always @(*) begin
A_pi_fine_enable = 0;
A_pi_fine_inc = 0;
A_pi_counter_load_en = 0;
A_pi_counter_read_en = 0;
A_pi_counter_load_val = 0;
A_pi_rst_dqs_find = 0;
A_po_fine_enable = 0;
A_po_coarse_enable = 0;
A_po_fine_inc = 0;
A_po_coarse_inc = 0;
A_po_counter_load_en = 0;
A_po_counter_read_en = 0;
A_po_counter_load_val = 0;
A_po_sel_fine_oclk_delay = 0;
A_idelay_ce = 0;
A_idelay_ld = 0;
A_fine_delay = 0;
A_fine_delay_sel = 0;
B_pi_fine_enable = 0;
B_pi_fine_inc = 0;
B_pi_counter_load_en = 0;
B_pi_counter_read_en = 0;
B_pi_counter_load_val = 0;
B_pi_rst_dqs_find = 0;
B_po_fine_enable = 0;
B_po_coarse_enable = 0;
B_po_fine_inc = 0;
B_po_coarse_inc = 0;
B_po_counter_load_en = 0;
B_po_counter_read_en = 0;
B_po_counter_load_val = 0;
B_po_sel_fine_oclk_delay = 0;
B_idelay_ce = 0;
B_idelay_ld = 0;
B_fine_delay = 0;
B_fine_delay_sel = 0;
C_pi_fine_enable = 0;
C_pi_fine_inc = 0;
C_pi_counter_load_en = 0;
C_pi_counter_read_en = 0;
C_pi_counter_load_val = 0;
C_pi_rst_dqs_find = 0;
C_po_fine_enable = 0;
C_po_coarse_enable = 0;
C_po_fine_inc = 0;
C_po_coarse_inc = 0;
C_po_counter_load_en = 0;
C_po_counter_read_en = 0;
C_po_counter_load_val = 0;
C_po_sel_fine_oclk_delay = 0;
C_idelay_ce = 0;
C_idelay_ld = 0;
C_fine_delay = 0;
C_fine_delay_sel = 0;
D_pi_fine_enable = 0;
D_pi_fine_inc = 0;
D_pi_counter_load_en = 0;
D_pi_counter_read_en = 0;
D_pi_counter_load_val = 0;
D_pi_rst_dqs_find = 0;
D_po_fine_enable = 0;
D_po_coarse_enable = 0;
D_po_fine_inc = 0;
D_po_coarse_inc = 0;
D_po_counter_load_en = 0;
D_po_counter_read_en = 0;
D_po_counter_load_val = 0;
D_po_sel_fine_oclk_delay = 0;
D_idelay_ce = 0;
D_idelay_ld = 0;
D_fine_delay = 0;
D_fine_delay_sel = 0;
if ( calib_sel[2]) begin
// if this is asserted, all calib signals are deasserted
A_pi_fine_enable = 0;
A_pi_fine_inc = 0;
A_pi_counter_load_en = 0;
A_pi_counter_read_en = 0;
A_pi_counter_load_val = 0;
A_pi_rst_dqs_find = 0;
A_po_fine_enable = 0;
A_po_coarse_enable = 0;
A_po_fine_inc = 0;
A_po_coarse_inc = 0;
A_po_counter_load_en = 0;
A_po_counter_read_en = 0;
A_po_counter_load_val = 0;
A_po_sel_fine_oclk_delay = 0;
A_idelay_ce = 0;
A_idelay_ld = 0;
A_fine_delay = 0;
A_fine_delay_sel = 0;
B_pi_fine_enable = 0;
B_pi_fine_inc = 0;
B_pi_counter_load_en = 0;
B_pi_counter_read_en = 0;
B_pi_counter_load_val = 0;
B_pi_rst_dqs_find = 0;
B_po_fine_enable = 0;
B_po_coarse_enable = 0;
B_po_fine_inc = 0;
B_po_coarse_inc = 0;
B_po_counter_load_en = 0;
B_po_counter_read_en = 0;
B_po_counter_load_val = 0;
B_po_sel_fine_oclk_delay = 0;
B_idelay_ce = 0;
B_idelay_ld = 0;
B_fine_delay = 0;
B_fine_delay_sel = 0;
C_pi_fine_enable = 0;
C_pi_fine_inc = 0;
C_pi_counter_load_en = 0;
C_pi_counter_read_en = 0;
C_pi_counter_load_val = 0;
C_pi_rst_dqs_find = 0;
C_po_fine_enable = 0;
C_po_coarse_enable = 0;
C_po_fine_inc = 0;
C_po_coarse_inc = 0;
C_po_counter_load_en = 0;
C_po_counter_read_en = 0;
C_po_counter_load_val = 0;
C_po_sel_fine_oclk_delay = 0;
C_idelay_ce = 0;
C_idelay_ld = 0;
C_fine_delay = 0;
C_fine_delay_sel = 0;
D_pi_fine_enable = 0;
D_pi_fine_inc = 0;
D_pi_counter_load_en = 0;
D_pi_counter_read_en = 0;
D_pi_counter_load_val = 0;
D_pi_rst_dqs_find = 0;
D_po_fine_enable = 0;
D_po_coarse_enable = 0;
D_po_fine_inc = 0;
D_po_coarse_inc = 0;
D_po_counter_load_en = 0;
D_po_counter_read_en = 0;
D_po_counter_load_val = 0;
D_po_sel_fine_oclk_delay = 0;
D_idelay_ce = 0;
D_idelay_ld = 0;
D_fine_delay = 0;
D_fine_delay_sel = 0;
end else
if (calib_in_common) begin
// if this is asserted, each signal is broadcast to all phasers
// in common
if ( !calib_zero_lanes[0] && (! calib_zero_ctrl || DATA_CTL_N[0])) begin
A_pi_fine_enable = pi_fine_enable;
A_pi_fine_inc = pi_fine_inc;
A_pi_counter_load_en = pi_counter_load_en;
A_pi_counter_read_en = pi_counter_read_en;
A_pi_counter_load_val = pi_counter_load_val;
A_pi_rst_dqs_find = pi_rst_dqs_find;
A_po_fine_enable = po_fine_enable;
A_po_coarse_enable = po_coarse_enable;
A_po_fine_inc = po_fine_inc;
A_po_coarse_inc = po_coarse_inc;
A_po_counter_load_en = po_counter_load_en;
A_po_counter_read_en = po_counter_read_en;
A_po_counter_load_val = po_counter_load_val;
A_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
A_idelay_ce = idelay_ce;
A_idelay_ld = idelay_ld;
A_fine_delay = fine_delay ;
A_fine_delay_sel = fine_delay_sel;
end
if ( B_mux_ctrl) begin
B_pi_fine_enable = pi_fine_enable;
B_pi_fine_inc = pi_fine_inc;
B_pi_counter_load_en = pi_counter_load_en;
B_pi_counter_read_en = pi_counter_read_en;
B_pi_counter_load_val = pi_counter_load_val;
B_pi_rst_dqs_find = pi_rst_dqs_find;
B_po_fine_enable = po_fine_enable;
B_po_coarse_enable = po_coarse_enable;
B_po_fine_inc = po_fine_inc;
B_po_coarse_inc = po_coarse_inc;
B_po_counter_load_en = po_counter_load_en;
B_po_counter_read_en = po_counter_read_en;
B_po_counter_load_val = po_counter_load_val;
B_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
B_idelay_ce = idelay_ce;
B_idelay_ld = idelay_ld;
B_fine_delay = fine_delay ;
B_fine_delay_sel = fine_delay_sel;
end
if ( C_mux_ctrl) begin
C_pi_fine_enable = pi_fine_enable;
C_pi_fine_inc = pi_fine_inc;
C_pi_counter_load_en = pi_counter_load_en;
C_pi_counter_read_en = pi_counter_read_en;
C_pi_counter_load_val = pi_counter_load_val;
C_pi_rst_dqs_find = pi_rst_dqs_find;
C_po_fine_enable = po_fine_enable;
C_po_coarse_enable = po_coarse_enable;
C_po_fine_inc = po_fine_inc;
C_po_coarse_inc = po_coarse_inc;
C_po_counter_load_en = po_counter_load_en;
C_po_counter_read_en = po_counter_read_en;
C_po_counter_load_val = po_counter_load_val;
C_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
C_idelay_ce = idelay_ce;
C_idelay_ld = idelay_ld;
C_fine_delay = fine_delay ;
C_fine_delay_sel = fine_delay_sel;
end
if ( D_mux_ctrl) begin
D_pi_fine_enable = pi_fine_enable;
D_pi_fine_inc = pi_fine_inc;
D_pi_counter_load_en = pi_counter_load_en;
D_pi_counter_read_en = pi_counter_read_en;
D_pi_counter_load_val = pi_counter_load_val;
D_pi_rst_dqs_find = pi_rst_dqs_find;
D_po_fine_enable = po_fine_enable;
D_po_coarse_enable = po_coarse_enable;
D_po_fine_inc = po_fine_inc;
D_po_coarse_inc = po_coarse_inc;
D_po_counter_load_en = po_counter_load_en;
D_po_counter_read_en = po_counter_read_en;
D_po_counter_load_val = po_counter_load_val;
D_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
D_idelay_ce = idelay_ce;
D_idelay_ld = idelay_ld;
D_fine_delay = fine_delay ;
D_fine_delay_sel = fine_delay_sel;
end
end
else begin
// otherwise, only a single phaser is selected
case (calib_sel[1:0])
0: begin
A_pi_fine_enable = pi_fine_enable;
A_pi_fine_inc = pi_fine_inc;
A_pi_counter_load_en = pi_counter_load_en;
A_pi_counter_read_en = pi_counter_read_en;
A_pi_counter_load_val = pi_counter_load_val;
A_pi_rst_dqs_find = pi_rst_dqs_find;
A_po_fine_enable = po_fine_enable;
A_po_coarse_enable = po_coarse_enable;
A_po_fine_inc = po_fine_inc;
A_po_coarse_inc = po_coarse_inc;
A_po_counter_load_en = po_counter_load_en;
A_po_counter_read_en = po_counter_read_en;
A_po_counter_load_val = po_counter_load_val;
A_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
A_idelay_ce = idelay_ce;
A_idelay_ld = idelay_ld;
A_fine_delay = fine_delay ;
A_fine_delay_sel = fine_delay_sel;
end
1: begin
B_pi_fine_enable = pi_fine_enable;
B_pi_fine_inc = pi_fine_inc;
B_pi_counter_load_en = pi_counter_load_en;
B_pi_counter_read_en = pi_counter_read_en;
B_pi_counter_load_val = pi_counter_load_val;
B_pi_rst_dqs_find = pi_rst_dqs_find;
B_po_fine_enable = po_fine_enable;
B_po_coarse_enable = po_coarse_enable;
B_po_fine_inc = po_fine_inc;
B_po_coarse_inc = po_coarse_inc;
B_po_counter_load_en = po_counter_load_en;
B_po_counter_read_en = po_counter_read_en;
B_po_counter_load_val = po_counter_load_val;
B_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
B_idelay_ce = idelay_ce;
B_idelay_ld = idelay_ld;
B_fine_delay = fine_delay ;
B_fine_delay_sel = fine_delay_sel;
end
2: begin
C_pi_fine_enable = pi_fine_enable;
C_pi_fine_inc = pi_fine_inc;
C_pi_counter_load_en = pi_counter_load_en;
C_pi_counter_read_en = pi_counter_read_en;
C_pi_counter_load_val = pi_counter_load_val;
C_pi_rst_dqs_find = pi_rst_dqs_find;
C_po_fine_enable = po_fine_enable;
C_po_coarse_enable = po_coarse_enable;
C_po_fine_inc = po_fine_inc;
C_po_coarse_inc = po_coarse_inc;
C_po_counter_load_en = po_counter_load_en;
C_po_counter_read_en = po_counter_read_en;
C_po_counter_load_val = po_counter_load_val;
C_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
C_idelay_ce = idelay_ce;
C_idelay_ld = idelay_ld;
C_fine_delay = fine_delay ;
C_fine_delay_sel = fine_delay_sel;
end
3: begin
D_pi_fine_enable = pi_fine_enable;
D_pi_fine_inc = pi_fine_inc;
D_pi_counter_load_en = pi_counter_load_en;
D_pi_counter_read_en = pi_counter_read_en;
D_pi_counter_load_val = pi_counter_load_val;
D_pi_rst_dqs_find = pi_rst_dqs_find;
D_po_fine_enable = po_fine_enable;
D_po_coarse_enable = po_coarse_enable;
D_po_fine_inc = po_fine_inc;
D_po_coarse_inc = po_coarse_inc;
D_po_counter_load_en = po_counter_load_en;
D_po_counter_read_en = po_counter_read_en;
D_po_counter_load_val = po_counter_load_val;
D_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
D_idelay_ce = idelay_ce;
D_idelay_ld = idelay_ld;
D_fine_delay = fine_delay ;
D_fine_delay_sel = fine_delay_sel;
end
endcase
end
end
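// Summary of the mux above: calib_sel[2] forces every per-lane calibration
// control to the zero defaults assigned at the top of this block;
// calib_in_common broadcasts the shared calibration inputs to all lanes that
// are not masked off; otherwise calib_sel[1:0] steers them to exactly one of
// lanes A..D.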
// obligatory PHASER_REF
PHASER_REF phaser_ref_i(
.LOCKED (ref_dll_lock),
.CLKIN (freq_refclk),
.PWRDWN (1'b0),
.RST ( ! pll_lock)
);
// optional idelay_ctrl
generate
if ( GENERATE_IDELAYCTRL == "TRUE")
IDELAYCTRL idelayctrl (
.RDY (/*idelayctrl_rdy*/),
.REFCLK (idelayctrl_refclk),
.RST (rst)
);
endgenerate
endmodule
|
module mig_7series_v2_3_ddr_phy_4lanes #(
parameter GENERATE_IDELAYCTRL = "TRUE",
parameter IODELAY_GRP = "IODELAY_MIG",
parameter FPGA_SPEED_GRADE = 1,
parameter BANK_TYPE = "HP_IO", // # = "HP_IO", "HPL_IO", "HR_IO", "HRL_IO"
parameter BYTELANES_DDR_CK = 24'b0010_0010_0010_0010_0010_0010,
parameter NUM_DDR_CK = 1,
// next three parameter fields correspond to byte lanes for lane order DCBA
parameter BYTE_LANES = 4'b1111, // lane existence, one per lane
parameter DATA_CTL_N = 4'b1111, // data or control, per lane
parameter BITLANES = 48'hffff_ffff_ffff,
parameter BITLANES_OUTONLY = 48'h0000_0000_0000,
parameter LANE_REMAP = 16'h3210,// 4-bit index
// used to rewire to one of four
// input/output bus lanes
// example: 0321 remaps lanes as:
// D->A
// C->D
// B->C
// A->B
parameter LAST_BANK = "FALSE",
parameter USE_PRE_POST_FIFO = "FALSE",
parameter RCLK_SELECT_LANE = "B",
parameter real TCK = 0.00,
parameter SYNTHESIS = "FALSE",
parameter PO_CTL_COARSE_BYPASS = "FALSE",
parameter PO_FINE_DELAY = 0,
parameter PI_SEL_CLK_OFFSET = 0,
// phy_control parameter used in other parameters
parameter PC_CLK_RATIO = 4,
//phaser_in parameters
parameter A_PI_FREQ_REF_DIV = "NONE",
parameter A_PI_CLKOUT_DIV = 2,
parameter A_PI_BURST_MODE = "TRUE",
parameter A_PI_OUTPUT_CLK_SRC = "DELAYED_REF",
parameter A_PI_FINE_DELAY = 60,
parameter A_PI_SYNC_IN_DIV_RST = "TRUE",
parameter B_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter B_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter B_PI_BURST_MODE = A_PI_BURST_MODE,
parameter B_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter B_PI_FINE_DELAY = A_PI_FINE_DELAY,
parameter B_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
parameter C_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter C_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter C_PI_BURST_MODE = A_PI_BURST_MODE,
parameter C_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter C_PI_FINE_DELAY = 0,
parameter C_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
parameter D_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter D_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter D_PI_BURST_MODE = A_PI_BURST_MODE,
parameter D_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter D_PI_FINE_DELAY = 0,
parameter D_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
//phaser_out parameters
parameter A_PO_CLKOUT_DIV = (DATA_CTL_N[0] == 0) ? PC_CLK_RATIO : 2,
parameter A_PO_FINE_DELAY = PO_FINE_DELAY,
parameter A_PO_COARSE_DELAY = 0,
parameter A_PO_OCLK_DELAY = 0,
parameter A_PO_OCLKDELAY_INV = "FALSE",
parameter A_PO_OUTPUT_CLK_SRC = "DELAYED_REF",
parameter A_PO_SYNC_IN_DIV_RST = "TRUE",
//parameter A_PO_SYNC_IN_DIV_RST = "FALSE",
parameter B_PO_CLKOUT_DIV = (DATA_CTL_N[1] == 0) ? PC_CLK_RATIO : 2,
parameter B_PO_FINE_DELAY = PO_FINE_DELAY,
parameter B_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter B_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter B_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter B_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter B_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter C_PO_CLKOUT_DIV = (DATA_CTL_N[2] == 0) ? PC_CLK_RATIO : 2,
parameter C_PO_FINE_DELAY = PO_FINE_DELAY,
parameter C_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter C_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter C_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter C_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter C_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter D_PO_CLKOUT_DIV = (DATA_CTL_N[3] == 0) ? PC_CLK_RATIO : 2,
parameter D_PO_FINE_DELAY = PO_FINE_DELAY,
parameter D_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter D_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter D_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter D_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter D_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter A_IDELAYE2_IDELAY_TYPE = "VARIABLE",
parameter A_IDELAYE2_IDELAY_VALUE = 00,
parameter B_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter B_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
parameter C_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter C_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
parameter D_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter D_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
// phy_control parameters
parameter PC_BURST_MODE = "TRUE",
parameter PC_DATA_CTL_N = DATA_CTL_N,
parameter PC_CMD_OFFSET = 0,
parameter PC_RD_CMD_OFFSET_0 = 0,
parameter PC_RD_CMD_OFFSET_1 = 0,
parameter PC_RD_CMD_OFFSET_2 = 0,
parameter PC_RD_CMD_OFFSET_3 = 0,
parameter PC_CO_DURATION = 1,
parameter PC_DI_DURATION = 1,
parameter PC_DO_DURATION = 1,
parameter PC_RD_DURATION_0 = 0,
parameter PC_RD_DURATION_1 = 0,
parameter PC_RD_DURATION_2 = 0,
parameter PC_RD_DURATION_3 = 0,
parameter PC_WR_CMD_OFFSET_0 = 5,
parameter PC_WR_CMD_OFFSET_1 = 5,
parameter PC_WR_CMD_OFFSET_2 = 5,
parameter PC_WR_CMD_OFFSET_3 = 5,
parameter PC_WR_DURATION_0 = 6,
parameter PC_WR_DURATION_1 = 6,
parameter PC_WR_DURATION_2 = 6,
parameter PC_WR_DURATION_3 = 6,
parameter PC_AO_WRLVL_EN = 0,
parameter PC_AO_TOGGLE = 4'b0101, // odd bits are toggle (CKE)
parameter PC_FOUR_WINDOW_CLOCKS = 63,
parameter PC_EVENTS_DELAY = 18,
parameter PC_PHY_COUNT_EN = "TRUE",
parameter PC_SYNC_MODE = "TRUE",
parameter PC_DISABLE_SEQ_MATCH = "TRUE",
parameter PC_MULTI_REGION = "FALSE",
// io fifo parameters
parameter A_OF_ARRAY_MODE = (DATA_CTL_N[0] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter B_OF_ARRAY_MODE = (DATA_CTL_N[1] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter C_OF_ARRAY_MODE = (DATA_CTL_N[2] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter D_OF_ARRAY_MODE = (DATA_CTL_N[3] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter OF_ALMOST_EMPTY_VALUE = 1,
parameter OF_ALMOST_FULL_VALUE = 1,
parameter OF_OUTPUT_DISABLE = "TRUE",
parameter OF_SYNCHRONOUS_MODE = PC_SYNC_MODE,
parameter A_OS_DATA_RATE = "DDR",
parameter A_OS_DATA_WIDTH = 4,
parameter B_OS_DATA_RATE = A_OS_DATA_RATE,
parameter B_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter C_OS_DATA_RATE = A_OS_DATA_RATE,
parameter C_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter D_OS_DATA_RATE = A_OS_DATA_RATE,
parameter D_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter A_IF_ARRAY_MODE = "ARRAY_MODE_4_X_8",
parameter B_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter C_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter D_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter IF_ALMOST_EMPTY_VALUE = 1,
parameter IF_ALMOST_FULL_VALUE = 1,
parameter IF_SYNCHRONOUS_MODE = PC_SYNC_MODE,
// this is used locally, not for external pushdown
// NOTE: the 0+ prefix is needed in each term to coerce the 1-bit value to an integer for the addition;
// otherwise four 1-bit values would be added, producing a 1-bit result.
parameter HIGHEST_LANE = LAST_BANK == "FALSE" ? 4 : (BYTE_LANES[3] ? 4 : BYTE_LANES[2] ? 3 : BYTE_LANES[1] ? 2 : 1),
parameter N_CTL_LANES = ((0+(!DATA_CTL_N[0]) & BYTE_LANES[0]) + (0+(!DATA_CTL_N[1]) & BYTE_LANES[1]) + (0+(!DATA_CTL_N[2]) & BYTE_LANES[2]) + (0+(!DATA_CTL_N[3]) & BYTE_LANES[3])),
parameter N_BYTE_LANES = (0+BYTE_LANES[0]) + (0+BYTE_LANES[1]) + (0+BYTE_LANES[2]) + (0+BYTE_LANES[3]),
parameter N_DATA_LANES = N_BYTE_LANES - N_CTL_LANES,
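// Worked example of the lane-count arithmetic above, assuming the default
// overrides are kept (BYTE_LANES = 4'b1111, DATA_CTL_N = 4'b1111,
// LAST_BANK = "FALSE"): N_CTL_LANES = 0, N_BYTE_LANES = 4,
// N_DATA_LANES = 4 and HIGHEST_LANE = 4.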
// assume ODT per rank plus any declared CKEs
parameter AUXOUT_WIDTH = 4,
parameter LP_DDR_CK_WIDTH = 2
,parameter CKE_ODT_AUX = "FALSE"
)
(
//`include "phy.vh"
input rst,
input phy_clk,
input phy_ctl_clk,
input freq_refclk,
input mem_refclk,
input mem_refclk_div4,
input pll_lock,
input sync_pulse,
input idelayctrl_refclk,
input [HIGHEST_LANE*80-1:0] phy_dout,
input phy_cmd_wr_en,
input phy_data_wr_en,
input phy_rd_en,
input phy_ctl_mstr_empty,
input [31:0] phy_ctl_wd,
input [`PC_DATA_OFFSET_RANGE] data_offset,
input phy_ctl_wr,
input if_empty_def,
input phyGo,
input input_sink,
output [(LP_DDR_CK_WIDTH*24)-1:0] ddr_clk, // to memory
output rclk,
output if_a_empty,
output if_empty,
output byte_rd_en,
output if_empty_or,
output if_empty_and,
output of_ctl_a_full,
output of_data_a_full,
output of_ctl_full,
output of_data_full,
output pre_data_a_full,
output [HIGHEST_LANE*80-1:0]phy_din, // assume input bus same size as output bus
output phy_ctl_empty,
output phy_ctl_a_full,
output phy_ctl_full,
output [HIGHEST_LANE*12-1:0]mem_dq_out,
output [HIGHEST_LANE*12-1:0]mem_dq_ts,
input [HIGHEST_LANE*10-1:0]mem_dq_in,
output [HIGHEST_LANE-1:0] mem_dqs_out,
output [HIGHEST_LANE-1:0] mem_dqs_ts,
input [HIGHEST_LANE-1:0] mem_dqs_in,
input [1:0] byte_rd_en_oth_banks,
output [AUXOUT_WIDTH-1:0] aux_out,
output reg rst_out = 0,
output reg mcGo=0,
output phy_ctl_ready,
output ref_dll_lock,
input if_rst,
input phy_read_calib,
input phy_write_calib,
input idelay_inc,
input idelay_ce,
input idelay_ld,
input [2:0] calib_sel,
input calib_zero_ctrl,
input [HIGHEST_LANE-1:0] calib_zero_lanes,
input calib_in_common,
input po_fine_enable,
input po_coarse_enable,
input po_fine_inc,
input po_coarse_inc,
input po_counter_load_en,
input po_counter_read_en,
input [8:0] po_counter_load_val,
input po_sel_fine_oclk_delay,
output reg po_coarse_overflow,
output reg po_fine_overflow,
output reg [8:0] po_counter_read_val,
input pi_rst_dqs_find,
input pi_fine_enable,
input pi_fine_inc,
input pi_counter_load_en,
input pi_counter_read_en,
input [5:0] pi_counter_load_val,
output reg pi_fine_overflow,
output reg [5:0] pi_counter_read_val,
output reg pi_dqs_found,
output pi_dqs_found_all,
output pi_dqs_found_any,
output [HIGHEST_LANE-1:0] pi_phase_locked_lanes,
output [HIGHEST_LANE-1:0] pi_dqs_found_lanes,
output reg pi_dqs_out_of_range,
output reg pi_phase_locked,
output pi_phase_locked_all,
input [29:0] fine_delay,
input fine_delay_sel
);
localparam DATA_CTL_A = (~DATA_CTL_N[0]);
localparam DATA_CTL_B = (~DATA_CTL_N[1]);
localparam DATA_CTL_C = (~DATA_CTL_N[2]);
localparam DATA_CTL_D = (~DATA_CTL_N[3]);
localparam PRESENT_CTL_A = BYTE_LANES[0] && ! DATA_CTL_N[0];
localparam PRESENT_CTL_B = BYTE_LANES[1] && ! DATA_CTL_N[1];
localparam PRESENT_CTL_C = BYTE_LANES[2] && ! DATA_CTL_N[2];
localparam PRESENT_CTL_D = BYTE_LANES[3] && ! DATA_CTL_N[3];
localparam PRESENT_DATA_A = BYTE_LANES[0] && DATA_CTL_N[0];
localparam PRESENT_DATA_B = BYTE_LANES[1] && DATA_CTL_N[1];
localparam PRESENT_DATA_C = BYTE_LANES[2] && DATA_CTL_N[2];
localparam PRESENT_DATA_D = BYTE_LANES[3] && DATA_CTL_N[3];
localparam PC_DATA_CTL_A = (DATA_CTL_A) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_B = (DATA_CTL_B) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_C = (DATA_CTL_C) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_D = (DATA_CTL_D) ? "FALSE" : "TRUE";
localparam A_PO_COARSE_BYPASS = (DATA_CTL_A) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam B_PO_COARSE_BYPASS = (DATA_CTL_B) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam C_PO_COARSE_BYPASS = (DATA_CTL_C) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam D_PO_COARSE_BYPASS = (DATA_CTL_D) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam IO_A_START = 41;
localparam IO_A_END = 40;
localparam IO_B_START = 43;
localparam IO_B_END = 42;
localparam IO_C_START = 45;
localparam IO_C_END = 44;
localparam IO_D_START = 47;
localparam IO_D_END = 46;
localparam IO_A_X_START = (HIGHEST_LANE * 10) + 1;
localparam IO_A_X_END = (IO_A_X_START-1);
localparam IO_B_X_START = (IO_A_X_START + 2);
localparam IO_B_X_END = (IO_B_X_START -1);
localparam IO_C_X_START = (IO_B_X_START + 2);
localparam IO_C_X_END = (IO_C_X_START -1);
localparam IO_D_X_START = (IO_C_X_START + 2);
localparam IO_D_X_END = (IO_D_X_START -1);
localparam MSB_BURST_PEND_PO = 3;
localparam MSB_BURST_PEND_PI = 7;
localparam MSB_RANK_SEL_I = MSB_BURST_PEND_PI + 8;
localparam PHASER_CTL_BUS_WIDTH = MSB_RANK_SEL_I + 1;
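// phaser_ctl_bus field layout implied by the localparams above:
//   bits [MSB_BURST_PEND_PO:0]             (3:0)  - Phaser_Out burst-pending flags
//   bits [MSB_BURST_PEND_PI:4]             (7:4)  - Phaser_In burst-pending flags
//   bits [MSB_RANK_SEL_I:MSB_RANK_SEL_I-7] (15:8) - rank select (in_rank)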
wire [1:0] oserdes_dqs;
wire [1:0] oserdes_dqs_ts;
wire [1:0] oserdes_dq_ts;
wire [PHASER_CTL_BUS_WIDTH-1:0] phaser_ctl_bus;
wire [7:0] in_rank;
wire [11:0] IO_A;
wire [11:0] IO_B;
wire [11:0] IO_C;
wire [11:0] IO_D;
wire [319:0] phy_din_remap;
reg A_po_counter_read_en;
wire [8:0] A_po_counter_read_val;
reg A_pi_counter_read_en;
wire [5:0] A_pi_counter_read_val;
wire A_pi_fine_overflow;
wire A_po_coarse_overflow;
wire A_po_fine_overflow;
wire A_pi_dqs_found;
wire A_pi_dqs_out_of_range;
wire A_pi_phase_locked;
wire A_pi_iserdes_rst;
reg A_pi_fine_enable;
reg A_pi_fine_inc;
reg A_pi_counter_load_en;
reg [5:0] A_pi_counter_load_val;
reg A_pi_rst_dqs_find;
reg A_po_fine_enable;
reg A_po_coarse_enable;
reg A_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg A_po_sel_fine_oclk_delay;
reg A_po_coarse_inc;
reg A_po_counter_load_en;
reg [8:0] A_po_counter_load_val;
wire A_rclk;
reg A_idelay_ce;
reg A_idelay_ld;
reg [29:0] A_fine_delay;
reg A_fine_delay_sel;
reg B_po_counter_read_en;
wire [8:0] B_po_counter_read_val;
reg B_pi_counter_read_en;
wire [5:0] B_pi_counter_read_val;
wire B_pi_fine_overflow;
wire B_po_coarse_overflow;
wire B_po_fine_overflow;
wire B_pi_phase_locked;
wire B_pi_iserdes_rst;
wire B_pi_dqs_found;
wire B_pi_dqs_out_of_range;
reg B_pi_fine_enable;
reg B_pi_fine_inc;
reg B_pi_counter_load_en;
reg [5:0] B_pi_counter_load_val;
reg B_pi_rst_dqs_find;
reg B_po_fine_enable;
reg B_po_coarse_enable;
reg B_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg B_po_coarse_inc;
reg B_po_sel_fine_oclk_delay;
reg B_po_counter_load_en;
reg [8:0] B_po_counter_load_val;
wire B_rclk;
reg B_idelay_ce;
reg B_idelay_ld;
reg [29:0] B_fine_delay;
reg B_fine_delay_sel;
reg C_pi_fine_inc;
reg D_pi_fine_inc;
reg C_pi_fine_enable;
reg D_pi_fine_enable;
reg C_po_counter_load_en;
reg D_po_counter_load_en;
reg C_po_coarse_inc;
reg D_po_coarse_inc;
reg C_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg D_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg C_po_sel_fine_oclk_delay;
reg D_po_sel_fine_oclk_delay;
reg [5:0] C_pi_counter_load_val;
reg [5:0] D_pi_counter_load_val;
reg [8:0] C_po_counter_load_val;
reg [8:0] D_po_counter_load_val;
reg C_po_coarse_enable;
reg D_po_coarse_enable;
reg C_po_fine_enable;
reg D_po_fine_enable;
wire C_po_coarse_overflow;
wire D_po_coarse_overflow;
wire C_po_fine_overflow;
wire D_po_fine_overflow;
wire [8:0] C_po_counter_read_val;
wire [8:0] D_po_counter_read_val;
reg C_po_counter_read_en;
reg D_po_counter_read_en;
wire C_pi_dqs_found;
wire D_pi_dqs_found;
wire C_pi_fine_overflow;
wire D_pi_fine_overflow;
reg C_pi_counter_read_en;
reg D_pi_counter_read_en;
reg C_pi_counter_load_en;
reg D_pi_counter_load_en;
wire C_pi_phase_locked;
wire C_pi_iserdes_rst;
wire D_pi_phase_locked;
wire D_pi_iserdes_rst;
wire C_pi_dqs_out_of_range;
wire D_pi_dqs_out_of_range;
wire [5:0] C_pi_counter_read_val;
wire [5:0] D_pi_counter_read_val;
wire C_rclk;
wire D_rclk;
reg C_idelay_ce;
reg D_idelay_ce;
reg C_idelay_ld;
reg D_idelay_ld;
reg C_pi_rst_dqs_find;
reg D_pi_rst_dqs_find;
reg [29:0] C_fine_delay;
reg [29:0] D_fine_delay;
reg C_fine_delay_sel;
reg D_fine_delay_sel;
wire pi_iserdes_rst;
wire A_if_empty;
wire B_if_empty;
wire C_if_empty;
wire D_if_empty;
wire A_byte_rd_en;
wire B_byte_rd_en;
wire C_byte_rd_en;
wire D_byte_rd_en;
wire A_if_a_empty;
wire B_if_a_empty;
wire C_if_a_empty;
wire D_if_a_empty;
//wire A_if_full;
//wire B_if_full;
//wire C_if_full;
//wire D_if_full;
//wire A_of_empty;
//wire B_of_empty;
//wire C_of_empty;
//wire D_of_empty;
wire A_of_full;
wire B_of_full;
wire C_of_full;
wire D_of_full;
wire A_of_ctl_full;
wire B_of_ctl_full;
wire C_of_ctl_full;
wire D_of_ctl_full;
wire A_of_data_full;
wire B_of_data_full;
wire C_of_data_full;
wire D_of_data_full;
wire A_of_a_full;
wire B_of_a_full;
wire C_of_a_full;
wire D_of_a_full;
wire A_pre_fifo_a_full;
wire B_pre_fifo_a_full;
wire C_pre_fifo_a_full;
wire D_pre_fifo_a_full;
wire A_of_ctl_a_full;
wire B_of_ctl_a_full;
wire C_of_ctl_a_full;
wire D_of_ctl_a_full;
wire A_of_data_a_full;
wire B_of_data_a_full;
wire C_of_data_a_full;
wire D_of_data_a_full;
wire A_pre_data_a_full;
wire B_pre_data_a_full;
wire C_pre_data_a_full;
wire D_pre_data_a_full;
wire [LP_DDR_CK_WIDTH*6-1:0] A_ddr_clk; // per-lane DDR CK outputs, concatenated onto ddr_clk below
wire [LP_DDR_CK_WIDTH*6-1:0] B_ddr_clk;
wire [LP_DDR_CK_WIDTH*6-1:0] C_ddr_clk;
wire [LP_DDR_CK_WIDTH*6-1:0] D_ddr_clk;
wire [3:0] dummy_data;
wire [31:0] _phy_ctl_wd;
wire [1:0] phy_encalib;
assign pi_dqs_found_all =
(! PRESENT_DATA_A | A_pi_dqs_found) &
(! PRESENT_DATA_B | B_pi_dqs_found) &
(! PRESENT_DATA_C | C_pi_dqs_found) &
(! PRESENT_DATA_D | D_pi_dqs_found) ;
assign pi_dqs_found_any =
( PRESENT_DATA_A & A_pi_dqs_found) |
( PRESENT_DATA_B & B_pi_dqs_found) |
( PRESENT_DATA_C & C_pi_dqs_found) |
( PRESENT_DATA_D & D_pi_dqs_found) ;
assign pi_phase_locked_all =
(! PRESENT_DATA_A | A_pi_phase_locked) &
(! PRESENT_DATA_B | B_pi_phase_locked) &
(! PRESENT_DATA_C | C_pi_phase_locked) &
(! PRESENT_DATA_D | D_pi_phase_locked);
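// In the all/any aggregates above, lanes that are absent or configured as
// control lanes have PRESENT_DATA_x = 0, so they can never block the *_all
// terms or contribute to pi_dqs_found_any.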
wire dangling_inputs = (& dummy_data) & input_sink & 1'b0; // this reduces all constant-0 values to one signal,
// which is then combined into another signal in a way that
// leaves that signal's value unchanged. The purpose
// is to keep the tools from complaining about dangling inputs.
// Because it is ANDed with 1'b0, the contributing signals
// are folded as constants or trimmed.
assign if_empty = !if_empty_def ? (A_if_empty | B_if_empty | C_if_empty | D_if_empty) : (A_if_empty & B_if_empty & C_if_empty & D_if_empty);
assign byte_rd_en = !if_empty_def ? (A_byte_rd_en & B_byte_rd_en & C_byte_rd_en & D_byte_rd_en) :
(A_byte_rd_en | B_byte_rd_en | C_byte_rd_en | D_byte_rd_en);
assign if_empty_or = (A_if_empty | B_if_empty | C_if_empty | D_if_empty);
assign if_empty_and = (A_if_empty & B_if_empty & C_if_empty & D_if_empty);
assign if_a_empty = A_if_a_empty | B_if_a_empty | C_if_a_empty | D_if_a_empty;
//assign if_full = A_if_full | B_if_full | C_if_full | D_if_full ;
//assign of_empty = A_of_empty & B_of_empty & C_of_empty & D_of_empty;
assign of_ctl_full = A_of_ctl_full | B_of_ctl_full | C_of_ctl_full | D_of_ctl_full ;
assign of_data_full = A_of_data_full | B_of_data_full | C_of_data_full | D_of_data_full ;
assign of_ctl_a_full = A_of_ctl_a_full | B_of_ctl_a_full | C_of_ctl_a_full | D_of_ctl_a_full ;
assign of_data_a_full = A_of_data_a_full | B_of_data_a_full | C_of_data_a_full | D_of_data_a_full | dangling_inputs ;
assign pre_data_a_full = A_pre_data_a_full | B_pre_data_a_full | C_pre_data_a_full | D_pre_data_a_full;
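// The bank-level full / almost-full flags above are simple ORs of the
// per-lane flags, so a single lane backing up is enough to assert the
// corresponding bank flag; if_empty and byte_rd_en instead switch between
// AND and OR forms based on if_empty_def.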
function [79:0] part_select_80;
input [319:0] vector;
input [1:0] select;
begin
case (select)
2'b00 : part_select_80[79:0] = vector[1*80-1:0*80];
2'b01 : part_select_80[79:0] = vector[2*80-1:1*80];
2'b10 : part_select_80[79:0] = vector[3*80-1:2*80];
2'b11 : part_select_80[79:0] = vector[4*80-1:3*80];
endcase
end
endfunction
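// part_select_80 returns one 80-bit slice of a 320-bit bus, selected by a
// 2-bit LANE_REMAP nibble. A minimal usage sketch, mirroring the lane-A
// remap below:
//   phy_dout_remap[79:0] = part_select_80(phy_dout, LANE_REMAP[1:0]);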
wire [319:0] phy_dout_remap;
reg rst_out_trig = 1'b0;
reg [31:0] rclk_delay;
reg rst_edge1 = 1'b0;
reg rst_edge2 = 1'b0;
reg rst_edge3 = 1'b0;
reg rst_edge_detect = 1'b0;
wire rclk_;
reg rst_out_start = 1'b0 ;
reg rst_primitives=0;
reg A_rst_primitives=0;
reg B_rst_primitives=0;
reg C_rst_primitives=0;
reg D_rst_primitives=0;
`ifdef USE_PHY_CONTROL_TEST
wire [15:0] test_output;
wire [15:0] test_input;
wire [2:0] test_select=0;
wire scan_enable = 0;
`endif
generate
genvar i;
if (RCLK_SELECT_LANE == "A") begin
assign rclk_ = A_rclk;
assign pi_iserdes_rst = A_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "B") begin
assign rclk_ = B_rclk;
assign pi_iserdes_rst = B_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "C") begin
assign rclk_ = C_rclk;
assign pi_iserdes_rst = C_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "D") begin
assign rclk_ = D_rclk;
assign pi_iserdes_rst = D_pi_iserdes_rst;
end
else begin
assign rclk_ = B_rclk; // default
end
endgenerate
assign ddr_clk[LP_DDR_CK_WIDTH*6-1:0] = A_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*12-1:LP_DDR_CK_WIDTH*6] = B_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*18-1:LP_DDR_CK_WIDTH*12] = C_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*24-1:LP_DDR_CK_WIDTH*18] = D_ddr_clk;
assign pi_phase_locked_lanes =
{(! PRESENT_DATA_A[0] | A_pi_phase_locked),
(! PRESENT_DATA_B[0] | B_pi_phase_locked) ,
(! PRESENT_DATA_C[0] | C_pi_phase_locked) ,
(! PRESENT_DATA_D[0] | D_pi_phase_locked)};
assign pi_dqs_found_lanes = {D_pi_dqs_found, C_pi_dqs_found, B_pi_dqs_found, A_pi_dqs_found};
// this block scrubs X from rclk_delay[11]
reg rclk_delay_11;
always @(rclk_delay[11]) begin : rclk_delay_11_blk
if ( rclk_delay[11])
rclk_delay_11 = 1;
else
rclk_delay_11 = 0;
end
always @(posedge phy_clk or posedge rst ) begin
// scrub 4-state values from rclk_delay[11]
if ( rst) begin
rst_out <= #1 0;
end
else begin
if ( rclk_delay_11)
rst_out <= #1 1;
end
end
always @(posedge phy_clk ) begin
// phy_ctl_ready drives reset of the system
rst_primitives <= !phy_ctl_ready ;
A_rst_primitives <= rst_primitives ;
B_rst_primitives <= rst_primitives ;
C_rst_primitives <= rst_primitives ;
D_rst_primitives <= rst_primitives ;
rclk_delay <= #1 (rclk_delay << 1) | (!rst_primitives && phyGo);
mcGo <= #1 rst_out ;
end
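// rclk_delay is a 32-bit shift register clocked by phy_clk: it shifts in a 1
// once the primitives are out of reset and phyGo is asserted, so rst_out is
// released roughly a dozen phy_clk cycles later (when bit 11 goes high) and
// mcGo follows rst_out by one more cycle.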
generate
if (BYTE_LANES[0]) begin
assign dummy_data[0] = 0;
end
else begin
assign dummy_data[0] = &phy_dout_remap[1*80-1:0*80];
end
if (BYTE_LANES[1]) begin
assign dummy_data[1] = 0;
end
else begin
assign dummy_data[1] = &phy_dout_remap[2*80-1:1*80];
end
if (BYTE_LANES[2]) begin
assign dummy_data[2] = 0;
end
else begin
assign dummy_data[2] = &phy_dout_remap[3*80-1:2*80];
end
if (BYTE_LANES[3]) begin
assign dummy_data[3] = 0;
end
else begin
assign dummy_data[3] = &phy_dout_remap[4*80-1:3*80];
end
if (PRESENT_DATA_A) begin
assign A_of_data_full = A_of_full;
assign A_of_ctl_full = 0;
assign A_of_data_a_full = A_of_a_full;
assign A_of_ctl_a_full = 0;
assign A_pre_data_a_full = A_pre_fifo_a_full;
end
else begin
assign A_of_ctl_full = A_of_full;
assign A_of_data_full = 0;
assign A_of_ctl_a_full = A_of_a_full;
assign A_of_data_a_full = 0;
assign A_pre_data_a_full = 0;
end
if (PRESENT_DATA_B) begin
assign B_of_data_full = B_of_full;
assign B_of_ctl_full = 0;
assign B_of_data_a_full = B_of_a_full;
assign B_of_ctl_a_full = 0;
assign B_pre_data_a_full = B_pre_fifo_a_full;
end
else begin
assign B_of_ctl_full = B_of_full;
assign B_of_data_full = 0;
assign B_of_ctl_a_full = B_of_a_full;
assign B_of_data_a_full = 0;
assign B_pre_data_a_full = 0;
end
if (PRESENT_DATA_C) begin
assign C_of_data_full = C_of_full;
assign C_of_ctl_full = 0;
assign C_of_data_a_full = C_of_a_full;
assign C_of_ctl_a_full = 0;
assign C_pre_data_a_full = C_pre_fifo_a_full;
end
else begin
assign C_of_ctl_full = C_of_full;
assign C_of_data_full = 0;
assign C_of_ctl_a_full = C_of_a_full;
assign C_of_data_a_full = 0;
assign C_pre_data_a_full = 0;
end
if (PRESENT_DATA_D) begin
assign D_of_data_full = D_of_full;
assign D_of_ctl_full = 0;
assign D_of_data_a_full = D_of_a_full;
assign D_of_ctl_a_full = 0;
assign D_pre_data_a_full = D_pre_fifo_a_full;
end
else begin
assign D_of_ctl_full = D_of_full;
assign D_of_data_full = 0;
assign D_of_ctl_a_full = D_of_a_full;
assign D_of_data_a_full = 0;
assign D_pre_data_a_full = 0;
end
// byte lane must exist and be a data lane.
if (PRESENT_DATA_A )
case ( LANE_REMAP[1:0] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[79:0];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[79:0];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[79:0];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[79:0];
endcase
else
case ( LANE_REMAP[1:0] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_B )
case ( LANE_REMAP[5:4] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[159:80];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[159:80];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[159:80];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[159:80];
endcase
else
if (HIGHEST_LANE > 1)
case ( LANE_REMAP[5:4] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_C)
case ( LANE_REMAP[9:8] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[239:160];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[239:160];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[239:160];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[239:160];
endcase
else
if (HIGHEST_LANE > 2)
case ( LANE_REMAP[9:8] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_D )
case ( LANE_REMAP[13:12] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[319:240];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[319:240];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[319:240];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[319:240];
endcase
else
if (HIGHEST_LANE > 3)
case ( LANE_REMAP[13:12] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
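// Each LANE_REMAP nibble above steers a physical byte lane's 80-bit read
// data into the selected logical quarter of phy_din; lanes that are absent
// or control-only drive that quarter with zeros so phy_din is fully driven.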
if (HIGHEST_LANE > 1)
assign _phy_ctl_wd = {phy_ctl_wd[31:23], data_offset, phy_ctl_wd[16:0]};
if (HIGHEST_LANE == 1)
assign _phy_ctl_wd = phy_ctl_wd;
//BUFR #(.BUFR_DIVIDE ("1")) rclk_buf(.I(rclk_), .O(rclk), .CE (1'b1), .CLR (pi_iserdes_rst));
BUFIO rclk_buf(.I(rclk_), .O(rclk) );
if ( BYTE_LANES[0] ) begin : ddr_byte_lane_A
assign phy_dout_remap[79:0] = part_select_80(phy_dout, (LANE_REMAP[1:0]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("A"),
.PO_DATA_CTL (PC_DATA_CTL_N[0] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[11:0]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[11:0]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (A_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (A_PI_BURST_MODE),
.PI_CLKOUT_DIV (A_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (A_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (A_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (A_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (A_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (A_PO_CLKOUT_DIV),
.PO_FINE_DELAY (A_PO_FINE_DELAY),
.PO_COARSE_BYPASS (A_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (A_PO_COARSE_DELAY),
.PO_OCLK_DELAY (A_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (A_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (A_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (A_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (A_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (A_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (A_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (A_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_A(
.mem_dq_out (mem_dq_out[11:0]),
.mem_dq_ts (mem_dq_ts[11:0]),
.mem_dq_in (mem_dq_in[9:0]),
.mem_dqs_out (mem_dqs_out[0]),
.mem_dqs_ts (mem_dqs_ts[0]),
.mem_dqs_in (mem_dqs_in[0]),
.rst (A_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (A_ddr_clk),
.rclk (A_rclk),
.pi_dqs_found (A_pi_dqs_found),
.dqs_out_of_range (A_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (A_if_a_empty),
.if_empty (A_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*A_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*A_of_empty*/),
.of_a_full (A_of_a_full),
.of_full (A_of_full),
.pre_fifo_a_full (A_pre_fifo_a_full),
.phy_din (phy_din_remap[79:0]),
.phy_dout (phy_dout_remap[79:0]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({B_byte_rd_en,C_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (A_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (A_idelay_ce),
.idelay_ld (A_idelay_ld),
.pi_rst_dqs_find (A_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (A_po_fine_enable),
.po_coarse_enable (A_po_coarse_enable),
.po_fine_inc (A_po_fine_inc),
.po_coarse_inc (A_po_coarse_inc),
.po_counter_load_en (A_po_counter_load_en),
.po_counter_read_en (A_po_counter_read_en),
.po_counter_load_val (A_po_counter_load_val),
.po_coarse_overflow (A_po_coarse_overflow),
.po_fine_overflow (A_po_fine_overflow),
.po_counter_read_val (A_po_counter_read_val),
.po_sel_fine_oclk_delay(A_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (A_pi_fine_enable),
.pi_fine_inc (A_pi_fine_inc),
.pi_counter_load_en (A_pi_counter_load_en),
.pi_counter_read_en (A_pi_counter_read_en),
.pi_counter_load_val (A_pi_counter_load_val),
.pi_fine_overflow (A_pi_fine_overflow),
.pi_counter_read_val (A_pi_counter_read_val),
.pi_iserdes_rst (A_pi_iserdes_rst),
.pi_phase_locked (A_pi_phase_locked),
.fine_delay (A_fine_delay),
.fine_delay_sel (A_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_A
assign A_of_a_full = 1'b0;
assign A_of_full = 1'b0;
assign A_pre_fifo_a_full = 1'b0;
assign A_if_empty = 1'b0;
assign A_byte_rd_en = 1'b1;
assign A_if_a_empty = 1'b0;
assign A_pi_phase_locked = 1;
assign A_pi_dqs_found = 1;
assign A_rclk = 0;
assign A_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign A_pi_counter_read_val = 0;
assign A_po_counter_read_val = 0;
assign A_pi_fine_overflow = 0;
assign A_po_coarse_overflow = 0;
assign A_po_fine_overflow = 0;
end
if ( BYTE_LANES[1] ) begin : ddr_byte_lane_B
assign phy_dout_remap[159:80] = part_select_80(phy_dout, (LANE_REMAP[5:4]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("B"),
.PO_DATA_CTL (PC_DATA_CTL_N[1] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[23:12]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[23:12]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (B_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (B_PI_BURST_MODE),
.PI_CLKOUT_DIV (B_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (B_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (B_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (B_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (B_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (B_PO_CLKOUT_DIV),
.PO_FINE_DELAY (B_PO_FINE_DELAY),
.PO_COARSE_BYPASS (B_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (B_PO_COARSE_DELAY),
.PO_OCLK_DELAY (B_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (B_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (B_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (B_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (B_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (B_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (B_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (B_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_B(
.mem_dq_out (mem_dq_out[23:12]),
.mem_dq_ts (mem_dq_ts[23:12]),
.mem_dq_in (mem_dq_in[19:10]),
.mem_dqs_out (mem_dqs_out[1]),
.mem_dqs_ts (mem_dqs_ts[1]),
.mem_dqs_in (mem_dqs_in[1]),
.rst (B_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (B_ddr_clk),
.rclk (B_rclk),
.pi_dqs_found (B_pi_dqs_found),
.dqs_out_of_range (B_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (B_if_a_empty),
.if_empty (B_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*B_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*B_of_empty*/),
.of_a_full (B_of_a_full),
.of_full (B_of_full),
.pre_fifo_a_full (B_pre_fifo_a_full),
.phy_din (phy_din_remap[159:80]),
.phy_dout (phy_dout_remap[159:80]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,C_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (B_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (B_idelay_ce),
.idelay_ld (B_idelay_ld),
.pi_rst_dqs_find (B_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (B_po_fine_enable),
.po_coarse_enable (B_po_coarse_enable),
.po_fine_inc (B_po_fine_inc),
.po_coarse_inc (B_po_coarse_inc),
.po_counter_load_en (B_po_counter_load_en),
.po_counter_read_en (B_po_counter_read_en),
.po_counter_load_val (B_po_counter_load_val),
.po_coarse_overflow (B_po_coarse_overflow),
.po_fine_overflow (B_po_fine_overflow),
.po_counter_read_val (B_po_counter_read_val),
.po_sel_fine_oclk_delay(B_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (B_pi_fine_enable),
.pi_fine_inc (B_pi_fine_inc),
.pi_counter_load_en (B_pi_counter_load_en),
.pi_counter_read_en (B_pi_counter_read_en),
.pi_counter_load_val (B_pi_counter_load_val),
.pi_fine_overflow (B_pi_fine_overflow),
.pi_counter_read_val (B_pi_counter_read_val),
.pi_iserdes_rst (B_pi_iserdes_rst),
.pi_phase_locked (B_pi_phase_locked),
.fine_delay (B_fine_delay),
.fine_delay_sel (B_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_B
assign B_of_a_full = 1'b0;
assign B_of_full = 1'b0;
assign B_pre_fifo_a_full = 1'b0;
assign B_if_empty = 1'b0;
assign B_if_a_empty = 1'b0;
assign B_byte_rd_en = 1'b1;
assign B_pi_phase_locked = 1;
assign B_pi_dqs_found = 1;
assign B_rclk = 0;
assign B_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign B_pi_counter_read_val = 0;
assign B_po_counter_read_val = 0;
assign B_pi_fine_overflow = 0;
assign B_po_coarse_overflow = 0;
assign B_po_fine_overflow = 0;
end
if ( BYTE_LANES[2] ) begin : ddr_byte_lane_C
assign phy_dout_remap[239:160] = part_select_80(phy_dout, (LANE_REMAP[9:8]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("C"),
.PO_DATA_CTL (PC_DATA_CTL_N[2] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[35:24]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[35:24]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (C_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (C_PI_BURST_MODE),
.PI_CLKOUT_DIV (C_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (C_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (C_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (C_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (C_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (C_PO_CLKOUT_DIV),
.PO_FINE_DELAY (C_PO_FINE_DELAY),
.PO_COARSE_BYPASS (C_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (C_PO_COARSE_DELAY),
.PO_OCLK_DELAY (C_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (C_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (C_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (C_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (C_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (C_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (C_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (C_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_C(
.mem_dq_out (mem_dq_out[35:24]),
.mem_dq_ts (mem_dq_ts[35:24]),
.mem_dq_in (mem_dq_in[29:20]),
.mem_dqs_out (mem_dqs_out[2]),
.mem_dqs_ts (mem_dqs_ts[2]),
.mem_dqs_in (mem_dqs_in[2]),
.rst (C_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (C_ddr_clk),
.rclk (C_rclk),
.pi_dqs_found (C_pi_dqs_found),
.dqs_out_of_range (C_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (C_if_a_empty),
.if_empty (C_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*C_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*C_of_empty*/),
.of_a_full (C_of_a_full),
.of_full (C_of_full),
.pre_fifo_a_full (C_pre_fifo_a_full),
.phy_din (phy_din_remap[239:160]),
.phy_dout (phy_dout_remap[239:160]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,B_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (C_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (C_idelay_ce),
.idelay_ld (C_idelay_ld),
.pi_rst_dqs_find (C_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (C_po_fine_enable),
.po_coarse_enable (C_po_coarse_enable),
.po_fine_inc (C_po_fine_inc),
.po_coarse_inc (C_po_coarse_inc),
.po_counter_load_en (C_po_counter_load_en),
.po_counter_read_en (C_po_counter_read_en),
.po_counter_load_val (C_po_counter_load_val),
.po_coarse_overflow (C_po_coarse_overflow),
.po_fine_overflow (C_po_fine_overflow),
.po_counter_read_val (C_po_counter_read_val),
.po_sel_fine_oclk_delay(C_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (C_pi_fine_enable),
.pi_fine_inc (C_pi_fine_inc),
.pi_counter_load_en (C_pi_counter_load_en),
.pi_counter_read_en (C_pi_counter_read_en),
.pi_counter_load_val (C_pi_counter_load_val),
.pi_fine_overflow (C_pi_fine_overflow),
.pi_counter_read_val (C_pi_counter_read_val),
.pi_iserdes_rst (C_pi_iserdes_rst),
.pi_phase_locked (C_pi_phase_locked),
.fine_delay (C_fine_delay),
.fine_delay_sel (C_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_C
assign C_of_a_full = 1'b0;
assign C_of_full = 1'b0;
assign C_pre_fifo_a_full = 1'b0;
assign C_if_empty = 1'b0;
assign C_byte_rd_en = 1'b1;
assign C_if_a_empty = 1'b0;
assign C_pi_phase_locked = 1;
assign C_pi_dqs_found = 1;
assign C_rclk = 0;
assign C_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign C_pi_counter_read_val = 0;
assign C_po_counter_read_val = 0;
assign C_pi_fine_overflow = 0;
assign C_po_coarse_overflow = 0;
assign C_po_fine_overflow = 0;
end
if ( BYTE_LANES[3] ) begin : ddr_byte_lane_D
assign phy_dout_remap[319:240] = part_select_80(phy_dout, (LANE_REMAP[13:12]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("D"),
.PO_DATA_CTL (PC_DATA_CTL_N[3] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[47:36]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[47:36]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (D_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (D_PI_BURST_MODE),
.PI_CLKOUT_DIV (D_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (D_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (D_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (D_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (D_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (D_PO_CLKOUT_DIV),
.PO_FINE_DELAY (D_PO_FINE_DELAY),
.PO_COARSE_BYPASS (D_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (D_PO_COARSE_DELAY),
.PO_OCLK_DELAY (D_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (D_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (D_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (D_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (D_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (D_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (D_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (D_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_D(
.mem_dq_out (mem_dq_out[47:36]),
.mem_dq_ts (mem_dq_ts[47:36]),
.mem_dq_in (mem_dq_in[39:30]),
.mem_dqs_out (mem_dqs_out[3]),
.mem_dqs_ts (mem_dqs_ts[3]),
.mem_dqs_in (mem_dqs_in[3]),
.rst (D_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (D_ddr_clk),
.rclk (D_rclk),
.pi_dqs_found (D_pi_dqs_found),
.dqs_out_of_range (D_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (D_if_a_empty),
.if_empty (D_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*D_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*D_of_empty*/),
.of_a_full (D_of_a_full),
.of_full (D_of_full),
.pre_fifo_a_full (D_pre_fifo_a_full),
.phy_din (phy_din_remap[319:240]),
.phy_dout (phy_dout_remap[319:240]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.idelay_inc (idelay_inc),
.idelay_ce (D_idelay_ce),
.idelay_ld (D_idelay_ld),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,B_byte_rd_en,C_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (D_byte_rd_en),
// calibration signals
.pi_rst_dqs_find (D_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (D_po_fine_enable),
.po_coarse_enable (D_po_coarse_enable),
.po_fine_inc (D_po_fine_inc),
.po_coarse_inc (D_po_coarse_inc),
.po_counter_load_en (D_po_counter_load_en),
.po_counter_read_en (D_po_counter_read_en),
.po_counter_load_val (D_po_counter_load_val),
.po_coarse_overflow (D_po_coarse_overflow),
.po_fine_overflow (D_po_fine_overflow),
.po_counter_read_val (D_po_counter_read_val),
.po_sel_fine_oclk_delay(D_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (D_pi_fine_enable),
.pi_fine_inc (D_pi_fine_inc),
.pi_counter_load_en (D_pi_counter_load_en),
.pi_counter_read_en (D_pi_counter_read_en),
.pi_counter_load_val (D_pi_counter_load_val),
.pi_fine_overflow (D_pi_fine_overflow),
.pi_counter_read_val (D_pi_counter_read_val),
.pi_iserdes_rst (D_pi_iserdes_rst),
.pi_phase_locked (D_pi_phase_locked),
.fine_delay (D_fine_delay),
.fine_delay_sel (D_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_D
assign D_of_a_full = 1'b0;
assign D_of_full = 1'b0;
assign D_pre_fifo_a_full = 1'b0;
assign D_if_empty = 1'b0;
assign D_byte_rd_en = 1'b1;
assign D_if_a_empty = 1'b0;
assign D_rclk = 0;
assign D_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign D_pi_dqs_found = 1;
assign D_pi_phase_locked = 1;
assign D_pi_counter_read_val = 0;
assign D_po_counter_read_val = 0;
assign D_pi_fine_overflow = 0;
assign D_po_coarse_overflow = 0;
assign D_po_fine_overflow = 0;
end
endgenerate
assign phaser_ctl_bus[MSB_RANK_SEL_I : MSB_RANK_SEL_I - 7] = in_rank;
PHY_CONTROL #(
.AO_WRLVL_EN ( PC_AO_WRLVL_EN),
.AO_TOGGLE ( PC_AO_TOGGLE),
.BURST_MODE ( PC_BURST_MODE),
.CO_DURATION ( PC_CO_DURATION ),
.CLK_RATIO ( PC_CLK_RATIO),
.DATA_CTL_A_N ( PC_DATA_CTL_A),
.DATA_CTL_B_N ( PC_DATA_CTL_B),
.DATA_CTL_C_N ( PC_DATA_CTL_C),
.DATA_CTL_D_N ( PC_DATA_CTL_D),
.DI_DURATION ( PC_DI_DURATION ),
.DO_DURATION ( PC_DO_DURATION ),
.EVENTS_DELAY ( PC_EVENTS_DELAY),
.FOUR_WINDOW_CLOCKS ( PC_FOUR_WINDOW_CLOCKS),
.MULTI_REGION ( PC_MULTI_REGION ),
.PHY_COUNT_ENABLE ( PC_PHY_COUNT_EN),
.DISABLE_SEQ_MATCH ( PC_DISABLE_SEQ_MATCH),
.SYNC_MODE ( PC_SYNC_MODE),
.CMD_OFFSET ( PC_CMD_OFFSET),
.RD_CMD_OFFSET_0 ( PC_RD_CMD_OFFSET_0),
.RD_CMD_OFFSET_1 ( PC_RD_CMD_OFFSET_1),
.RD_CMD_OFFSET_2 ( PC_RD_CMD_OFFSET_2),
.RD_CMD_OFFSET_3 ( PC_RD_CMD_OFFSET_3),
.RD_DURATION_0 ( PC_RD_DURATION_0),
.RD_DURATION_1 ( PC_RD_DURATION_1),
.RD_DURATION_2 ( PC_RD_DURATION_2),
.RD_DURATION_3 ( PC_RD_DURATION_3),
.WR_CMD_OFFSET_0 ( PC_WR_CMD_OFFSET_0),
.WR_CMD_OFFSET_1 ( PC_WR_CMD_OFFSET_1),
.WR_CMD_OFFSET_2 ( PC_WR_CMD_OFFSET_2),
.WR_CMD_OFFSET_3 ( PC_WR_CMD_OFFSET_3),
.WR_DURATION_0 ( PC_WR_DURATION_0),
.WR_DURATION_1 ( PC_WR_DURATION_1),
.WR_DURATION_2 ( PC_WR_DURATION_2),
.WR_DURATION_3 ( PC_WR_DURATION_3)
) phy_control_i (
.AUXOUTPUT (aux_out),
.INBURSTPENDING (phaser_ctl_bus[MSB_BURST_PEND_PI:MSB_BURST_PEND_PI-3]),
.INRANKA (in_rank[1:0]),
.INRANKB (in_rank[3:2]),
.INRANKC (in_rank[5:4]),
.INRANKD (in_rank[7:6]),
.OUTBURSTPENDING (phaser_ctl_bus[MSB_BURST_PEND_PO:MSB_BURST_PEND_PO-3]),
.PCENABLECALIB (phy_encalib),
.PHYCTLALMOSTFULL (phy_ctl_a_full),
.PHYCTLEMPTY (phy_ctl_empty),
.PHYCTLFULL (phy_ctl_full),
.PHYCTLREADY (phy_ctl_ready),
.MEMREFCLK (mem_refclk),
.PHYCLK (phy_ctl_clk),
.PHYCTLMSTREMPTY (phy_ctl_mstr_empty),
.PHYCTLWD (_phy_ctl_wd),
.PHYCTLWRENABLE (phy_ctl_wr),
.PLLLOCK (pll_lock),
.REFDLLLOCK (ref_dll_lock), // is reset while !locked
.RESET (rst),
.SYNCIN (sync_pulse),
.READCALIBENABLE (phy_read_calib),
.WRITECALIBENABLE (phy_write_calib)
`ifdef USE_PHY_CONTROL_TEST
, .TESTINPUT (16'b0),
.TESTOUTPUT (test_output),
.TESTSELECT (test_select),
.SCANENABLEN (scan_enable)
`endif
);
// register outputs to give extra slack in timing
always @(posedge phy_clk ) begin
case (calib_sel[1:0])
2'h0: begin
po_coarse_overflow <= #1 A_po_coarse_overflow;
po_fine_overflow <= #1 A_po_fine_overflow;
po_counter_read_val <= #1 A_po_counter_read_val;
pi_fine_overflow <= #1 A_pi_fine_overflow;
pi_counter_read_val <= #1 A_pi_counter_read_val;
pi_phase_locked <= #1 A_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 A_pi_dqs_found;
pi_dqs_out_of_range <= #1 A_pi_dqs_out_of_range;
end
2'h1: begin
po_coarse_overflow <= #1 B_po_coarse_overflow;
po_fine_overflow <= #1 B_po_fine_overflow;
po_counter_read_val <= #1 B_po_counter_read_val;
pi_fine_overflow <= #1 B_pi_fine_overflow;
pi_counter_read_val <= #1 B_pi_counter_read_val;
pi_phase_locked <= #1 B_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 B_pi_dqs_found;
pi_dqs_out_of_range <= #1 B_pi_dqs_out_of_range;
end
2'h2: begin
po_coarse_overflow <= #1 C_po_coarse_overflow;
po_fine_overflow <= #1 C_po_fine_overflow;
po_counter_read_val <= #1 C_po_counter_read_val;
pi_fine_overflow <= #1 C_pi_fine_overflow;
pi_counter_read_val <= #1 C_pi_counter_read_val;
pi_phase_locked <= #1 C_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 C_pi_dqs_found;
pi_dqs_out_of_range <= #1 C_pi_dqs_out_of_range;
end
2'h3: begin
po_coarse_overflow <= #1 D_po_coarse_overflow;
po_fine_overflow <= #1 D_po_fine_overflow;
po_counter_read_val <= #1 D_po_counter_read_val;
pi_fine_overflow <= #1 D_pi_fine_overflow;
pi_counter_read_val <= #1 D_pi_counter_read_val;
pi_phase_locked <= #1 D_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 D_pi_dqs_found;
pi_dqs_out_of_range <= #1 D_pi_dqs_out_of_range;
end
default: begin
po_coarse_overflow <= po_coarse_overflow;
end
endcase
end
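  // B/C/D_mux_ctrl qualify the broadcast calibration controls per lane: a lane
  // takes part only if its calib_zero_lanes bit is clear and, when
  // calib_zero_ctrl is asserted, only if it is a data lane (DATA_CTL_N = 1).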
wire B_mux_ctrl;
wire C_mux_ctrl;
wire D_mux_ctrl;
generate
if (HIGHEST_LANE > 1)
assign B_mux_ctrl = ( !calib_zero_lanes[1] && ( ! calib_zero_ctrl || DATA_CTL_N[1]));
else
assign B_mux_ctrl = 0;
if (HIGHEST_LANE > 2)
assign C_mux_ctrl = ( !calib_zero_lanes[2] && (! calib_zero_ctrl || DATA_CTL_N[2]));
else
assign C_mux_ctrl = 0;
if (HIGHEST_LANE > 3)
assign D_mux_ctrl = ( !calib_zero_lanes[3] && ( ! calib_zero_ctrl || DATA_CTL_N[3]));
else
assign D_mux_ctrl = 0;
endgenerate
always @(*) begin
A_pi_fine_enable = 0;
A_pi_fine_inc = 0;
A_pi_counter_load_en = 0;
A_pi_counter_read_en = 0;
A_pi_counter_load_val = 0;
A_pi_rst_dqs_find = 0;
A_po_fine_enable = 0;
A_po_coarse_enable = 0;
A_po_fine_inc = 0;
A_po_coarse_inc = 0;
A_po_counter_load_en = 0;
A_po_counter_read_en = 0;
A_po_counter_load_val = 0;
A_po_sel_fine_oclk_delay = 0;
A_idelay_ce = 0;
A_idelay_ld = 0;
A_fine_delay = 0;
A_fine_delay_sel = 0;
B_pi_fine_enable = 0;
B_pi_fine_inc = 0;
B_pi_counter_load_en = 0;
B_pi_counter_read_en = 0;
B_pi_counter_load_val = 0;
B_pi_rst_dqs_find = 0;
B_po_fine_enable = 0;
B_po_coarse_enable = 0;
B_po_fine_inc = 0;
B_po_coarse_inc = 0;
B_po_counter_load_en = 0;
B_po_counter_read_en = 0;
B_po_counter_load_val = 0;
B_po_sel_fine_oclk_delay = 0;
B_idelay_ce = 0;
B_idelay_ld = 0;
B_fine_delay = 0;
B_fine_delay_sel = 0;
C_pi_fine_enable = 0;
C_pi_fine_inc = 0;
C_pi_counter_load_en = 0;
C_pi_counter_read_en = 0;
C_pi_counter_load_val = 0;
C_pi_rst_dqs_find = 0;
C_po_fine_enable = 0;
C_po_coarse_enable = 0;
C_po_fine_inc = 0;
C_po_coarse_inc = 0;
C_po_counter_load_en = 0;
C_po_counter_read_en = 0;
C_po_counter_load_val = 0;
C_po_sel_fine_oclk_delay = 0;
C_idelay_ce = 0;
C_idelay_ld = 0;
C_fine_delay = 0;
C_fine_delay_sel = 0;
D_pi_fine_enable = 0;
D_pi_fine_inc = 0;
D_pi_counter_load_en = 0;
D_pi_counter_read_en = 0;
D_pi_counter_load_val = 0;
D_pi_rst_dqs_find = 0;
D_po_fine_enable = 0;
D_po_coarse_enable = 0;
D_po_fine_inc = 0;
D_po_coarse_inc = 0;
D_po_counter_load_en = 0;
D_po_counter_read_en = 0;
D_po_counter_load_val = 0;
D_po_sel_fine_oclk_delay = 0;
D_idelay_ce = 0;
D_idelay_ld = 0;
D_fine_delay = 0;
D_fine_delay_sel = 0;
if ( calib_sel[2]) begin
// if this is asserted, all calib signals are deasserted
A_pi_fine_enable = 0;
A_pi_fine_inc = 0;
A_pi_counter_load_en = 0;
A_pi_counter_read_en = 0;
A_pi_counter_load_val = 0;
A_pi_rst_dqs_find = 0;
A_po_fine_enable = 0;
A_po_coarse_enable = 0;
A_po_fine_inc = 0;
A_po_coarse_inc = 0;
A_po_counter_load_en = 0;
A_po_counter_read_en = 0;
A_po_counter_load_val = 0;
A_po_sel_fine_oclk_delay = 0;
A_idelay_ce = 0;
A_idelay_ld = 0;
A_fine_delay = 0;
A_fine_delay_sel = 0;
B_pi_fine_enable = 0;
B_pi_fine_inc = 0;
B_pi_counter_load_en = 0;
B_pi_counter_read_en = 0;
B_pi_counter_load_val = 0;
B_pi_rst_dqs_find = 0;
B_po_fine_enable = 0;
B_po_coarse_enable = 0;
B_po_fine_inc = 0;
B_po_coarse_inc = 0;
B_po_counter_load_en = 0;
B_po_counter_read_en = 0;
B_po_counter_load_val = 0;
B_po_sel_fine_oclk_delay = 0;
B_idelay_ce = 0;
B_idelay_ld = 0;
B_fine_delay = 0;
B_fine_delay_sel = 0;
C_pi_fine_enable = 0;
C_pi_fine_inc = 0;
C_pi_counter_load_en = 0;
C_pi_counter_read_en = 0;
C_pi_counter_load_val = 0;
C_pi_rst_dqs_find = 0;
C_po_fine_enable = 0;
C_po_coarse_enable = 0;
C_po_fine_inc = 0;
C_po_coarse_inc = 0;
C_po_counter_load_en = 0;
C_po_counter_read_en = 0;
C_po_counter_load_val = 0;
C_po_sel_fine_oclk_delay = 0;
C_idelay_ce = 0;
C_idelay_ld = 0;
C_fine_delay = 0;
C_fine_delay_sel = 0;
D_pi_fine_enable = 0;
D_pi_fine_inc = 0;
D_pi_counter_load_en = 0;
D_pi_counter_read_en = 0;
D_pi_counter_load_val = 0;
D_pi_rst_dqs_find = 0;
D_po_fine_enable = 0;
D_po_coarse_enable = 0;
D_po_fine_inc = 0;
D_po_coarse_inc = 0;
D_po_counter_load_en = 0;
D_po_counter_read_en = 0;
D_po_counter_load_val = 0;
D_po_sel_fine_oclk_delay = 0;
D_idelay_ce = 0;
D_idelay_ld = 0;
D_fine_delay = 0;
D_fine_delay_sel = 0;
end else
if (calib_in_common) begin
// if this is asserted, each signal is broadcast to all phasers
// in common
if ( !calib_zero_lanes[0] && (! calib_zero_ctrl || DATA_CTL_N[0])) begin
A_pi_fine_enable = pi_fine_enable;
A_pi_fine_inc = pi_fine_inc;
A_pi_counter_load_en = pi_counter_load_en;
A_pi_counter_read_en = pi_counter_read_en;
A_pi_counter_load_val = pi_counter_load_val;
A_pi_rst_dqs_find = pi_rst_dqs_find;
A_po_fine_enable = po_fine_enable;
A_po_coarse_enable = po_coarse_enable;
A_po_fine_inc = po_fine_inc;
A_po_coarse_inc = po_coarse_inc;
A_po_counter_load_en = po_counter_load_en;
A_po_counter_read_en = po_counter_read_en;
A_po_counter_load_val = po_counter_load_val;
A_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
A_idelay_ce = idelay_ce;
A_idelay_ld = idelay_ld;
A_fine_delay = fine_delay ;
A_fine_delay_sel = fine_delay_sel;
end
if ( B_mux_ctrl) begin
B_pi_fine_enable = pi_fine_enable;
B_pi_fine_inc = pi_fine_inc;
B_pi_counter_load_en = pi_counter_load_en;
B_pi_counter_read_en = pi_counter_read_en;
B_pi_counter_load_val = pi_counter_load_val;
B_pi_rst_dqs_find = pi_rst_dqs_find;
B_po_fine_enable = po_fine_enable;
B_po_coarse_enable = po_coarse_enable;
B_po_fine_inc = po_fine_inc;
B_po_coarse_inc = po_coarse_inc;
B_po_counter_load_en = po_counter_load_en;
B_po_counter_read_en = po_counter_read_en;
B_po_counter_load_val = po_counter_load_val;
B_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
B_idelay_ce = idelay_ce;
B_idelay_ld = idelay_ld;
B_fine_delay = fine_delay ;
B_fine_delay_sel = fine_delay_sel;
end
if ( C_mux_ctrl) begin
C_pi_fine_enable = pi_fine_enable;
C_pi_fine_inc = pi_fine_inc;
C_pi_counter_load_en = pi_counter_load_en;
C_pi_counter_read_en = pi_counter_read_en;
C_pi_counter_load_val = pi_counter_load_val;
C_pi_rst_dqs_find = pi_rst_dqs_find;
C_po_fine_enable = po_fine_enable;
C_po_coarse_enable = po_coarse_enable;
C_po_fine_inc = po_fine_inc;
C_po_coarse_inc = po_coarse_inc;
C_po_counter_load_en = po_counter_load_en;
C_po_counter_read_en = po_counter_read_en;
C_po_counter_load_val = po_counter_load_val;
C_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
C_idelay_ce = idelay_ce;
C_idelay_ld = idelay_ld;
C_fine_delay = fine_delay ;
C_fine_delay_sel = fine_delay_sel;
end
if ( D_mux_ctrl) begin
D_pi_fine_enable = pi_fine_enable;
D_pi_fine_inc = pi_fine_inc;
D_pi_counter_load_en = pi_counter_load_en;
D_pi_counter_read_en = pi_counter_read_en;
D_pi_counter_load_val = pi_counter_load_val;
D_pi_rst_dqs_find = pi_rst_dqs_find;
D_po_fine_enable = po_fine_enable;
D_po_coarse_enable = po_coarse_enable;
D_po_fine_inc = po_fine_inc;
D_po_coarse_inc = po_coarse_inc;
D_po_counter_load_en = po_counter_load_en;
D_po_counter_read_en = po_counter_read_en;
D_po_counter_load_val = po_counter_load_val;
D_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
D_idelay_ce = idelay_ce;
D_idelay_ld = idelay_ld;
D_fine_delay = fine_delay ;
D_fine_delay_sel = fine_delay_sel;
end
end
else begin
// otherwise, only a single phaser is selected
case (calib_sel[1:0])
0: begin
A_pi_fine_enable = pi_fine_enable;
A_pi_fine_inc = pi_fine_inc;
A_pi_counter_load_en = pi_counter_load_en;
A_pi_counter_read_en = pi_counter_read_en;
A_pi_counter_load_val = pi_counter_load_val;
A_pi_rst_dqs_find = pi_rst_dqs_find;
A_po_fine_enable = po_fine_enable;
A_po_coarse_enable = po_coarse_enable;
A_po_fine_inc = po_fine_inc;
A_po_coarse_inc = po_coarse_inc;
A_po_counter_load_en = po_counter_load_en;
A_po_counter_read_en = po_counter_read_en;
A_po_counter_load_val = po_counter_load_val;
A_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
A_idelay_ce = idelay_ce;
A_idelay_ld = idelay_ld;
A_fine_delay = fine_delay ;
A_fine_delay_sel = fine_delay_sel;
end
1: begin
B_pi_fine_enable = pi_fine_enable;
B_pi_fine_inc = pi_fine_inc;
B_pi_counter_load_en = pi_counter_load_en;
B_pi_counter_read_en = pi_counter_read_en;
B_pi_counter_load_val = pi_counter_load_val;
B_pi_rst_dqs_find = pi_rst_dqs_find;
B_po_fine_enable = po_fine_enable;
B_po_coarse_enable = po_coarse_enable;
B_po_fine_inc = po_fine_inc;
B_po_coarse_inc = po_coarse_inc;
B_po_counter_load_en = po_counter_load_en;
B_po_counter_read_en = po_counter_read_en;
B_po_counter_load_val = po_counter_load_val;
B_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
B_idelay_ce = idelay_ce;
B_idelay_ld = idelay_ld;
B_fine_delay = fine_delay ;
B_fine_delay_sel = fine_delay_sel;
end
2: begin
C_pi_fine_enable = pi_fine_enable;
C_pi_fine_inc = pi_fine_inc;
C_pi_counter_load_en = pi_counter_load_en;
C_pi_counter_read_en = pi_counter_read_en;
C_pi_counter_load_val = pi_counter_load_val;
C_pi_rst_dqs_find = pi_rst_dqs_find;
C_po_fine_enable = po_fine_enable;
C_po_coarse_enable = po_coarse_enable;
C_po_fine_inc = po_fine_inc;
C_po_coarse_inc = po_coarse_inc;
C_po_counter_load_en = po_counter_load_en;
C_po_counter_read_en = po_counter_read_en;
C_po_counter_load_val = po_counter_load_val;
C_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
C_idelay_ce = idelay_ce;
C_idelay_ld = idelay_ld;
C_fine_delay = fine_delay ;
C_fine_delay_sel = fine_delay_sel;
end
3: begin
D_pi_fine_enable = pi_fine_enable;
D_pi_fine_inc = pi_fine_inc;
D_pi_counter_load_en = pi_counter_load_en;
D_pi_counter_read_en = pi_counter_read_en;
D_pi_counter_load_val = pi_counter_load_val;
D_pi_rst_dqs_find = pi_rst_dqs_find;
D_po_fine_enable = po_fine_enable;
D_po_coarse_enable = po_coarse_enable;
D_po_fine_inc = po_fine_inc;
D_po_coarse_inc = po_coarse_inc;
D_po_counter_load_en = po_counter_load_en;
D_po_counter_load_val = po_counter_load_val;
D_po_counter_read_en = po_counter_read_en;
D_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
D_idelay_ce = idelay_ce;
D_idelay_ld = idelay_ld;
D_fine_delay = fine_delay ;
D_fine_delay_sel = fine_delay_sel;
end
endcase
end
end
// obligatory PHASER_REF instance
PHASER_REF phaser_ref_i(
.LOCKED (ref_dll_lock),
.CLKIN (freq_refclk),
.PWRDWN (1'b0),
.RST ( ! pll_lock)
);
// optional idelay_ctrl
generate
if ( GENERATE_IDELAYCTRL == "TRUE")
IDELAYCTRL idelayctrl (
.RDY (/*idelayctrl_rdy*/),
.REFCLK (idelayctrl_refclk),
.RST (rst)
);
endgenerate
endmodule
|
module mig_7series_v2_3_ddr_phy_4lanes #(
parameter GENERATE_IDELAYCTRL = "TRUE",
parameter IODELAY_GRP = "IODELAY_MIG",
parameter FPGA_SPEED_GRADE = 1,
parameter BANK_TYPE = "HP_IO", // # = "HP_IO", "HPL_IO", "HR_IO", "HRL_IO"
parameter BYTELANES_DDR_CK = 24'b0010_0010_0010_0010_0010_0010,
parameter NUM_DDR_CK = 1,
// next three parameter fields correspond to byte lanes for lane order DCBA
parameter BYTE_LANES = 4'b1111, // lane existence, one per lane
parameter DATA_CTL_N = 4'b1111, // data or control, per lane
parameter BITLANES = 48'hffff_ffff_ffff,
parameter BITLANES_OUTONLY = 48'h0000_0000_0000,
parameter LANE_REMAP = 16'h3210,// 4-bit index
// used to rewire to one of four
                                  // input/output bus lanes
// example: 0321 remaps lanes as:
// D->A
// C->D
// B->C
// A->B
parameter LAST_BANK = "FALSE",
parameter USE_PRE_POST_FIFO = "FALSE",
parameter RCLK_SELECT_LANE = "B",
parameter real TCK = 0.00,
parameter SYNTHESIS = "FALSE",
parameter PO_CTL_COARSE_BYPASS = "FALSE",
parameter PO_FINE_DELAY = 0,
parameter PI_SEL_CLK_OFFSET = 0,
   // phy_control parameter used in other parameters
parameter PC_CLK_RATIO = 4,
//phaser_in parameters
parameter A_PI_FREQ_REF_DIV = "NONE",
parameter A_PI_CLKOUT_DIV = 2,
parameter A_PI_BURST_MODE = "TRUE",
parameter A_PI_OUTPUT_CLK_SRC = "DELAYED_REF" , //"DELAYED_REF",
parameter A_PI_FINE_DELAY = 60,
parameter A_PI_SYNC_IN_DIV_RST = "TRUE",
parameter B_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter B_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter B_PI_BURST_MODE = A_PI_BURST_MODE,
parameter B_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter B_PI_FINE_DELAY = A_PI_FINE_DELAY,
parameter B_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
parameter C_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter C_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter C_PI_BURST_MODE = A_PI_BURST_MODE,
parameter C_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter C_PI_FINE_DELAY = 0,
parameter C_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
parameter D_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV,
parameter D_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV,
parameter D_PI_BURST_MODE = A_PI_BURST_MODE,
parameter D_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC,
parameter D_PI_FINE_DELAY = 0,
parameter D_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST,
//phaser_out parameters
parameter A_PO_CLKOUT_DIV = (DATA_CTL_N[0] == 0) ? PC_CLK_RATIO : 2,
parameter A_PO_FINE_DELAY = PO_FINE_DELAY,
parameter A_PO_COARSE_DELAY = 0,
parameter A_PO_OCLK_DELAY = 0,
parameter A_PO_OCLKDELAY_INV = "FALSE",
parameter A_PO_OUTPUT_CLK_SRC = "DELAYED_REF",
parameter A_PO_SYNC_IN_DIV_RST = "TRUE",
//parameter A_PO_SYNC_IN_DIV_RST = "FALSE",
parameter B_PO_CLKOUT_DIV = (DATA_CTL_N[1] == 0) ? PC_CLK_RATIO : 2,
parameter B_PO_FINE_DELAY = PO_FINE_DELAY,
parameter B_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter B_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter B_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter B_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter B_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter C_PO_CLKOUT_DIV = (DATA_CTL_N[2] == 0) ? PC_CLK_RATIO : 2,
parameter C_PO_FINE_DELAY = PO_FINE_DELAY,
parameter C_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter C_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter C_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter C_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter C_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter D_PO_CLKOUT_DIV = (DATA_CTL_N[3] == 0) ? PC_CLK_RATIO : 2,
parameter D_PO_FINE_DELAY = PO_FINE_DELAY,
parameter D_PO_COARSE_DELAY = A_PO_COARSE_DELAY,
parameter D_PO_OCLK_DELAY = A_PO_OCLK_DELAY,
parameter D_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV,
parameter D_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC,
parameter D_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST,
parameter A_IDELAYE2_IDELAY_TYPE = "VARIABLE",
parameter A_IDELAYE2_IDELAY_VALUE = 00,
parameter B_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter B_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
parameter C_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter C_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
parameter D_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE,
parameter D_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE,
// phy_control parameters
parameter PC_BURST_MODE = "TRUE",
parameter PC_DATA_CTL_N = DATA_CTL_N,
parameter PC_CMD_OFFSET = 0,
parameter PC_RD_CMD_OFFSET_0 = 0,
parameter PC_RD_CMD_OFFSET_1 = 0,
parameter PC_RD_CMD_OFFSET_2 = 0,
parameter PC_RD_CMD_OFFSET_3 = 0,
parameter PC_CO_DURATION = 1,
parameter PC_DI_DURATION = 1,
parameter PC_DO_DURATION = 1,
parameter PC_RD_DURATION_0 = 0,
parameter PC_RD_DURATION_1 = 0,
parameter PC_RD_DURATION_2 = 0,
parameter PC_RD_DURATION_3 = 0,
parameter PC_WR_CMD_OFFSET_0 = 5,
parameter PC_WR_CMD_OFFSET_1 = 5,
parameter PC_WR_CMD_OFFSET_2 = 5,
parameter PC_WR_CMD_OFFSET_3 = 5,
parameter PC_WR_DURATION_0 = 6,
parameter PC_WR_DURATION_1 = 6,
parameter PC_WR_DURATION_2 = 6,
parameter PC_WR_DURATION_3 = 6,
parameter PC_AO_WRLVL_EN = 0,
parameter PC_AO_TOGGLE = 4'b0101, // odd bits are toggle (CKE)
parameter PC_FOUR_WINDOW_CLOCKS = 63,
parameter PC_EVENTS_DELAY = 18,
parameter PC_PHY_COUNT_EN = "TRUE",
parameter PC_SYNC_MODE = "TRUE",
parameter PC_DISABLE_SEQ_MATCH = "TRUE",
parameter PC_MULTI_REGION = "FALSE",
// io fifo parameters
parameter A_OF_ARRAY_MODE = (DATA_CTL_N[0] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter B_OF_ARRAY_MODE = (DATA_CTL_N[1] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter C_OF_ARRAY_MODE = (DATA_CTL_N[2] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter D_OF_ARRAY_MODE = (DATA_CTL_N[3] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4",
parameter OF_ALMOST_EMPTY_VALUE = 1,
parameter OF_ALMOST_FULL_VALUE = 1,
parameter OF_OUTPUT_DISABLE = "TRUE",
parameter OF_SYNCHRONOUS_MODE = PC_SYNC_MODE,
parameter A_OS_DATA_RATE = "DDR",
parameter A_OS_DATA_WIDTH = 4,
parameter B_OS_DATA_RATE = A_OS_DATA_RATE,
parameter B_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter C_OS_DATA_RATE = A_OS_DATA_RATE,
parameter C_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter D_OS_DATA_RATE = A_OS_DATA_RATE,
parameter D_OS_DATA_WIDTH = A_OS_DATA_WIDTH,
parameter A_IF_ARRAY_MODE = "ARRAY_MODE_4_X_8",
parameter B_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter C_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter D_IF_ARRAY_MODE = A_IF_ARRAY_MODE,
parameter IF_ALMOST_EMPTY_VALUE = 1,
parameter IF_ALMOST_FULL_VALUE = 1,
parameter IF_SYNCHRONOUS_MODE = PC_SYNC_MODE,
// this is used locally, not for external pushdown
// NOTE: the 0+ is needed in each to coerce to integer for addition.
// otherwise 4x 1'b values are added producing a 1'b value.
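   // e.g. BYTE_LANES = 4'b0111, DATA_CTL_N = 4'b0110 gives
   // N_BYTE_LANES = 3, N_CTL_LANES = 1, N_DATA_LANES = 2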
parameter HIGHEST_LANE = LAST_BANK == "FALSE" ? 4 : (BYTE_LANES[3] ? 4 : BYTE_LANES[2] ? 3 : BYTE_LANES[1] ? 2 : 1),
parameter N_CTL_LANES = ((0+(!DATA_CTL_N[0]) & BYTE_LANES[0]) + (0+(!DATA_CTL_N[1]) & BYTE_LANES[1]) + (0+(!DATA_CTL_N[2]) & BYTE_LANES[2]) + (0+(!DATA_CTL_N[3]) & BYTE_LANES[3])),
parameter N_BYTE_LANES = (0+BYTE_LANES[0]) + (0+BYTE_LANES[1]) + (0+BYTE_LANES[2]) + (0+BYTE_LANES[3]),
parameter N_DATA_LANES = N_BYTE_LANES - N_CTL_LANES,
// assume odt per rank + any declared cke's
parameter AUXOUT_WIDTH = 4,
parameter LP_DDR_CK_WIDTH = 2
,parameter CKE_ODT_AUX = "FALSE"
)
(
//`include "phy.vh"
input rst,
input phy_clk,
input phy_ctl_clk,
input freq_refclk,
input mem_refclk,
input mem_refclk_div4,
input pll_lock,
input sync_pulse,
input idelayctrl_refclk,
input [HIGHEST_LANE*80-1:0] phy_dout,
input phy_cmd_wr_en,
input phy_data_wr_en,
input phy_rd_en,
input phy_ctl_mstr_empty,
input [31:0] phy_ctl_wd,
input [`PC_DATA_OFFSET_RANGE] data_offset,
input phy_ctl_wr,
input if_empty_def,
input phyGo,
input input_sink,
output [(LP_DDR_CK_WIDTH*24)-1:0] ddr_clk, // to memory
output rclk,
output if_a_empty,
output if_empty,
output byte_rd_en,
output if_empty_or,
output if_empty_and,
output of_ctl_a_full,
output of_data_a_full,
output of_ctl_full,
output of_data_full,
output pre_data_a_full,
output [HIGHEST_LANE*80-1:0]phy_din, // assume input bus same size as output bus
output phy_ctl_empty,
output phy_ctl_a_full,
output phy_ctl_full,
output [HIGHEST_LANE*12-1:0]mem_dq_out,
output [HIGHEST_LANE*12-1:0]mem_dq_ts,
input [HIGHEST_LANE*10-1:0]mem_dq_in,
output [HIGHEST_LANE-1:0] mem_dqs_out,
output [HIGHEST_LANE-1:0] mem_dqs_ts,
input [HIGHEST_LANE-1:0] mem_dqs_in,
input [1:0] byte_rd_en_oth_banks,
output [AUXOUT_WIDTH-1:0] aux_out,
output reg rst_out = 0,
output reg mcGo=0,
output phy_ctl_ready,
output ref_dll_lock,
input if_rst,
input phy_read_calib,
input phy_write_calib,
input idelay_inc,
input idelay_ce,
input idelay_ld,
input [2:0] calib_sel,
input calib_zero_ctrl,
input [HIGHEST_LANE-1:0] calib_zero_lanes,
input calib_in_common,
input po_fine_enable,
input po_coarse_enable,
input po_fine_inc,
input po_coarse_inc,
input po_counter_load_en,
input po_counter_read_en,
input [8:0] po_counter_load_val,
input po_sel_fine_oclk_delay,
output reg po_coarse_overflow,
output reg po_fine_overflow,
output reg [8:0] po_counter_read_val,
input pi_rst_dqs_find,
input pi_fine_enable,
input pi_fine_inc,
input pi_counter_load_en,
input pi_counter_read_en,
input [5:0] pi_counter_load_val,
output reg pi_fine_overflow,
output reg [5:0] pi_counter_read_val,
output reg pi_dqs_found,
output pi_dqs_found_all,
output pi_dqs_found_any,
output [HIGHEST_LANE-1:0] pi_phase_locked_lanes,
output [HIGHEST_LANE-1:0] pi_dqs_found_lanes,
output reg pi_dqs_out_of_range,
output reg pi_phase_locked,
output pi_phase_locked_all,
input [29:0] fine_delay,
input fine_delay_sel
);
localparam DATA_CTL_A = (~DATA_CTL_N[0]);
localparam DATA_CTL_B = (~DATA_CTL_N[1]);
localparam DATA_CTL_C = (~DATA_CTL_N[2]);
localparam DATA_CTL_D = (~DATA_CTL_N[3]);
localparam PRESENT_CTL_A = BYTE_LANES[0] && ! DATA_CTL_N[0];
localparam PRESENT_CTL_B = BYTE_LANES[1] && ! DATA_CTL_N[1];
localparam PRESENT_CTL_C = BYTE_LANES[2] && ! DATA_CTL_N[2];
localparam PRESENT_CTL_D = BYTE_LANES[3] && ! DATA_CTL_N[3];
localparam PRESENT_DATA_A = BYTE_LANES[0] && DATA_CTL_N[0];
localparam PRESENT_DATA_B = BYTE_LANES[1] && DATA_CTL_N[1];
localparam PRESENT_DATA_C = BYTE_LANES[2] && DATA_CTL_N[2];
localparam PRESENT_DATA_D = BYTE_LANES[3] && DATA_CTL_N[3];
localparam PC_DATA_CTL_A = (DATA_CTL_A) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_B = (DATA_CTL_B) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_C = (DATA_CTL_C) ? "FALSE" : "TRUE";
localparam PC_DATA_CTL_D = (DATA_CTL_D) ? "FALSE" : "TRUE";
localparam A_PO_COARSE_BYPASS = (DATA_CTL_A) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam B_PO_COARSE_BYPASS = (DATA_CTL_B) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam C_PO_COARSE_BYPASS = (DATA_CTL_C) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam D_PO_COARSE_BYPASS = (DATA_CTL_D) ? PO_CTL_COARSE_BYPASS : "FALSE";
localparam IO_A_START = 41;
localparam IO_A_END = 40;
localparam IO_B_START = 43;
localparam IO_B_END = 42;
localparam IO_C_START = 45;
localparam IO_C_END = 44;
localparam IO_D_START = 47;
localparam IO_D_END = 46;
localparam IO_A_X_START = (HIGHEST_LANE * 10) + 1;
localparam IO_A_X_END = (IO_A_X_START-1);
localparam IO_B_X_START = (IO_A_X_START + 2);
localparam IO_B_X_END = (IO_B_X_START -1);
localparam IO_C_X_START = (IO_B_X_START + 2);
localparam IO_C_X_END = (IO_C_X_START -1);
localparam IO_D_X_START = (IO_C_X_START + 2);
localparam IO_D_X_END = (IO_D_X_START -1);
localparam MSB_BURST_PEND_PO = 3;
localparam MSB_BURST_PEND_PI = 7;
localparam MSB_RANK_SEL_I = MSB_BURST_PEND_PI + 8;
localparam PHASER_CTL_BUS_WIDTH = MSB_RANK_SEL_I + 1;
wire [1:0] oserdes_dqs;
wire [1:0] oserdes_dqs_ts;
wire [1:0] oserdes_dq_ts;
wire [PHASER_CTL_BUS_WIDTH-1:0] phaser_ctl_bus;
wire [7:0] in_rank;
wire [11:0] IO_A;
wire [11:0] IO_B;
wire [11:0] IO_C;
wire [11:0] IO_D;
wire [319:0] phy_din_remap;
reg A_po_counter_read_en;
wire [8:0] A_po_counter_read_val;
reg A_pi_counter_read_en;
wire [5:0] A_pi_counter_read_val;
wire A_pi_fine_overflow;
wire A_po_coarse_overflow;
wire A_po_fine_overflow;
wire A_pi_dqs_found;
wire A_pi_dqs_out_of_range;
wire A_pi_phase_locked;
wire A_pi_iserdes_rst;
reg A_pi_fine_enable;
reg A_pi_fine_inc;
reg A_pi_counter_load_en;
reg [5:0] A_pi_counter_load_val;
reg A_pi_rst_dqs_find;
reg A_po_fine_enable;
reg A_po_coarse_enable;
reg A_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg A_po_sel_fine_oclk_delay;
reg A_po_coarse_inc;
reg A_po_counter_load_en;
reg [8:0] A_po_counter_load_val;
wire A_rclk;
reg A_idelay_ce;
reg A_idelay_ld;
reg [29:0] A_fine_delay;
reg A_fine_delay_sel;
reg B_po_counter_read_en;
wire [8:0] B_po_counter_read_val;
reg B_pi_counter_read_en;
wire [5:0] B_pi_counter_read_val;
wire B_pi_fine_overflow;
wire B_po_coarse_overflow;
wire B_po_fine_overflow;
wire B_pi_phase_locked;
wire B_pi_iserdes_rst;
wire B_pi_dqs_found;
wire B_pi_dqs_out_of_range;
reg B_pi_fine_enable;
reg B_pi_fine_inc;
reg B_pi_counter_load_en;
reg [5:0] B_pi_counter_load_val;
reg B_pi_rst_dqs_find;
reg B_po_fine_enable;
reg B_po_coarse_enable;
reg B_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg B_po_coarse_inc;
reg B_po_sel_fine_oclk_delay;
reg B_po_counter_load_en;
reg [8:0] B_po_counter_load_val;
wire B_rclk;
reg B_idelay_ce;
reg B_idelay_ld;
reg [29:0] B_fine_delay;
reg B_fine_delay_sel;
reg C_pi_fine_inc;
reg D_pi_fine_inc;
reg C_pi_fine_enable;
reg D_pi_fine_enable;
reg C_po_counter_load_en;
reg D_po_counter_load_en;
reg C_po_coarse_inc;
reg D_po_coarse_inc;
reg C_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg D_po_fine_inc /* synthesis syn_maxfan = 3 */;
reg C_po_sel_fine_oclk_delay;
reg D_po_sel_fine_oclk_delay;
reg [5:0] C_pi_counter_load_val;
reg [5:0] D_pi_counter_load_val;
reg [8:0] C_po_counter_load_val;
reg [8:0] D_po_counter_load_val;
reg C_po_coarse_enable;
reg D_po_coarse_enable;
reg C_po_fine_enable;
reg D_po_fine_enable;
wire C_po_coarse_overflow;
wire D_po_coarse_overflow;
wire C_po_fine_overflow;
wire D_po_fine_overflow;
wire [8:0] C_po_counter_read_val;
wire [8:0] D_po_counter_read_val;
reg C_po_counter_read_en;
reg D_po_counter_read_en;
wire C_pi_dqs_found;
wire D_pi_dqs_found;
wire C_pi_fine_overflow;
wire D_pi_fine_overflow;
reg C_pi_counter_read_en;
reg D_pi_counter_read_en;
reg C_pi_counter_load_en;
reg D_pi_counter_load_en;
wire C_pi_phase_locked;
wire C_pi_iserdes_rst;
wire D_pi_phase_locked;
wire D_pi_iserdes_rst;
wire C_pi_dqs_out_of_range;
wire D_pi_dqs_out_of_range;
wire [5:0] C_pi_counter_read_val;
wire [5:0] D_pi_counter_read_val;
wire C_rclk;
wire D_rclk;
reg C_idelay_ce;
reg D_idelay_ce;
reg C_idelay_ld;
reg D_idelay_ld;
reg C_pi_rst_dqs_find;
reg D_pi_rst_dqs_find;
reg [29:0] C_fine_delay;
reg [29:0] D_fine_delay;
reg C_fine_delay_sel;
reg D_fine_delay_sel;
wire pi_iserdes_rst;
wire A_if_empty;
wire B_if_empty;
wire C_if_empty;
wire D_if_empty;
wire A_byte_rd_en;
wire B_byte_rd_en;
wire C_byte_rd_en;
wire D_byte_rd_en;
wire A_if_a_empty;
wire B_if_a_empty;
wire C_if_a_empty;
wire D_if_a_empty;
//wire A_if_full;
//wire B_if_full;
//wire C_if_full;
//wire D_if_full;
//wire A_of_empty;
//wire B_of_empty;
//wire C_of_empty;
//wire D_of_empty;
wire A_of_full;
wire B_of_full;
wire C_of_full;
wire D_of_full;
wire A_of_ctl_full;
wire B_of_ctl_full;
wire C_of_ctl_full;
wire D_of_ctl_full;
wire A_of_data_full;
wire B_of_data_full;
wire C_of_data_full;
wire D_of_data_full;
wire A_of_a_full;
wire B_of_a_full;
wire C_of_a_full;
wire D_of_a_full;
wire A_pre_fifo_a_full;
wire B_pre_fifo_a_full;
wire C_pre_fifo_a_full;
wire D_pre_fifo_a_full;
wire A_of_ctl_a_full;
wire B_of_ctl_a_full;
wire C_of_ctl_a_full;
wire D_of_ctl_a_full;
wire A_of_data_a_full;
wire B_of_data_a_full;
wire C_of_data_a_full;
wire D_of_data_a_full;
wire A_pre_data_a_full;
wire B_pre_data_a_full;
wire C_pre_data_a_full;
wire D_pre_data_a_full;
wire [LP_DDR_CK_WIDTH*6-1:0] A_ddr_clk; // for generation
wire [LP_DDR_CK_WIDTH*6-1:0] B_ddr_clk; //
wire [LP_DDR_CK_WIDTH*6-1:0] C_ddr_clk; //
wire [LP_DDR_CK_WIDTH*6-1:0] D_ddr_clk; //
wire [3:0] dummy_data;
wire [31:0] _phy_ctl_wd;
wire [1:0] phy_encalib;
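  // reduce the per-lane DQS-found / phase-locked status; lanes that are not
  // present as data lanes are masked out of the reductions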
assign pi_dqs_found_all =
(! PRESENT_DATA_A | A_pi_dqs_found) &
(! PRESENT_DATA_B | B_pi_dqs_found) &
(! PRESENT_DATA_C | C_pi_dqs_found) &
(! PRESENT_DATA_D | D_pi_dqs_found) ;
assign pi_dqs_found_any =
( PRESENT_DATA_A & A_pi_dqs_found) |
( PRESENT_DATA_B & B_pi_dqs_found) |
( PRESENT_DATA_C & C_pi_dqs_found) |
( PRESENT_DATA_D & D_pi_dqs_found) ;
assign pi_phase_locked_all =
(! PRESENT_DATA_A | A_pi_phase_locked) &
(! PRESENT_DATA_B | B_pi_phase_locked) &
(! PRESENT_DATA_C | C_pi_phase_locked) &
(! PRESENT_DATA_D | D_pi_phase_locked);
wire dangling_inputs = (& dummy_data) & input_sink & 1'b0; // this reduces all constant 0 values to 1 signal
                                          // which is combined into another signal such that
                                          // the other signal isn't changed. The purpose
                                          // is to trick the tools into ignoring dangling inputs.
// Because it is anded with 1'b0, the contributing signals
// are folded as constants or trimmed.
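  // if_empty_def selects whether the bank-level empty and byte_rd_en flags are
  // the OR or the AND reduction of the per-lane flags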
assign if_empty = !if_empty_def ? (A_if_empty | B_if_empty | C_if_empty | D_if_empty) : (A_if_empty & B_if_empty & C_if_empty & D_if_empty);
assign byte_rd_en = !if_empty_def ? (A_byte_rd_en & B_byte_rd_en & C_byte_rd_en & D_byte_rd_en) :
(A_byte_rd_en | B_byte_rd_en | C_byte_rd_en | D_byte_rd_en);
assign if_empty_or = (A_if_empty | B_if_empty | C_if_empty | D_if_empty);
assign if_empty_and = (A_if_empty & B_if_empty & C_if_empty & D_if_empty);
assign if_a_empty = A_if_a_empty | B_if_a_empty | C_if_a_empty | D_if_a_empty;
//assign if_full = A_if_full | B_if_full | C_if_full | D_if_full ;
//assign of_empty = A_of_empty & B_of_empty & C_of_empty & D_of_empty;
assign of_ctl_full = A_of_ctl_full | B_of_ctl_full | C_of_ctl_full | D_of_ctl_full ;
assign of_data_full = A_of_data_full | B_of_data_full | C_of_data_full | D_of_data_full ;
assign of_ctl_a_full = A_of_ctl_a_full | B_of_ctl_a_full | C_of_ctl_a_full | D_of_ctl_a_full ;
assign of_data_a_full = A_of_data_a_full | B_of_data_a_full | C_of_data_a_full | D_of_data_a_full | dangling_inputs ;
assign pre_data_a_full = A_pre_data_a_full | B_pre_data_a_full | C_pre_data_a_full | D_pre_data_a_full;
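  // part_select_80 returns the 80-bit slice of a 320-bit vector selected by a
  // 2-bit lane index; used with LANE_REMAP to steer phy_dout to a physical lane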
function [79:0] part_select_80;
input [319:0] vector;
input [1:0] select;
begin
case (select)
2'b00 : part_select_80[79:0] = vector[1*80-1:0*80];
2'b01 : part_select_80[79:0] = vector[2*80-1:1*80];
2'b10 : part_select_80[79:0] = vector[3*80-1:2*80];
2'b11 : part_select_80[79:0] = vector[4*80-1:3*80];
endcase
end
endfunction
wire [319:0] phy_dout_remap;
reg rst_out_trig = 1'b0;
reg [31:0] rclk_delay;
reg rst_edge1 = 1'b0;
reg rst_edge2 = 1'b0;
reg rst_edge3 = 1'b0;
reg rst_edge_detect = 1'b0;
wire rclk_;
reg rst_out_start = 1'b0 ;
reg rst_primitives=0;
reg A_rst_primitives=0;
reg B_rst_primitives=0;
reg C_rst_primitives=0;
reg D_rst_primitives=0;
`ifdef USE_PHY_CONTROL_TEST
wire [15:0] test_output;
wire [15:0] test_input;
wire [2:0] test_select=0;
wire scan_enable = 0;
`endif
generate
genvar i;
if (RCLK_SELECT_LANE == "A") begin
assign rclk_ = A_rclk;
assign pi_iserdes_rst = A_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "B") begin
assign rclk_ = B_rclk;
assign pi_iserdes_rst = B_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "C") begin
assign rclk_ = C_rclk;
assign pi_iserdes_rst = C_pi_iserdes_rst;
end
else if (RCLK_SELECT_LANE == "D") begin
assign rclk_ = D_rclk;
assign pi_iserdes_rst = D_pi_iserdes_rst;
end
else begin
assign rclk_ = B_rclk; // default
end
endgenerate
assign ddr_clk[LP_DDR_CK_WIDTH*6-1:0] = A_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*12-1:LP_DDR_CK_WIDTH*6] = B_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*18-1:LP_DDR_CK_WIDTH*12] = C_ddr_clk;
assign ddr_clk[LP_DDR_CK_WIDTH*24-1:LP_DDR_CK_WIDTH*18] = D_ddr_clk;
assign pi_phase_locked_lanes =
{(! PRESENT_DATA_A[0] | A_pi_phase_locked),
(! PRESENT_DATA_B[0] | B_pi_phase_locked) ,
(! PRESENT_DATA_C[0] | C_pi_phase_locked) ,
(! PRESENT_DATA_D[0] | D_pi_phase_locked)};
assign pi_dqs_found_lanes = {D_pi_dqs_found, C_pi_dqs_found, B_pi_dqs_found, A_pi_dqs_found};
// this block scrubs X from rclk_delay[11]
reg rclk_delay_11;
always @(rclk_delay[11]) begin : rclk_delay_11_blk
if ( rclk_delay[11])
rclk_delay_11 = 1;
else
rclk_delay_11 = 0;
end
always @(posedge phy_clk or posedge rst ) begin
// scrub 4-state values from rclk_delay[11]
if ( rst) begin
rst_out <= #1 0;
end
else begin
if ( rclk_delay_11)
rst_out <= #1 1;
end
end
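  // rclk_delay is a shift register that fills with ones while phyGo is high and
  // the primitives are out of reset; rst_out asserts once bit 11 is set (about a
  // dozen phy_clk cycles after phyGo) and mcGo follows one cycle later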
always @(posedge phy_clk ) begin
// phy_ctl_ready drives reset of the system
rst_primitives <= !phy_ctl_ready ;
A_rst_primitives <= rst_primitives ;
B_rst_primitives <= rst_primitives ;
C_rst_primitives <= rst_primitives ;
D_rst_primitives <= rst_primitives ;
rclk_delay <= #1 (rclk_delay << 1) | (!rst_primitives && phyGo);
mcGo <= #1 rst_out ;
end
generate
if (BYTE_LANES[0]) begin
assign dummy_data[0] = 0;
end
else begin
assign dummy_data[0] = &phy_dout_remap[1*80-1:0*80];
end
if (BYTE_LANES[1]) begin
assign dummy_data[1] = 0;
end
else begin
assign dummy_data[1] = &phy_dout_remap[2*80-1:1*80];
end
if (BYTE_LANES[2]) begin
assign dummy_data[2] = 0;
end
else begin
assign dummy_data[2] = &phy_dout_remap[3*80-1:2*80];
end
if (BYTE_LANES[3]) begin
assign dummy_data[3] = 0;
end
else begin
assign dummy_data[3] = &phy_dout_remap[4*80-1:3*80];
end
if (PRESENT_DATA_A) begin
assign A_of_data_full = A_of_full;
assign A_of_ctl_full = 0;
assign A_of_data_a_full = A_of_a_full;
assign A_of_ctl_a_full = 0;
assign A_pre_data_a_full = A_pre_fifo_a_full;
end
else begin
assign A_of_ctl_full = A_of_full;
assign A_of_data_full = 0;
assign A_of_ctl_a_full = A_of_a_full;
assign A_of_data_a_full = 0;
assign A_pre_data_a_full = 0;
end
if (PRESENT_DATA_B) begin
assign B_of_data_full = B_of_full;
assign B_of_ctl_full = 0;
assign B_of_data_a_full = B_of_a_full;
assign B_of_ctl_a_full = 0;
assign B_pre_data_a_full = B_pre_fifo_a_full;
end
else begin
assign B_of_ctl_full = B_of_full;
assign B_of_data_full = 0;
assign B_of_ctl_a_full = B_of_a_full;
assign B_of_data_a_full = 0;
assign B_pre_data_a_full = 0;
end
if (PRESENT_DATA_C) begin
assign C_of_data_full = C_of_full;
assign C_of_ctl_full = 0;
assign C_of_data_a_full = C_of_a_full;
assign C_of_ctl_a_full = 0;
assign C_pre_data_a_full = C_pre_fifo_a_full;
end
else begin
assign C_of_ctl_full = C_of_full;
assign C_of_data_full = 0;
assign C_of_ctl_a_full = C_of_a_full;
assign C_of_data_a_full = 0;
assign C_pre_data_a_full = 0;
end
if (PRESENT_DATA_D) begin
assign D_of_data_full = D_of_full;
assign D_of_ctl_full = 0;
assign D_of_data_a_full = D_of_a_full;
assign D_of_ctl_a_full = 0;
assign D_pre_data_a_full = D_pre_fifo_a_full;
end
else begin
assign D_of_ctl_full = D_of_full;
assign D_of_data_full = 0;
assign D_of_ctl_a_full = D_of_a_full;
assign D_of_data_a_full = 0;
assign D_pre_data_a_full = 0;
end
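  // steer each lane's read data onto the phy_din slot selected by its LANE_REMAP
  // nibble; slots belonging to absent or control-only lanes are driven with zeros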
// byte lane must exist and be data lane.
if (PRESENT_DATA_A )
case ( LANE_REMAP[1:0] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[79:0];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[79:0];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[79:0];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[79:0];
endcase
else
case ( LANE_REMAP[1:0] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_B )
case ( LANE_REMAP[5:4] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[159:80];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[159:80];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[159:80];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[159:80];
endcase
else
if (HIGHEST_LANE > 1)
case ( LANE_REMAP[5:4] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_C)
case ( LANE_REMAP[9:8] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[239:160];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[239:160];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[239:160];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[239:160];
endcase
else
if (HIGHEST_LANE > 2)
case ( LANE_REMAP[9:8] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (PRESENT_DATA_D )
case ( LANE_REMAP[13:12] )
2'b00 : assign phy_din[1*80-1:0] = phy_din_remap[319:240];
2'b01 : assign phy_din[2*80-1:80] = phy_din_remap[319:240];
2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[319:240];
2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[319:240];
endcase
else
if (HIGHEST_LANE > 3)
case ( LANE_REMAP[13:12] )
2'b00 : assign phy_din[1*80-1:0] = 80'h0;
2'b01 : assign phy_din[2*80-1:80] = 80'h0;
2'b10 : assign phy_din[3*80-1:160] = 80'h0;
2'b11 : assign phy_din[4*80-1:240] = 80'h0;
endcase
if (HIGHEST_LANE > 1)
assign _phy_ctl_wd = {phy_ctl_wd[31:23], data_offset, phy_ctl_wd[16:0]};
if (HIGHEST_LANE == 1)
assign _phy_ctl_wd = phy_ctl_wd;
//BUFR #(.BUFR_DIVIDE ("1")) rclk_buf(.I(rclk_), .O(rclk), .CE (1'b1), .CLR (pi_iserdes_rst));
BUFIO rclk_buf(.I(rclk_), .O(rclk) );
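  // one ddr_byte_lane instance per populated byte lane (A..D); unpopulated lanes
  // get benign tie-offs so the bank-level full/empty/locked reductions still resolve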
if ( BYTE_LANES[0] ) begin : ddr_byte_lane_A
assign phy_dout_remap[79:0] = part_select_80(phy_dout, (LANE_REMAP[1:0]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("A"),
.PO_DATA_CTL (PC_DATA_CTL_N[0] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[11:0]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[11:0]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (A_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (A_PI_BURST_MODE),
.PI_CLKOUT_DIV (A_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (A_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (A_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (A_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (A_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (A_PO_CLKOUT_DIV),
.PO_FINE_DELAY (A_PO_FINE_DELAY),
.PO_COARSE_BYPASS (A_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (A_PO_COARSE_DELAY),
.PO_OCLK_DELAY (A_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (A_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (A_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (A_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (A_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (A_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (A_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (A_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_A(
.mem_dq_out (mem_dq_out[11:0]),
.mem_dq_ts (mem_dq_ts[11:0]),
.mem_dq_in (mem_dq_in[9:0]),
.mem_dqs_out (mem_dqs_out[0]),
.mem_dqs_ts (mem_dqs_ts[0]),
.mem_dqs_in (mem_dqs_in[0]),
.rst (A_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (A_ddr_clk),
.rclk (A_rclk),
.pi_dqs_found (A_pi_dqs_found),
.dqs_out_of_range (A_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (A_if_a_empty),
.if_empty (A_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*A_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*A_of_empty*/),
.of_a_full (A_of_a_full),
.of_full (A_of_full),
.pre_fifo_a_full (A_pre_fifo_a_full),
.phy_din (phy_din_remap[79:0]),
.phy_dout (phy_dout_remap[79:0]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({B_byte_rd_en,C_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (A_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (A_idelay_ce),
.idelay_ld (A_idelay_ld),
.pi_rst_dqs_find (A_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (A_po_fine_enable),
.po_coarse_enable (A_po_coarse_enable),
.po_fine_inc (A_po_fine_inc),
.po_coarse_inc (A_po_coarse_inc),
.po_counter_load_en (A_po_counter_load_en),
.po_counter_read_en (A_po_counter_read_en),
.po_counter_load_val (A_po_counter_load_val),
.po_coarse_overflow (A_po_coarse_overflow),
.po_fine_overflow (A_po_fine_overflow),
.po_counter_read_val (A_po_counter_read_val),
.po_sel_fine_oclk_delay(A_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (A_pi_fine_enable),
.pi_fine_inc (A_pi_fine_inc),
.pi_counter_load_en (A_pi_counter_load_en),
.pi_counter_read_en (A_pi_counter_read_en),
.pi_counter_load_val (A_pi_counter_load_val),
.pi_fine_overflow (A_pi_fine_overflow),
.pi_counter_read_val (A_pi_counter_read_val),
.pi_iserdes_rst (A_pi_iserdes_rst),
.pi_phase_locked (A_pi_phase_locked),
.fine_delay (A_fine_delay),
.fine_delay_sel (A_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_A
assign A_of_a_full = 1'b0;
assign A_of_full = 1'b0;
assign A_pre_fifo_a_full = 1'b0;
assign A_if_empty = 1'b0;
assign A_byte_rd_en = 1'b1;
assign A_if_a_empty = 1'b0;
assign A_pi_phase_locked = 1;
assign A_pi_dqs_found = 1;
assign A_rclk = 0;
assign A_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign A_pi_counter_read_val = 0;
assign A_po_counter_read_val = 0;
assign A_pi_fine_overflow = 0;
assign A_po_coarse_overflow = 0;
assign A_po_fine_overflow = 0;
end
if ( BYTE_LANES[1] ) begin : ddr_byte_lane_B
assign phy_dout_remap[159:80] = part_select_80(phy_dout, (LANE_REMAP[5:4]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("B"),
.PO_DATA_CTL (PC_DATA_CTL_N[1] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[23:12]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[23:12]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (B_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (B_PI_BURST_MODE),
.PI_CLKOUT_DIV (B_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (B_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (B_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (B_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (B_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (B_PO_CLKOUT_DIV),
.PO_FINE_DELAY (B_PO_FINE_DELAY),
.PO_COARSE_BYPASS (B_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (B_PO_COARSE_DELAY),
.PO_OCLK_DELAY (B_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (B_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (B_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (B_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (B_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (B_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (B_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (B_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_B(
.mem_dq_out (mem_dq_out[23:12]),
.mem_dq_ts (mem_dq_ts[23:12]),
.mem_dq_in (mem_dq_in[19:10]),
.mem_dqs_out (mem_dqs_out[1]),
.mem_dqs_ts (mem_dqs_ts[1]),
.mem_dqs_in (mem_dqs_in[1]),
.rst (B_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (B_ddr_clk),
.rclk (B_rclk),
.pi_dqs_found (B_pi_dqs_found),
.dqs_out_of_range (B_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (B_if_a_empty),
.if_empty (B_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*B_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*B_of_empty*/),
.of_a_full (B_of_a_full),
.of_full (B_of_full),
.pre_fifo_a_full (B_pre_fifo_a_full),
.phy_din (phy_din_remap[159:80]),
.phy_dout (phy_dout_remap[159:80]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,C_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (B_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (B_idelay_ce),
.idelay_ld (B_idelay_ld),
.pi_rst_dqs_find (B_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (B_po_fine_enable),
.po_coarse_enable (B_po_coarse_enable),
.po_fine_inc (B_po_fine_inc),
.po_coarse_inc (B_po_coarse_inc),
.po_counter_load_en (B_po_counter_load_en),
.po_counter_read_en (B_po_counter_read_en),
.po_counter_load_val (B_po_counter_load_val),
.po_coarse_overflow (B_po_coarse_overflow),
.po_fine_overflow (B_po_fine_overflow),
.po_counter_read_val (B_po_counter_read_val),
.po_sel_fine_oclk_delay(B_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (B_pi_fine_enable),
.pi_fine_inc (B_pi_fine_inc),
.pi_counter_load_en (B_pi_counter_load_en),
.pi_counter_read_en (B_pi_counter_read_en),
.pi_counter_load_val (B_pi_counter_load_val),
.pi_fine_overflow (B_pi_fine_overflow),
.pi_counter_read_val (B_pi_counter_read_val),
.pi_iserdes_rst (B_pi_iserdes_rst),
.pi_phase_locked (B_pi_phase_locked),
.fine_delay (B_fine_delay),
.fine_delay_sel (B_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_B
assign B_of_a_full = 1'b0;
assign B_of_full = 1'b0;
assign B_pre_fifo_a_full = 1'b0;
assign B_if_empty = 1'b0;
assign B_if_a_empty = 1'b0;
assign B_byte_rd_en = 1'b1;
assign B_pi_phase_locked = 1;
assign B_pi_dqs_found = 1;
assign B_rclk = 0;
assign B_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign B_pi_counter_read_val = 0;
assign B_po_counter_read_val = 0;
assign B_pi_fine_overflow = 0;
assign B_po_coarse_overflow = 0;
assign B_po_fine_overflow = 0;
end
if ( BYTE_LANES[2] ) begin : ddr_byte_lane_C
assign phy_dout_remap[239:160] = part_select_80(phy_dout, (LANE_REMAP[9:8]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("C"),
.PO_DATA_CTL (PC_DATA_CTL_N[2] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[35:24]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[35:24]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (C_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (C_PI_BURST_MODE),
.PI_CLKOUT_DIV (C_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (C_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (C_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (C_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (C_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (C_PO_CLKOUT_DIV),
.PO_FINE_DELAY (C_PO_FINE_DELAY),
.PO_COARSE_BYPASS (C_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (C_PO_COARSE_DELAY),
.PO_OCLK_DELAY (C_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (C_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (C_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (C_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (C_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (C_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (C_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (C_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_C(
.mem_dq_out (mem_dq_out[35:24]),
.mem_dq_ts (mem_dq_ts[35:24]),
.mem_dq_in (mem_dq_in[29:20]),
.mem_dqs_out (mem_dqs_out[2]),
.mem_dqs_ts (mem_dqs_ts[2]),
.mem_dqs_in (mem_dqs_in[2]),
.rst (C_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (C_ddr_clk),
.rclk (C_rclk),
.pi_dqs_found (C_pi_dqs_found),
.dqs_out_of_range (C_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (C_if_a_empty),
.if_empty (C_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*C_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*C_of_empty*/),
.of_a_full (C_of_a_full),
.of_full (C_of_full),
.pre_fifo_a_full (C_pre_fifo_a_full),
.phy_din (phy_din_remap[239:160]),
.phy_dout (phy_dout_remap[239:160]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,B_byte_rd_en,D_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (C_byte_rd_en),
// calibration signals
.idelay_inc (idelay_inc),
.idelay_ce (C_idelay_ce),
.idelay_ld (C_idelay_ld),
.pi_rst_dqs_find (C_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (C_po_fine_enable),
.po_coarse_enable (C_po_coarse_enable),
.po_fine_inc (C_po_fine_inc),
.po_coarse_inc (C_po_coarse_inc),
.po_counter_load_en (C_po_counter_load_en),
.po_counter_read_en (C_po_counter_read_en),
.po_counter_load_val (C_po_counter_load_val),
.po_coarse_overflow (C_po_coarse_overflow),
.po_fine_overflow (C_po_fine_overflow),
.po_counter_read_val (C_po_counter_read_val),
.po_sel_fine_oclk_delay(C_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (C_pi_fine_enable),
.pi_fine_inc (C_pi_fine_inc),
.pi_counter_load_en (C_pi_counter_load_en),
.pi_counter_read_en (C_pi_counter_read_en),
.pi_counter_load_val (C_pi_counter_load_val),
.pi_fine_overflow (C_pi_fine_overflow),
.pi_counter_read_val (C_pi_counter_read_val),
.pi_iserdes_rst (C_pi_iserdes_rst),
.pi_phase_locked (C_pi_phase_locked),
.fine_delay (C_fine_delay),
.fine_delay_sel (C_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_C
assign C_of_a_full = 1'b0;
assign C_of_full = 1'b0;
assign C_pre_fifo_a_full = 1'b0;
assign C_if_empty = 1'b0;
assign C_byte_rd_en = 1'b1;
assign C_if_a_empty = 1'b0;
assign C_pi_phase_locked = 1;
assign C_pi_dqs_found = 1;
assign C_rclk = 0;
assign C_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign C_pi_counter_read_val = 0;
assign C_po_counter_read_val = 0;
assign C_pi_fine_overflow = 0;
assign C_po_coarse_overflow = 0;
assign C_po_fine_overflow = 0;
end
if ( BYTE_LANES[3] ) begin : ddr_byte_lane_D
assign phy_dout_remap[319:240] = part_select_80(phy_dout, (LANE_REMAP[13:12]));
mig_7series_v2_3_ddr_byte_lane #
(
.ABCD ("D"),
.PO_DATA_CTL (PC_DATA_CTL_N[3] ? "TRUE" : "FALSE"),
.BITLANES (BITLANES[47:36]),
.BITLANES_OUTONLY (BITLANES_OUTONLY[47:36]),
.OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE),
.OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE),
.OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE),
//.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE),
//.OF_ARRAY_MODE (D_OF_ARRAY_MODE),
//.IF_ARRAY_MODE (IF_ARRAY_MODE),
.IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE),
.IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE),
.IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE),
.IODELAY_GRP (IODELAY_GRP),
.FPGA_SPEED_GRADE (FPGA_SPEED_GRADE),
.BANK_TYPE (BANK_TYPE),
.BYTELANES_DDR_CK (BYTELANES_DDR_CK),
.RCLK_SELECT_LANE (RCLK_SELECT_LANE),
.USE_PRE_POST_FIFO (USE_PRE_POST_FIFO),
.SYNTHESIS (SYNTHESIS),
.TCK (TCK),
.PC_CLK_RATIO (PC_CLK_RATIO),
.PI_BURST_MODE (D_PI_BURST_MODE),
.PI_CLKOUT_DIV (D_PI_CLKOUT_DIV),
.PI_FREQ_REF_DIV (D_PI_FREQ_REF_DIV),
.PI_FINE_DELAY (D_PI_FINE_DELAY),
.PI_OUTPUT_CLK_SRC (D_PI_OUTPUT_CLK_SRC),
.PI_SYNC_IN_DIV_RST (D_PI_SYNC_IN_DIV_RST),
.PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET),
.PO_CLKOUT_DIV (D_PO_CLKOUT_DIV),
.PO_FINE_DELAY (D_PO_FINE_DELAY),
.PO_COARSE_BYPASS (D_PO_COARSE_BYPASS),
.PO_COARSE_DELAY (D_PO_COARSE_DELAY),
.PO_OCLK_DELAY (D_PO_OCLK_DELAY),
.PO_OCLKDELAY_INV (D_PO_OCLKDELAY_INV),
.PO_OUTPUT_CLK_SRC (D_PO_OUTPUT_CLK_SRC),
.PO_SYNC_IN_DIV_RST (D_PO_SYNC_IN_DIV_RST),
.OSERDES_DATA_RATE (D_OS_DATA_RATE),
.OSERDES_DATA_WIDTH (D_OS_DATA_WIDTH),
.IDELAYE2_IDELAY_TYPE (D_IDELAYE2_IDELAY_TYPE),
.IDELAYE2_IDELAY_VALUE (D_IDELAYE2_IDELAY_VALUE)
,.CKE_ODT_AUX (CKE_ODT_AUX)
)
ddr_byte_lane_D(
.mem_dq_out (mem_dq_out[47:36]),
.mem_dq_ts (mem_dq_ts[47:36]),
.mem_dq_in (mem_dq_in[39:30]),
.mem_dqs_out (mem_dqs_out[3]),
.mem_dqs_ts (mem_dqs_ts[3]),
.mem_dqs_in (mem_dqs_in[3]),
.rst (D_rst_primitives),
.phy_clk (phy_clk),
.freq_refclk (freq_refclk),
.mem_refclk (mem_refclk),
.idelayctrl_refclk (idelayctrl_refclk),
.sync_pulse (sync_pulse),
.ddr_ck_out (D_ddr_clk),
.rclk (D_rclk),
.pi_dqs_found (D_pi_dqs_found),
.dqs_out_of_range (D_pi_dqs_out_of_range),
.if_empty_def (if_empty_def),
.if_a_empty (D_if_a_empty),
.if_empty (D_if_empty),
.if_a_full (/*if_a_full*/),
.if_full (/*D_if_full*/),
.of_a_empty (/*of_a_empty*/),
.of_empty (/*D_of_empty*/),
.of_a_full (D_of_a_full),
.of_full (D_of_full),
.pre_fifo_a_full (D_pre_fifo_a_full),
.phy_din (phy_din_remap[319:240]),
.phy_dout (phy_dout_remap[319:240]),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phaser_ctl_bus (phaser_ctl_bus),
.idelay_inc (idelay_inc),
.idelay_ce (D_idelay_ce),
.idelay_ld (D_idelay_ld),
.if_rst (if_rst),
.byte_rd_en_oth_lanes ({A_byte_rd_en,B_byte_rd_en,C_byte_rd_en}),
.byte_rd_en_oth_banks (byte_rd_en_oth_banks),
.byte_rd_en (D_byte_rd_en),
// calibration signals
.pi_rst_dqs_find (D_pi_rst_dqs_find),
.po_en_calib (phy_encalib),
.po_fine_enable (D_po_fine_enable),
.po_coarse_enable (D_po_coarse_enable),
.po_fine_inc (D_po_fine_inc),
.po_coarse_inc (D_po_coarse_inc),
.po_counter_load_en (D_po_counter_load_en),
.po_counter_read_en (D_po_counter_read_en),
.po_counter_load_val (D_po_counter_load_val),
.po_coarse_overflow (D_po_coarse_overflow),
.po_fine_overflow (D_po_fine_overflow),
.po_counter_read_val (D_po_counter_read_val),
.po_sel_fine_oclk_delay(D_po_sel_fine_oclk_delay),
.pi_en_calib (phy_encalib),
.pi_fine_enable (D_pi_fine_enable),
.pi_fine_inc (D_pi_fine_inc),
.pi_counter_load_en (D_pi_counter_load_en),
.pi_counter_read_en (D_pi_counter_read_en),
.pi_counter_load_val (D_pi_counter_load_val),
.pi_fine_overflow (D_pi_fine_overflow),
.pi_counter_read_val (D_pi_counter_read_val),
.pi_iserdes_rst (D_pi_iserdes_rst),
.pi_phase_locked (D_pi_phase_locked),
.fine_delay (D_fine_delay),
.fine_delay_sel (D_fine_delay_sel)
);
end
else begin : no_ddr_byte_lane_D
assign D_of_a_full = 1'b0;
assign D_of_full = 1'b0;
assign D_pre_fifo_a_full = 1'b0;
assign D_if_empty = 1'b0;
assign D_byte_rd_en = 1'b1;
assign D_if_a_empty = 1'b0;
assign D_rclk = 0;
assign D_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}};
assign D_pi_dqs_found = 1;
assign D_pi_phase_locked = 1;
assign D_pi_counter_read_val = 0;
assign D_po_counter_read_val = 0;
assign D_pi_fine_overflow = 0;
assign D_po_coarse_overflow = 0;
assign D_po_fine_overflow = 0;
end
endgenerate
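  // single PHY_CONTROL block shared by the four byte lanes of this bank;
  // phaser_ctl_bus carries its burst-pending and rank-select fields to the lanes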
assign phaser_ctl_bus[MSB_RANK_SEL_I : MSB_RANK_SEL_I - 7] = in_rank;
PHY_CONTROL #(
.AO_WRLVL_EN ( PC_AO_WRLVL_EN),
.AO_TOGGLE ( PC_AO_TOGGLE),
.BURST_MODE ( PC_BURST_MODE),
.CO_DURATION ( PC_CO_DURATION ),
.CLK_RATIO ( PC_CLK_RATIO),
.DATA_CTL_A_N ( PC_DATA_CTL_A),
.DATA_CTL_B_N ( PC_DATA_CTL_B),
.DATA_CTL_C_N ( PC_DATA_CTL_C),
.DATA_CTL_D_N ( PC_DATA_CTL_D),
.DI_DURATION ( PC_DI_DURATION ),
.DO_DURATION ( PC_DO_DURATION ),
.EVENTS_DELAY ( PC_EVENTS_DELAY),
.FOUR_WINDOW_CLOCKS ( PC_FOUR_WINDOW_CLOCKS),
.MULTI_REGION ( PC_MULTI_REGION ),
.PHY_COUNT_ENABLE ( PC_PHY_COUNT_EN),
.DISABLE_SEQ_MATCH ( PC_DISABLE_SEQ_MATCH),
.SYNC_MODE ( PC_SYNC_MODE),
.CMD_OFFSET ( PC_CMD_OFFSET),
.RD_CMD_OFFSET_0 ( PC_RD_CMD_OFFSET_0),
.RD_CMD_OFFSET_1 ( PC_RD_CMD_OFFSET_1),
.RD_CMD_OFFSET_2 ( PC_RD_CMD_OFFSET_2),
.RD_CMD_OFFSET_3 ( PC_RD_CMD_OFFSET_3),
.RD_DURATION_0 ( PC_RD_DURATION_0),
.RD_DURATION_1 ( PC_RD_DURATION_1),
.RD_DURATION_2 ( PC_RD_DURATION_2),
.RD_DURATION_3 ( PC_RD_DURATION_3),
.WR_CMD_OFFSET_0 ( PC_WR_CMD_OFFSET_0),
.WR_CMD_OFFSET_1 ( PC_WR_CMD_OFFSET_1),
.WR_CMD_OFFSET_2 ( PC_WR_CMD_OFFSET_2),
.WR_CMD_OFFSET_3 ( PC_WR_CMD_OFFSET_3),
.WR_DURATION_0 ( PC_WR_DURATION_0),
.WR_DURATION_1 ( PC_WR_DURATION_1),
.WR_DURATION_2 ( PC_WR_DURATION_2),
.WR_DURATION_3 ( PC_WR_DURATION_3)
) phy_control_i (
.AUXOUTPUT (aux_out),
.INBURSTPENDING (phaser_ctl_bus[MSB_BURST_PEND_PI:MSB_BURST_PEND_PI-3]),
.INRANKA (in_rank[1:0]),
.INRANKB (in_rank[3:2]),
.INRANKC (in_rank[5:4]),
.INRANKD (in_rank[7:6]),
.OUTBURSTPENDING (phaser_ctl_bus[MSB_BURST_PEND_PO:MSB_BURST_PEND_PO-3]),
.PCENABLECALIB (phy_encalib),
.PHYCTLALMOSTFULL (phy_ctl_a_full),
.PHYCTLEMPTY (phy_ctl_empty),
.PHYCTLFULL (phy_ctl_full),
.PHYCTLREADY (phy_ctl_ready),
.MEMREFCLK (mem_refclk),
.PHYCLK (phy_ctl_clk),
.PHYCTLMSTREMPTY (phy_ctl_mstr_empty),
.PHYCTLWD (_phy_ctl_wd),
.PHYCTLWRENABLE (phy_ctl_wr),
.PLLLOCK (pll_lock),
.REFDLLLOCK (ref_dll_lock), // is reset while !locked
.RESET (rst),
.SYNCIN (sync_pulse),
.READCALIBENABLE (phy_read_calib),
.WRITECALIBENABLE (phy_write_calib)
`ifdef USE_PHY_CONTROL_TEST
, .TESTINPUT (16'b0),
.TESTOUTPUT (test_output),
.TESTSELECT (test_select),
.SCANENABLEN (scan_enable)
`endif
);
// register outputs to give extra slack in timing
always @(posedge phy_clk ) begin
case (calib_sel[1:0])
2'h0: begin
po_coarse_overflow <= #1 A_po_coarse_overflow;
po_fine_overflow <= #1 A_po_fine_overflow;
po_counter_read_val <= #1 A_po_counter_read_val;
pi_fine_overflow <= #1 A_pi_fine_overflow;
pi_counter_read_val<= #1 A_pi_counter_read_val;
pi_phase_locked <= #1 A_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 A_pi_dqs_found;
pi_dqs_out_of_range <= #1 A_pi_dqs_out_of_range;
end
2'h1: begin
po_coarse_overflow <= #1 B_po_coarse_overflow;
po_fine_overflow <= #1 B_po_fine_overflow;
po_counter_read_val <= #1 B_po_counter_read_val;
pi_fine_overflow <= #1 B_pi_fine_overflow;
pi_counter_read_val <= #1 B_pi_counter_read_val;
pi_phase_locked <= #1 B_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 B_pi_dqs_found;
pi_dqs_out_of_range <= #1 B_pi_dqs_out_of_range;
end
2'h2: begin
po_coarse_overflow <= #1 C_po_coarse_overflow;
po_fine_overflow <= #1 C_po_fine_overflow;
po_counter_read_val <= #1 C_po_counter_read_val;
pi_fine_overflow <= #1 C_pi_fine_overflow;
pi_counter_read_val <= #1 C_pi_counter_read_val;
pi_phase_locked <= #1 C_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 C_pi_dqs_found;
pi_dqs_out_of_range <= #1 C_pi_dqs_out_of_range;
end
2'h3: begin
po_coarse_overflow <= #1 D_po_coarse_overflow;
po_fine_overflow <= #1 D_po_fine_overflow;
po_counter_read_val <= #1 D_po_counter_read_val;
pi_fine_overflow <= #1 D_pi_fine_overflow;
pi_counter_read_val <= #1 D_pi_counter_read_val;
pi_phase_locked <= #1 D_pi_phase_locked;
if ( calib_in_common)
pi_dqs_found <= #1 pi_dqs_found_any;
else
pi_dqs_found <= #1 D_pi_dqs_found;
pi_dqs_out_of_range <= #1 D_pi_dqs_out_of_range;
end
default: begin
po_coarse_overflow <= po_coarse_overflow;
end
endcase
end
wire B_mux_ctrl;
wire C_mux_ctrl;
wire D_mux_ctrl;
generate
if (HIGHEST_LANE > 1)
assign B_mux_ctrl = ( !calib_zero_lanes[1] && ( ! calib_zero_ctrl || DATA_CTL_N[1]));
else
assign B_mux_ctrl = 0;
if (HIGHEST_LANE > 2)
assign C_mux_ctrl = ( !calib_zero_lanes[2] && (! calib_zero_ctrl || DATA_CTL_N[2]));
else
assign C_mux_ctrl = 0;
if (HIGHEST_LANE > 3)
assign D_mux_ctrl = ( !calib_zero_lanes[3] && ( ! calib_zero_ctrl || DATA_CTL_N[3]));
else
assign D_mux_ctrl = 0;
endgenerate
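  // Priority of the calibration control mux below: calib_sel[2] forces every
  // lane's calibration controls to zero; otherwise, when calib_in_common is
  // set, the common controls are broadcast to each lane whose mux_ctrl term
  // allows it (lane A evaluates the calib_zero_lanes[0]/DATA_CTL_N[0] term
  // directly); otherwise calib_sel[1:0] steers the controls to exactly one of
  // lanes A..D.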
always @(*) begin
A_pi_fine_enable = 0;
A_pi_fine_inc = 0;
A_pi_counter_load_en = 0;
A_pi_counter_read_en = 0;
A_pi_counter_load_val = 0;
A_pi_rst_dqs_find = 0;
A_po_fine_enable = 0;
A_po_coarse_enable = 0;
A_po_fine_inc = 0;
A_po_coarse_inc = 0;
A_po_counter_load_en = 0;
A_po_counter_read_en = 0;
A_po_counter_load_val = 0;
A_po_sel_fine_oclk_delay = 0;
A_idelay_ce = 0;
A_idelay_ld = 0;
A_fine_delay = 0;
A_fine_delay_sel = 0;
B_pi_fine_enable = 0;
B_pi_fine_inc = 0;
B_pi_counter_load_en = 0;
B_pi_counter_read_en = 0;
B_pi_counter_load_val = 0;
B_pi_rst_dqs_find = 0;
B_po_fine_enable = 0;
B_po_coarse_enable = 0;
B_po_fine_inc = 0;
B_po_coarse_inc = 0;
B_po_counter_load_en = 0;
B_po_counter_read_en = 0;
B_po_counter_load_val = 0;
B_po_sel_fine_oclk_delay = 0;
B_idelay_ce = 0;
B_idelay_ld = 0;
B_fine_delay = 0;
B_fine_delay_sel = 0;
C_pi_fine_enable = 0;
C_pi_fine_inc = 0;
C_pi_counter_load_en = 0;
C_pi_counter_read_en = 0;
C_pi_counter_load_val = 0;
C_pi_rst_dqs_find = 0;
C_po_fine_enable = 0;
C_po_coarse_enable = 0;
C_po_fine_inc = 0;
C_po_coarse_inc = 0;
C_po_counter_load_en = 0;
C_po_counter_read_en = 0;
C_po_counter_load_val = 0;
C_po_sel_fine_oclk_delay = 0;
C_idelay_ce = 0;
C_idelay_ld = 0;
C_fine_delay = 0;
C_fine_delay_sel = 0;
D_pi_fine_enable = 0;
D_pi_fine_inc = 0;
D_pi_counter_load_en = 0;
D_pi_counter_read_en = 0;
D_pi_counter_load_val = 0;
D_pi_rst_dqs_find = 0;
D_po_fine_enable = 0;
D_po_coarse_enable = 0;
D_po_fine_inc = 0;
D_po_coarse_inc = 0;
D_po_counter_load_en = 0;
D_po_counter_read_en = 0;
D_po_counter_load_val = 0;
D_po_sel_fine_oclk_delay = 0;
D_idelay_ce = 0;
D_idelay_ld = 0;
D_fine_delay = 0;
D_fine_delay_sel = 0;
if ( calib_sel[2]) begin
// if this is asserted, all calib signals are deasserted
A_pi_fine_enable = 0;
A_pi_fine_inc = 0;
A_pi_counter_load_en = 0;
A_pi_counter_read_en = 0;
A_pi_counter_load_val = 0;
A_pi_rst_dqs_find = 0;
A_po_fine_enable = 0;
A_po_coarse_enable = 0;
A_po_fine_inc = 0;
A_po_coarse_inc = 0;
A_po_counter_load_en = 0;
A_po_counter_read_en = 0;
A_po_counter_load_val = 0;
A_po_sel_fine_oclk_delay = 0;
A_idelay_ce = 0;
A_idelay_ld = 0;
A_fine_delay = 0;
A_fine_delay_sel = 0;
B_pi_fine_enable = 0;
B_pi_fine_inc = 0;
B_pi_counter_load_en = 0;
B_pi_counter_read_en = 0;
B_pi_counter_load_val = 0;
B_pi_rst_dqs_find = 0;
B_po_fine_enable = 0;
B_po_coarse_enable = 0;
B_po_fine_inc = 0;
B_po_coarse_inc = 0;
B_po_counter_load_en = 0;
B_po_counter_read_en = 0;
B_po_counter_load_val = 0;
B_po_sel_fine_oclk_delay = 0;
B_idelay_ce = 0;
B_idelay_ld = 0;
B_fine_delay = 0;
B_fine_delay_sel = 0;
C_pi_fine_enable = 0;
C_pi_fine_inc = 0;
C_pi_counter_load_en = 0;
C_pi_counter_read_en = 0;
C_pi_counter_load_val = 0;
C_pi_rst_dqs_find = 0;
C_po_fine_enable = 0;
C_po_coarse_enable = 0;
C_po_fine_inc = 0;
C_po_coarse_inc = 0;
C_po_counter_load_en = 0;
C_po_counter_read_en = 0;
C_po_counter_load_val = 0;
C_po_sel_fine_oclk_delay = 0;
C_idelay_ce = 0;
C_idelay_ld = 0;
C_fine_delay = 0;
C_fine_delay_sel = 0;
D_pi_fine_enable = 0;
D_pi_fine_inc = 0;
D_pi_counter_load_en = 0;
D_pi_counter_read_en = 0;
D_pi_counter_load_val = 0;
D_pi_rst_dqs_find = 0;
D_po_fine_enable = 0;
D_po_coarse_enable = 0;
D_po_fine_inc = 0;
D_po_coarse_inc = 0;
D_po_counter_load_en = 0;
D_po_counter_read_en = 0;
D_po_counter_load_val = 0;
D_po_sel_fine_oclk_delay = 0;
D_idelay_ce = 0;
D_idelay_ld = 0;
D_fine_delay = 0;
D_fine_delay_sel = 0;
end else
if (calib_in_common) begin
// if this is asserted, each signal is broadcast to all phasers
// in common
if ( !calib_zero_lanes[0] && (! calib_zero_ctrl || DATA_CTL_N[0])) begin
A_pi_fine_enable = pi_fine_enable;
A_pi_fine_inc = pi_fine_inc;
A_pi_counter_load_en = pi_counter_load_en;
A_pi_counter_read_en = pi_counter_read_en;
A_pi_counter_load_val = pi_counter_load_val;
A_pi_rst_dqs_find = pi_rst_dqs_find;
A_po_fine_enable = po_fine_enable;
A_po_coarse_enable = po_coarse_enable;
A_po_fine_inc = po_fine_inc;
A_po_coarse_inc = po_coarse_inc;
A_po_counter_load_en = po_counter_load_en;
A_po_counter_read_en = po_counter_read_en;
A_po_counter_load_val = po_counter_load_val;
A_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
A_idelay_ce = idelay_ce;
A_idelay_ld = idelay_ld;
A_fine_delay = fine_delay ;
A_fine_delay_sel = fine_delay_sel;
end
if ( B_mux_ctrl) begin
B_pi_fine_enable = pi_fine_enable;
B_pi_fine_inc = pi_fine_inc;
B_pi_counter_load_en = pi_counter_load_en;
B_pi_counter_read_en = pi_counter_read_en;
B_pi_counter_load_val = pi_counter_load_val;
B_pi_rst_dqs_find = pi_rst_dqs_find;
B_po_fine_enable = po_fine_enable;
B_po_coarse_enable = po_coarse_enable;
B_po_fine_inc = po_fine_inc;
B_po_coarse_inc = po_coarse_inc;
B_po_counter_load_en = po_counter_load_en;
B_po_counter_read_en = po_counter_read_en;
B_po_counter_load_val = po_counter_load_val;
B_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
B_idelay_ce = idelay_ce;
B_idelay_ld = idelay_ld;
B_fine_delay = fine_delay ;
B_fine_delay_sel = fine_delay_sel;
end
if ( C_mux_ctrl) begin
C_pi_fine_enable = pi_fine_enable;
C_pi_fine_inc = pi_fine_inc;
C_pi_counter_load_en = pi_counter_load_en;
C_pi_counter_read_en = pi_counter_read_en;
C_pi_counter_load_val = pi_counter_load_val;
C_pi_rst_dqs_find = pi_rst_dqs_find;
C_po_fine_enable = po_fine_enable;
C_po_coarse_enable = po_coarse_enable;
C_po_fine_inc = po_fine_inc;
C_po_coarse_inc = po_coarse_inc;
C_po_counter_load_en = po_counter_load_en;
C_po_counter_read_en = po_counter_read_en;
C_po_counter_load_val = po_counter_load_val;
C_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
C_idelay_ce = idelay_ce;
C_idelay_ld = idelay_ld;
C_fine_delay = fine_delay ;
C_fine_delay_sel = fine_delay_sel;
end
if ( D_mux_ctrl) begin
D_pi_fine_enable = pi_fine_enable;
D_pi_fine_inc = pi_fine_inc;
D_pi_counter_load_en = pi_counter_load_en;
D_pi_counter_read_en = pi_counter_read_en;
D_pi_counter_load_val = pi_counter_load_val;
D_pi_rst_dqs_find = pi_rst_dqs_find;
D_po_fine_enable = po_fine_enable;
D_po_coarse_enable = po_coarse_enable;
D_po_fine_inc = po_fine_inc;
D_po_coarse_inc = po_coarse_inc;
D_po_counter_load_en = po_counter_load_en;
D_po_counter_read_en = po_counter_read_en;
D_po_counter_load_val = po_counter_load_val;
D_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
D_idelay_ce = idelay_ce;
D_idelay_ld = idelay_ld;
D_fine_delay = fine_delay ;
D_fine_delay_sel = fine_delay_sel;
end
end
else begin
// otherwise, only a single phaser is selected
case (calib_sel[1:0])
0: begin
A_pi_fine_enable = pi_fine_enable;
A_pi_fine_inc = pi_fine_inc;
A_pi_counter_load_en = pi_counter_load_en;
A_pi_counter_read_en = pi_counter_read_en;
A_pi_counter_load_val = pi_counter_load_val;
A_pi_rst_dqs_find = pi_rst_dqs_find;
A_po_fine_enable = po_fine_enable;
A_po_coarse_enable = po_coarse_enable;
A_po_fine_inc = po_fine_inc;
A_po_coarse_inc = po_coarse_inc;
A_po_counter_load_en = po_counter_load_en;
A_po_counter_read_en = po_counter_read_en;
A_po_counter_load_val = po_counter_load_val;
A_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
A_idelay_ce = idelay_ce;
A_idelay_ld = idelay_ld;
A_fine_delay = fine_delay ;
A_fine_delay_sel = fine_delay_sel;
end
1: begin
B_pi_fine_enable = pi_fine_enable;
B_pi_fine_inc = pi_fine_inc;
B_pi_counter_load_en = pi_counter_load_en;
B_pi_counter_read_en = pi_counter_read_en;
B_pi_counter_load_val = pi_counter_load_val;
B_pi_rst_dqs_find = pi_rst_dqs_find;
B_po_fine_enable = po_fine_enable;
B_po_coarse_enable = po_coarse_enable;
B_po_fine_inc = po_fine_inc;
B_po_coarse_inc = po_coarse_inc;
B_po_counter_load_en = po_counter_load_en;
B_po_counter_read_en = po_counter_read_en;
B_po_counter_load_val = po_counter_load_val;
B_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
B_idelay_ce = idelay_ce;
B_idelay_ld = idelay_ld;
B_fine_delay = fine_delay ;
B_fine_delay_sel = fine_delay_sel;
end
2: begin
C_pi_fine_enable = pi_fine_enable;
C_pi_fine_inc = pi_fine_inc;
C_pi_counter_load_en = pi_counter_load_en;
C_pi_counter_read_en = pi_counter_read_en;
C_pi_counter_load_val = pi_counter_load_val;
C_pi_rst_dqs_find = pi_rst_dqs_find;
C_po_fine_enable = po_fine_enable;
C_po_coarse_enable = po_coarse_enable;
C_po_fine_inc = po_fine_inc;
C_po_coarse_inc = po_coarse_inc;
C_po_counter_load_en = po_counter_load_en;
C_po_counter_read_en = po_counter_read_en;
C_po_counter_load_val = po_counter_load_val;
C_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
C_idelay_ce = idelay_ce;
C_idelay_ld = idelay_ld;
C_fine_delay = fine_delay ;
C_fine_delay_sel = fine_delay_sel;
end
3: begin
D_pi_fine_enable = pi_fine_enable;
D_pi_fine_inc = pi_fine_inc;
D_pi_counter_load_en = pi_counter_load_en;
D_pi_counter_read_en = pi_counter_read_en;
D_pi_counter_load_val = pi_counter_load_val;
D_pi_rst_dqs_find = pi_rst_dqs_find;
D_po_fine_enable = po_fine_enable;
D_po_coarse_enable = po_coarse_enable;
D_po_fine_inc = po_fine_inc;
D_po_coarse_inc = po_coarse_inc;
D_po_counter_load_en = po_counter_load_en;
D_po_counter_load_val = po_counter_load_val;
D_po_counter_read_en = po_counter_read_en;
D_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay;
D_idelay_ce = idelay_ce;
D_idelay_ld = idelay_ld;
D_fine_delay = fine_delay ;
D_fine_delay_sel = fine_delay_sel;
end
endcase
end
end
  // Obligatory PHASER_REF
PHASER_REF phaser_ref_i(
.LOCKED (ref_dll_lock),
.CLKIN (freq_refclk),
.PWRDWN (1'b0),
.RST ( ! pll_lock)
);
  // Optional IDELAYCTRL
generate
if ( GENERATE_IDELAYCTRL == "TRUE")
IDELAYCTRL idelayctrl (
.RDY (/*idelayctrl_rdy*/),
.REFCLK (idelayctrl_refclk),
.RST (rst)
);
endgenerate
endmodule
|
module
.idelayctrl_refclk (),
.phy_dout (phy_dout),
.phy_cmd_wr_en (phy_cmd_wr_en),
.phy_data_wr_en (phy_data_wr_en),
.phy_rd_en (phy_rd_en),
.phy_ctl_wd (phy_ctl_wd_temp),
.phy_ctl_wr (phy_ctl_wr_temp),
.if_empty_def (phy_if_empty_def),
.if_rst (phy_if_reset),
.phyGo ('b1),
.aux_in_1 (aux_in_1),
.aux_in_2 (aux_in_2),
// No support yet for different data offsets for different I/O banks
// (possible use in supporting wider range of skew among bytes)
.data_offset_1 (data_offset_1_temp),
.data_offset_2 (data_offset_2_temp),
.cke_in (),
.if_a_empty (),
.if_empty (if_empty),
.if_empty_or (),
.if_empty_and (),
.of_ctl_a_full (),
// .of_data_a_full (phy_data_full),
.of_ctl_full (phy_cmd_full),
.of_data_full (),
.pre_data_a_full (phy_pre_data_a_full),
.idelay_ld (idelay_ld),
.idelay_ce (idelay_ce),
.idelay_inc (idelay_inc),
.input_sink (),
.phy_din (phy_din),
.phy_ctl_a_full (),
.phy_ctl_full (phy_ctl_full_temp),
.mem_dq_out (mem_dq_out),
.mem_dq_ts (mem_dq_ts),
.mem_dq_in (mem_dq_in),
.mem_dqs_out (mem_dqs_out),
.mem_dqs_ts (mem_dqs_ts),
.mem_dqs_in (mem_dqs_in),
.aux_out (aux_out),
.phy_ctl_ready (),
.rst_out (),
.ddr_clk (ddr_clk),
//.rclk (),
.mcGo (phy_mc_go),
.phy_write_calib (phy_write_calib),
.phy_read_calib (phy_read_calib),
.calib_sel (calib_sel),
.calib_in_common (calib_in_common),
.calib_zero_inputs (calib_zero_inputs),
.calib_zero_ctrl (calib_zero_ctrl),
.calib_zero_lanes ('b0),
.po_fine_enable (po_fine_enable),
.po_coarse_enable (po_coarse_enable),
.po_fine_inc (po_fine_inc),
.po_coarse_inc (po_coarse_inc),
.po_counter_load_en (po_counter_load_en),
.po_sel_fine_oclk_delay (po_sel_fine_oclk_delay),
.po_counter_load_val (po_counter_load_val),
.po_counter_read_en (po_counter_read_en),
.po_coarse_overflow (),
.po_fine_overflow (),
.po_counter_read_val (po_counter_read_val),
.pi_rst_dqs_find (pi_rst_dqs_find),
.pi_fine_enable (pi_fine_enable),
.pi_fine_inc (pi_fine_inc),
.pi_counter_load_en (pi_counter_load_en),
.pi_counter_read_en (dbg_pi_counter_read_en),
.pi_counter_load_val (pi_counter_load_val),
.pi_fine_overflow (),
.pi_counter_read_val (pi_counter_read_val),
.pi_phase_locked (pi_phase_locked),
.pi_phase_locked_all (pi_phase_locked_all),
.pi_dqs_found (),
.pi_dqs_found_any (pi_dqs_found),
.pi_dqs_found_all (pi_dqs_found_all),
.pi_dqs_found_lanes (dbg_pi_dqs_found_lanes_phy4lanes),
// Currently not being used. May be used in future if periodic
// reads become a requirement. This output could be used to signal
// a catastrophic failure in read capture and the need for
// re-calibration.
.pi_dqs_out_of_range (pi_dqs_out_of_range)
,.ref_dll_lock (ref_dll_lock)
,.pi_phase_locked_lanes (dbg_pi_phase_locked_phy4lanes)
,.fine_delay (fine_delay_mod)
,.fine_delay_sel (fine_delay_sel_r)
// ,.rst_phaser_ref (rst_phaser_ref)
);
endmodule
|
module mig_7series_v2_3_ddr_phy_wrcal #
(
parameter TCQ = 100, // clk->out delay (sim only)
parameter nCK_PER_CLK = 2, // # of memory clocks per CLK
parameter CLK_PERIOD = 2500,
parameter DQ_WIDTH = 64, // # of DQ (data)
parameter DQS_CNT_WIDTH = 3, // = ceil(log2(DQS_WIDTH))
parameter DQS_WIDTH = 8, // # of DQS (strobe)
parameter DRAM_WIDTH = 8, // # of DQ per DQS
parameter PRE_REV3ES = "OFF", // Delay O/Ps using Phaser_Out fine dly
parameter SIM_CAL_OPTION = "NONE" // Skip various calibration steps
)
(
input clk,
input rst,
// Calibration status, control signals
input wrcal_start,
input wrcal_rd_wait,
input wrcal_sanity_chk,
input dqsfound_retry_done,
input phy_rddata_en,
output dqsfound_retry,
output wrcal_read_req,
output reg wrcal_act_req,
output reg wrcal_done,
output reg wrcal_pat_err,
output reg wrcal_prech_req,
output reg temp_wrcal_done,
output reg wrcal_sanity_chk_done,
input prech_done,
// Captured data in resync clock domain
input [2*nCK_PER_CLK*DQ_WIDTH-1:0] rd_data,
// Write level values of Phaser_Out coarse and fine
// delay taps required to load Phaser_Out register
input [3*DQS_WIDTH-1:0] wl_po_coarse_cnt,
input [6*DQS_WIDTH-1:0] wl_po_fine_cnt,
input wrlvl_byte_done,
output reg wrlvl_byte_redo,
output reg early1_data,
output reg early2_data,
// DQ IDELAY
output reg idelay_ld,
output reg wrcal_pat_resume, // to phy_init for write
output reg [DQS_CNT_WIDTH:0] po_stg2_wrcal_cnt,
output phy_if_reset,
// Debug Port
output [6*DQS_WIDTH-1:0] dbg_final_po_fine_tap_cnt,
output [3*DQS_WIDTH-1:0] dbg_final_po_coarse_tap_cnt,
output [99:0] dbg_phy_wrcal
);
// Length of calibration sequence (in # of words)
//localparam CAL_PAT_LEN = 8;
// Read data shift register length
localparam RD_SHIFT_LEN = 1; //(nCK_PER_CLK == 4) ? 1 : 2;
// # of reads for reliable read capture
localparam NUM_READS = 2;
// # of cycles to wait after changing RDEN count value
localparam RDEN_WAIT_CNT = 12;
localparam COARSE_CNT = (CLK_PERIOD/nCK_PER_CLK <= 2500) ? 3 : 6;
localparam FINE_CNT = (CLK_PERIOD/nCK_PER_CLK <= 2500) ? 22 : 44;
localparam CAL2_IDLE = 4'h0;
localparam CAL2_READ_WAIT = 4'h1;
localparam CAL2_NEXT_DQS = 4'h2;
localparam CAL2_WRLVL_WAIT = 4'h3;
localparam CAL2_IFIFO_RESET = 4'h4;
localparam CAL2_DQ_IDEL_DEC = 4'h5;
localparam CAL2_DONE = 4'h6;
localparam CAL2_SANITY_WAIT = 4'h7;
localparam CAL2_ERR = 4'h8;
integer i,j,k,l,m,p,q,d;
reg [2:0] po_coarse_tap_cnt [0:DQS_WIDTH-1];
reg [3*DQS_WIDTH-1:0] po_coarse_tap_cnt_w;
reg [5:0] po_fine_tap_cnt [0:DQS_WIDTH-1];
reg [6*DQS_WIDTH-1:0] po_fine_tap_cnt_w;
reg [DQS_CNT_WIDTH:0] wrcal_dqs_cnt_r/* synthesis syn_maxfan = 10 */;
reg [4:0] not_empty_wait_cnt;
reg [3:0] tap_inc_wait_cnt;
reg cal2_done_r;
reg cal2_done_r1;
reg cal2_prech_req_r;
reg [3:0] cal2_state_r;
reg [3:0] cal2_state_r1;
reg [2:0] wl_po_coarse_cnt_w [0:DQS_WIDTH-1];
reg [5:0] wl_po_fine_cnt_w [0:DQS_WIDTH-1];
reg cal2_if_reset;
reg wrcal_pat_resume_r;
reg wrcal_pat_resume_r1;
reg wrcal_pat_resume_r2;
reg wrcal_pat_resume_r3;
reg [DRAM_WIDTH-1:0] mux_rd_fall0_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall1_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise0_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise1_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall2_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall3_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise2_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise3_r;
reg pat_data_match_r;
reg pat1_data_match_r;
reg pat1_data_match_r1;
reg pat2_data_match_r;
reg pat_data_match_valid_r;
wire [RD_SHIFT_LEN-1:0] pat_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat2_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat2_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] early_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] early_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] early_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] early_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] early1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] early1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] early2_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] early2_fall1 [3:0];
reg [DRAM_WIDTH-1:0] pat_match_fall0_r;
reg pat_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] pat_match_fall1_r;
reg pat_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] pat_match_fall2_r;
reg pat_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] pat_match_fall3_r;
reg pat_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] pat_match_rise0_r;
reg pat_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] pat_match_rise1_r;
reg pat_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] pat_match_rise2_r;
reg pat_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] pat_match_rise3_r;
reg pat_match_rise3_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise0_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise1_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall0_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall1_r;
reg [DRAM_WIDTH-1:0] pat2_match_rise0_r;
reg [DRAM_WIDTH-1:0] pat2_match_rise1_r;
reg [DRAM_WIDTH-1:0] pat2_match_fall0_r;
reg [DRAM_WIDTH-1:0] pat2_match_fall1_r;
reg pat1_match_rise0_and_r;
reg pat1_match_rise1_and_r;
reg pat1_match_fall0_and_r;
reg pat1_match_fall1_and_r;
reg pat2_match_rise0_and_r;
reg pat2_match_rise1_and_r;
reg pat2_match_fall0_and_r;
reg pat2_match_fall1_and_r;
reg early1_data_match_r;
reg early1_data_match_r1;
reg [DRAM_WIDTH-1:0] early1_match_fall0_r;
reg early1_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] early1_match_fall1_r;
reg early1_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] early1_match_fall2_r;
reg early1_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] early1_match_fall3_r;
reg early1_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] early1_match_rise0_r;
reg early1_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] early1_match_rise1_r;
reg early1_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] early1_match_rise2_r;
reg early1_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] early1_match_rise3_r;
reg early1_match_rise3_and_r;
reg early2_data_match_r;
reg [DRAM_WIDTH-1:0] early2_match_fall0_r;
reg early2_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] early2_match_fall1_r;
reg early2_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] early2_match_fall2_r;
reg early2_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] early2_match_fall3_r;
reg early2_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] early2_match_rise0_r;
reg early2_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] early2_match_rise1_r;
reg early2_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] early2_match_rise2_r;
reg early2_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] early2_match_rise3_r;
reg early2_match_rise3_and_r;
wire [RD_SHIFT_LEN-1:0] pat_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat2_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat2_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] early_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] early_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] early_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] early_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] early1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] early1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] early2_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] early2_rise1 [3:0];
wire [DQ_WIDTH-1:0] rd_data_rise0;
wire [DQ_WIDTH-1:0] rd_data_fall0;
wire [DQ_WIDTH-1:0] rd_data_rise1;
wire [DQ_WIDTH-1:0] rd_data_fall1;
wire [DQ_WIDTH-1:0] rd_data_rise2;
wire [DQ_WIDTH-1:0] rd_data_fall2;
wire [DQ_WIDTH-1:0] rd_data_rise3;
wire [DQ_WIDTH-1:0] rd_data_fall3;
reg [DQS_CNT_WIDTH:0] rd_mux_sel_r;
reg rd_active_posedge_r;
reg rd_active_r;
reg rd_active_r1;
reg rd_active_r2;
reg rd_active_r3;
reg rd_active_r4;
reg rd_active_r5;
reg [RD_SHIFT_LEN-1:0] sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise3_r [DRAM_WIDTH-1:0];
reg wrlvl_byte_done_r;
reg idelay_ld_done;
reg pat1_detect;
reg early1_detect;
reg wrcal_sanity_chk_r;
reg wrcal_sanity_chk_err;
//***************************************************************************
// Debug
//***************************************************************************
always @(*) begin
for (d = 0; d < DQS_WIDTH; d = d + 1) begin
po_fine_tap_cnt_w[(6*d)+:6] = po_fine_tap_cnt[d];
po_coarse_tap_cnt_w[(3*d)+:3] = po_coarse_tap_cnt[d];
end
end
assign dbg_final_po_fine_tap_cnt = po_fine_tap_cnt_w;
assign dbg_final_po_coarse_tap_cnt = po_coarse_tap_cnt_w;
assign dbg_phy_wrcal[0] = pat_data_match_r;
assign dbg_phy_wrcal[4:1] = cal2_state_r1[3:0];
assign dbg_phy_wrcal[5] = wrcal_sanity_chk_err;
assign dbg_phy_wrcal[6] = wrcal_start;
assign dbg_phy_wrcal[7] = wrcal_done;
assign dbg_phy_wrcal[8] = pat_data_match_valid_r;
assign dbg_phy_wrcal[13+:DQS_CNT_WIDTH]= wrcal_dqs_cnt_r;
assign dbg_phy_wrcal[17+:5] = not_empty_wait_cnt;
assign dbg_phy_wrcal[22] = early1_data;
assign dbg_phy_wrcal[23] = early2_data;
assign dbg_phy_wrcal[24+:8] = mux_rd_rise0_r;
assign dbg_phy_wrcal[32+:8] = mux_rd_fall0_r;
assign dbg_phy_wrcal[40+:8] = mux_rd_rise1_r;
assign dbg_phy_wrcal[48+:8] = mux_rd_fall1_r;
assign dbg_phy_wrcal[56+:8] = mux_rd_rise2_r;
assign dbg_phy_wrcal[64+:8] = mux_rd_fall2_r;
assign dbg_phy_wrcal[72+:8] = mux_rd_rise3_r;
assign dbg_phy_wrcal[80+:8] = mux_rd_fall3_r;
assign dbg_phy_wrcal[88] = early1_data_match_r;
assign dbg_phy_wrcal[89] = early2_data_match_r;
assign dbg_phy_wrcal[90] = wrcal_sanity_chk_r & pat_data_match_valid_r;
assign dbg_phy_wrcal[91] = wrcal_sanity_chk_r;
assign dbg_phy_wrcal[92] = wrcal_sanity_chk_done;
assign dqsfound_retry = 1'b0;
assign wrcal_read_req = 1'b0;
assign phy_if_reset = cal2_if_reset;
//**************************************************************************
// DQS count to hard PHY during write calibration using Phaser_OUT Stage2
// coarse delay
//**************************************************************************
always @(posedge clk) begin
po_stg2_wrcal_cnt <= #TCQ wrcal_dqs_cnt_r;
wrlvl_byte_done_r <= #TCQ wrlvl_byte_done;
wrcal_sanity_chk_r <= #TCQ wrcal_sanity_chk;
end
//***************************************************************************
// Data mux to route appropriate byte to calibration logic - i.e. calibration
// is done sequentially, one byte (or DQS group) at a time
//***************************************************************************
generate
if (nCK_PER_CLK == 4) begin: gen_rd_data_div4
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
assign rd_data_rise2 = rd_data[5*DQ_WIDTH-1:4*DQ_WIDTH];
assign rd_data_fall2 = rd_data[6*DQ_WIDTH-1:5*DQ_WIDTH];
assign rd_data_rise3 = rd_data[7*DQ_WIDTH-1:6*DQ_WIDTH];
assign rd_data_fall3 = rd_data[8*DQ_WIDTH-1:7*DQ_WIDTH];
end else if (nCK_PER_CLK == 2) begin: gen_rd_data_div2
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
end
endgenerate
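  // rd_data word packing assumed by the slices above (LSB word first):
  // {..., fall1, rise1, fall0, rise0}, each word DQ_WIDTH bits wide, for a
  // total of 2*nCK_PER_CLK words per controller clock.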
//**************************************************************************
// Final Phaser OUT coarse and fine delay taps after write calibration
// Sum of taps used during write leveling taps and write calibration
//**************************************************************************
always @(*) begin
for (m = 0; m < DQS_WIDTH; m = m + 1) begin
wl_po_coarse_cnt_w[m] = wl_po_coarse_cnt[3*m+:3];
wl_po_fine_cnt_w[m] = wl_po_fine_cnt[6*m+:6];
end
end
always @(posedge clk) begin
if (rst) begin
for (p = 0; p < DQS_WIDTH; p = p + 1) begin
po_coarse_tap_cnt[p] <= #TCQ {3{1'b0}};
po_fine_tap_cnt[p] <= #TCQ {6{1'b0}};
end
end else if (cal2_done_r && ~cal2_done_r1) begin
for (q = 0; q < DQS_WIDTH; q = q + 1) begin
          po_coarse_tap_cnt[q] <= #TCQ wl_po_coarse_cnt_w[q];
          po_fine_tap_cnt[q]   <= #TCQ wl_po_fine_cnt_w[q];
end
end
end
always @(posedge clk) begin
rd_mux_sel_r <= #TCQ wrcal_dqs_cnt_r;
end
// Register outputs for improved timing.
// NOTE: Will need to change when per-bit DQ deskew is supported.
  // Currently all bits in a DQS group are checked in aggregate
generate
genvar mux_i;
if (nCK_PER_CLK == 4) begin: gen_mux_rd_div4
for (mux_i = 0; mux_i < DRAM_WIDTH; mux_i = mux_i + 1) begin: gen_mux_rd
always @(posedge clk) begin
mux_rd_rise0_r[mux_i] <= #TCQ rd_data_rise0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall0_r[mux_i] <= #TCQ rd_data_fall0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_rise1_r[mux_i] <= #TCQ rd_data_rise1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall1_r[mux_i] <= #TCQ rd_data_fall1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_rise2_r[mux_i] <= #TCQ rd_data_rise2[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall2_r[mux_i] <= #TCQ rd_data_fall2[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_rise3_r[mux_i] <= #TCQ rd_data_rise3[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall3_r[mux_i] <= #TCQ rd_data_fall3[DRAM_WIDTH*rd_mux_sel_r + mux_i];
end
end
end else if (nCK_PER_CLK == 2) begin: gen_mux_rd_div2
for (mux_i = 0; mux_i < DRAM_WIDTH; mux_i = mux_i + 1) begin: gen_mux_rd
always @(posedge clk) begin
mux_rd_rise0_r[mux_i] <= #TCQ rd_data_rise0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall0_r[mux_i] <= #TCQ rd_data_fall0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_rise1_r[mux_i] <= #TCQ rd_data_rise1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall1_r[mux_i] <= #TCQ rd_data_fall1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
end
end
end
endgenerate
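  // The bit select DRAM_WIDTH*rd_mux_sel_r + mux_i above picks DQ bit mux_i
  // of the byte (DQS group) currently being calibrated; rd_mux_sel_r is a
  // registered copy of wrcal_dqs_cnt_r.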
//***************************************************************************
  // Generate request to PHY_INIT logic to issue a precharge. Required when
  // calibration can take a long time (during which there are only constant
  // reads present on this bus). In this case we need to issue periodic
  // precharges to avoid a tRAS violation. This signal must meet the following
  // requirements: (1) only transition from 0->1 when a precharge is first
  // needed, (2) stay at 1 and only transition 1->0 when prech_done is asserted
//***************************************************************************
always @(posedge clk)
if (rst)
wrcal_prech_req <= #TCQ 1'b0;
else
// Combine requests from all stages here
wrcal_prech_req <= #TCQ cal2_prech_req_r;
//***************************************************************************
  // Shift register to store last RD_SHIFT_LEN cycles of data from ISERDES
// NOTE: Written using discrete flops, but SRL can be used if the matching
// logic does the comparison sequentially, rather than parallel
//***************************************************************************
generate
genvar rd_i;
if (nCK_PER_CLK == 4) begin: gen_sr_div4
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
sr_rise0_r[rd_i] <= #TCQ mux_rd_rise0_r[rd_i];
sr_fall0_r[rd_i] <= #TCQ mux_rd_fall0_r[rd_i];
sr_rise1_r[rd_i] <= #TCQ mux_rd_rise1_r[rd_i];
sr_fall1_r[rd_i] <= #TCQ mux_rd_fall1_r[rd_i];
sr_rise2_r[rd_i] <= #TCQ mux_rd_rise2_r[rd_i];
sr_fall2_r[rd_i] <= #TCQ mux_rd_fall2_r[rd_i];
sr_rise3_r[rd_i] <= #TCQ mux_rd_rise3_r[rd_i];
sr_fall3_r[rd_i] <= #TCQ mux_rd_fall3_r[rd_i];
end
end
end else if (nCK_PER_CLK == 2) begin: gen_sr_div2
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
sr_rise0_r[rd_i] <= #TCQ mux_rd_rise0_r[rd_i];
sr_fall0_r[rd_i] <= #TCQ mux_rd_fall0_r[rd_i];
sr_rise1_r[rd_i] <= #TCQ mux_rd_rise1_r[rd_i];
sr_fall1_r[rd_i] <= #TCQ mux_rd_fall1_r[rd_i];
end
end
end
endgenerate
//***************************************************************************
// Write calibration:
  // During write leveling, DQS is aligned to the nearest CK edge, which may
  // not be the correct CK edge. Write calibration is required to align DQS to
  // the CK edge that actually clocks the write command.
// The Phaser_Out coarse delay line is adjusted if required to add a memory
// clock cycle of delay in order to read back the expected pattern.
//***************************************************************************
always @(posedge clk) begin
rd_active_r <= #TCQ phy_rddata_en;
rd_active_r1 <= #TCQ rd_active_r;
rd_active_r2 <= #TCQ rd_active_r1;
rd_active_r3 <= #TCQ rd_active_r2;
rd_active_r4 <= #TCQ rd_active_r3;
rd_active_r5 <= #TCQ rd_active_r4;
end
//*****************************************************************
// Expected data pattern when properly received by read capture
// logic:
// Based on pattern of ({rise,fall}) =
// 0xF, 0x0, 0xA, 0x5, 0x5, 0xA, 0x9, 0x6
// Each nibble will look like:
// bit3: 1, 0, 1, 0, 0, 1, 1, 0
// bit2: 1, 0, 0, 1, 1, 0, 0, 1
// bit1: 1, 0, 1, 0, 0, 1, 0, 1
// bit0: 1, 0, 0, 1, 1, 0, 1, 0
// Change the hard-coded pattern below accordingly as RD_SHIFT_LEN
// and the actual training pattern contents change
//*****************************************************************
generate
if (nCK_PER_CLK == 4) begin: gen_pat_div4
// FF00AA5555AA9966
assign pat_rise0[3] = 1'b1;
assign pat_fall0[3] = 1'b0;
assign pat_rise1[3] = 1'b1;
assign pat_fall1[3] = 1'b0;
assign pat_rise2[3] = 1'b0;
assign pat_fall2[3] = 1'b1;
assign pat_rise3[3] = 1'b1;
assign pat_fall3[3] = 1'b0;
assign pat_rise0[2] = 1'b1;
assign pat_fall0[2] = 1'b0;
assign pat_rise1[2] = 1'b0;
assign pat_fall1[2] = 1'b1;
assign pat_rise2[2] = 1'b1;
assign pat_fall2[2] = 1'b0;
assign pat_rise3[2] = 1'b0;
assign pat_fall3[2] = 1'b1;
assign pat_rise0[1] = 1'b1;
assign pat_fall0[1] = 1'b0;
assign pat_rise1[1] = 1'b1;
assign pat_fall1[1] = 1'b0;
assign pat_rise2[1] = 1'b0;
assign pat_fall2[1] = 1'b1;
assign pat_rise3[1] = 1'b0;
assign pat_fall3[1] = 1'b1;
assign pat_rise0[0] = 1'b1;
assign pat_fall0[0] = 1'b0;
assign pat_rise1[0] = 1'b0;
assign pat_fall1[0] = 1'b1;
assign pat_rise2[0] = 1'b1;
assign pat_fall2[0] = 1'b0;
assign pat_rise3[0] = 1'b1;
assign pat_fall3[0] = 1'b0;
// Pattern to distinguish between early write and incorrect read
// BB11EE4444EEDD88
assign early_rise0[3] = 1'b1;
assign early_fall0[3] = 1'b0;
assign early_rise1[3] = 1'b1;
assign early_fall1[3] = 1'b0;
assign early_rise2[3] = 1'b0;
assign early_fall2[3] = 1'b1;
assign early_rise3[3] = 1'b1;
assign early_fall3[3] = 1'b1;
assign early_rise0[2] = 1'b0;
assign early_fall0[2] = 1'b0;
assign early_rise1[2] = 1'b1;
assign early_fall1[2] = 1'b1;
assign early_rise2[2] = 1'b1;
assign early_fall2[2] = 1'b1;
assign early_rise3[2] = 1'b1;
assign early_fall3[2] = 1'b0;
assign early_rise0[1] = 1'b1;
assign early_fall0[1] = 1'b0;
assign early_rise1[1] = 1'b1;
assign early_fall1[1] = 1'b0;
assign early_rise2[1] = 1'b0;
assign early_fall2[1] = 1'b1;
assign early_rise3[1] = 1'b0;
assign early_fall3[1] = 1'b0;
assign early_rise0[0] = 1'b1;
assign early_fall0[0] = 1'b1;
assign early_rise1[0] = 1'b0;
assign early_fall1[0] = 1'b0;
assign early_rise2[0] = 1'b0;
assign early_fall2[0] = 1'b0;
assign early_rise3[0] = 1'b1;
assign early_fall3[0] = 1'b0;
end else if (nCK_PER_CLK == 2) begin: gen_pat_div2
// First cycle pattern FF00AA55
assign pat1_rise0[3] = 1'b1;
assign pat1_fall0[3] = 1'b0;
assign pat1_rise1[3] = 1'b1;
assign pat1_fall1[3] = 1'b0;
assign pat1_rise0[2] = 1'b1;
assign pat1_fall0[2] = 1'b0;
assign pat1_rise1[2] = 1'b0;
assign pat1_fall1[2] = 1'b1;
assign pat1_rise0[1] = 1'b1;
assign pat1_fall0[1] = 1'b0;
assign pat1_rise1[1] = 1'b1;
assign pat1_fall1[1] = 1'b0;
assign pat1_rise0[0] = 1'b1;
assign pat1_fall0[0] = 1'b0;
assign pat1_rise1[0] = 1'b0;
assign pat1_fall1[0] = 1'b1;
// Second cycle pattern 55AA9966
assign pat2_rise0[3] = 1'b0;
assign pat2_fall0[3] = 1'b1;
assign pat2_rise1[3] = 1'b1;
assign pat2_fall1[3] = 1'b0;
assign pat2_rise0[2] = 1'b1;
assign pat2_fall0[2] = 1'b0;
assign pat2_rise1[2] = 1'b0;
assign pat2_fall1[2] = 1'b1;
assign pat2_rise0[1] = 1'b0;
assign pat2_fall0[1] = 1'b1;
assign pat2_rise1[1] = 1'b0;
assign pat2_fall1[1] = 1'b1;
assign pat2_rise0[0] = 1'b1;
assign pat2_fall0[0] = 1'b0;
assign pat2_rise1[0] = 1'b1;
assign pat2_fall1[0] = 1'b0;
//Pattern to distinguish between early write and incorrect read
// First cycle pattern AA5555AA
      assign early1_rise0[3] = 1'b1;
      assign early1_fall0[3] = 1'b0;
      assign early1_rise1[3] = 1'b0;
      assign early1_fall1[3] = 1'b1;
      assign early1_rise0[2] = 1'b0;
      assign early1_fall0[2] = 1'b1;
      assign early1_rise1[2] = 1'b1;
      assign early1_fall1[2] = 1'b0;
      assign early1_rise0[1] = 1'b1;
      assign early1_fall0[1] = 1'b0;
      assign early1_rise1[1] = 1'b0;
      assign early1_fall1[1] = 1'b1;
      assign early1_rise0[0] = 1'b0;
      assign early1_fall0[0] = 1'b1;
      assign early1_rise1[0] = 1'b1;
      assign early1_fall1[0] = 1'b0;
      // Second cycle pattern 9966BB11
      assign early2_rise0[3] = 1'b1;
      assign early2_fall0[3] = 1'b0;
      assign early2_rise1[3] = 1'b1;
      assign early2_fall1[3] = 1'b0;
      assign early2_rise0[2] = 1'b0;
      assign early2_fall0[2] = 1'b1;
      assign early2_rise1[2] = 1'b0;
      assign early2_fall1[2] = 1'b0;
      assign early2_rise0[1] = 1'b0;
      assign early2_fall0[1] = 1'b1;
      assign early2_rise1[1] = 1'b1;
      assign early2_fall1[1] = 1'b0;
      assign early2_rise0[0] = 1'b1;
      assign early2_fall0[0] = 1'b0;
      assign early2_rise1[0] = 1'b1;
      assign early2_fall1[0] = 1'b1;
end
endgenerate
  // Each bit of each byte is compared to the expected pattern.
  // This is done to drastically decrease the chance that invalid data,
  // clocked in while the DQ bus is tri-stated (possibly combined with some
  // correct data), will resemble the expected data pattern. A better fix for
  // this is to change the training pattern and/or make the pattern longer.
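  // In the comparisons below, the index pt_i%4 selects the bit position
  // within the repeating nibble of the training pattern for DQ bit pt_i of
  // the DQS group (see the pattern tables above).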
generate
genvar pt_i;
if (nCK_PER_CLK == 4) begin: gen_pat_match_div4
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat_rise0[pt_i%4])
pat_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat_fall0[pt_i%4])
pat_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat_rise1[pt_i%4])
pat_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat_fall1[pt_i%4])
pat_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat_rise2[pt_i%4])
pat_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
pat_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat_fall2[pt_i%4])
pat_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
pat_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == pat_rise3[pt_i%4])
pat_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
pat_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == pat_fall3[pt_i%4])
pat_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
pat_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat_rise1[pt_i%4])
early1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat_fall1[pt_i%4])
early1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat_rise2[pt_i%4])
early1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat_fall2[pt_i%4])
early1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat_rise3[pt_i%4])
early1_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat_fall3[pt_i%4])
early1_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == early_rise0[pt_i%4])
early1_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == early_fall0[pt_i%4])
early1_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat_rise2[pt_i%4])
early2_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat_fall2[pt_i%4])
early2_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat_rise3[pt_i%4])
early2_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat_fall3[pt_i%4])
early2_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == early_rise0[pt_i%4])
early2_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == early_fall0[pt_i%4])
early2_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == early_rise1[pt_i%4])
early2_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == early_fall1[pt_i%4])
early2_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
end
always @(posedge clk) begin
pat_match_rise0_and_r <= #TCQ &pat_match_rise0_r;
pat_match_fall0_and_r <= #TCQ &pat_match_fall0_r;
pat_match_rise1_and_r <= #TCQ &pat_match_rise1_r;
pat_match_fall1_and_r <= #TCQ &pat_match_fall1_r;
pat_match_rise2_and_r <= #TCQ &pat_match_rise2_r;
pat_match_fall2_and_r <= #TCQ &pat_match_fall2_r;
pat_match_rise3_and_r <= #TCQ &pat_match_rise3_r;
pat_match_fall3_and_r <= #TCQ &pat_match_fall3_r;
pat_data_match_r <= #TCQ (pat_match_rise0_and_r &&
pat_match_fall0_and_r &&
pat_match_rise1_and_r &&
pat_match_fall1_and_r &&
pat_match_rise2_and_r &&
pat_match_fall2_and_r &&
pat_match_rise3_and_r &&
pat_match_fall3_and_r);
pat_data_match_valid_r <= #TCQ rd_active_r3;
end
always @(posedge clk) begin
early1_match_rise0_and_r <= #TCQ &early1_match_rise0_r;
early1_match_fall0_and_r <= #TCQ &early1_match_fall0_r;
early1_match_rise1_and_r <= #TCQ &early1_match_rise1_r;
early1_match_fall1_and_r <= #TCQ &early1_match_fall1_r;
early1_match_rise2_and_r <= #TCQ &early1_match_rise2_r;
early1_match_fall2_and_r <= #TCQ &early1_match_fall2_r;
early1_match_rise3_and_r <= #TCQ &early1_match_rise3_r;
early1_match_fall3_and_r <= #TCQ &early1_match_fall3_r;
early1_data_match_r <= #TCQ (early1_match_rise0_and_r &&
early1_match_fall0_and_r &&
early1_match_rise1_and_r &&
early1_match_fall1_and_r &&
early1_match_rise2_and_r &&
early1_match_fall2_and_r &&
early1_match_rise3_and_r &&
early1_match_fall3_and_r);
end
always @(posedge clk) begin
early2_match_rise0_and_r <= #TCQ &early2_match_rise0_r;
early2_match_fall0_and_r <= #TCQ &early2_match_fall0_r;
early2_match_rise1_and_r <= #TCQ &early2_match_rise1_r;
early2_match_fall1_and_r <= #TCQ &early2_match_fall1_r;
early2_match_rise2_and_r <= #TCQ &early2_match_rise2_r;
early2_match_fall2_and_r <= #TCQ &early2_match_fall2_r;
early2_match_rise3_and_r <= #TCQ &early2_match_rise3_r;
early2_match_fall3_and_r <= #TCQ &early2_match_fall3_r;
early2_data_match_r <= #TCQ (early2_match_rise0_and_r &&
early2_match_fall0_and_r &&
early2_match_rise1_and_r &&
early2_match_fall1_and_r &&
early2_match_rise2_and_r &&
early2_match_fall2_and_r &&
early2_match_rise3_and_r &&
early2_match_fall3_and_r);
end
end else if (nCK_PER_CLK == 2) begin: gen_pat_match_div2
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat1_rise0[pt_i%4])
pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat1_fall0[pt_i%4])
pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat1_rise1[pt_i%4])
pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat1_fall1[pt_i%4])
pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat2_rise0[pt_i%4])
pat2_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat2_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat2_fall0[pt_i%4])
pat2_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat2_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat2_rise1[pt_i%4])
pat2_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat2_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat2_fall1[pt_i%4])
pat2_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat2_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == early1_rise0[pt_i%4])
early1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == early1_fall0[pt_i%4])
early1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == early1_rise1[pt_i%4])
early1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == early1_fall1[pt_i%4])
early1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
// early2 in this case does not mean 2 cycles early but
// the second cycle of read data in 2:1 mode
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == early2_rise0[pt_i%4])
early2_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == early2_fall0[pt_i%4])
early2_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == early2_rise1[pt_i%4])
early2_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == early2_fall1[pt_i%4])
early2_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
end
always @(posedge clk) begin
pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r;
pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r;
pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r;
pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r;
pat1_data_match_r <= #TCQ (pat1_match_rise0_and_r &&
pat1_match_fall0_and_r &&
pat1_match_rise1_and_r &&
pat1_match_fall1_and_r);
pat1_data_match_r1 <= #TCQ pat1_data_match_r;
pat2_match_rise0_and_r <= #TCQ &pat2_match_rise0_r && rd_active_r3;
pat2_match_fall0_and_r <= #TCQ &pat2_match_fall0_r && rd_active_r3;
pat2_match_rise1_and_r <= #TCQ &pat2_match_rise1_r && rd_active_r3;
pat2_match_fall1_and_r <= #TCQ &pat2_match_fall1_r && rd_active_r3;
pat2_data_match_r <= #TCQ (pat2_match_rise0_and_r &&
pat2_match_fall0_and_r &&
pat2_match_rise1_and_r &&
pat2_match_fall1_and_r);
        // For 2:1 mode, read valid is asserted for 2 clock cycles -
        // here we generate a "match valid" pulse that is only 1 clock
        // cycle wide and coincides with the completion of the match
        // calculation
pat_data_match_valid_r <= #TCQ rd_active_r4 & ~rd_active_r5;
end
always @(posedge clk) begin
early1_match_rise0_and_r <= #TCQ &early1_match_rise0_r;
early1_match_fall0_and_r <= #TCQ &early1_match_fall0_r;
early1_match_rise1_and_r <= #TCQ &early1_match_rise1_r;
early1_match_fall1_and_r <= #TCQ &early1_match_fall1_r;
early1_data_match_r <= #TCQ (early1_match_rise0_and_r &&
early1_match_fall0_and_r &&
early1_match_rise1_and_r &&
early1_match_fall1_and_r);
early1_data_match_r1 <= #TCQ early1_data_match_r;
early2_match_rise0_and_r <= #TCQ &early2_match_rise0_r && rd_active_r3;
early2_match_fall0_and_r <= #TCQ &early2_match_fall0_r && rd_active_r3;
early2_match_rise1_and_r <= #TCQ &early2_match_rise1_r && rd_active_r3;
early2_match_fall1_and_r <= #TCQ &early2_match_fall1_r && rd_active_r3;
early2_data_match_r <= #TCQ (early2_match_rise0_and_r &&
early2_match_fall0_and_r &&
early2_match_rise1_and_r &&
early2_match_fall1_and_r);
end
end
endgenerate
  // Delay wrcal_pat_resume by 3 cycles to allow the Phaser_Out
  // coarse delay change to take effect before issuing a write command
always @(posedge clk) begin
wrcal_pat_resume_r1 <= #TCQ wrcal_pat_resume_r;
wrcal_pat_resume_r2 <= #TCQ wrcal_pat_resume_r1;
wrcal_pat_resume <= #TCQ wrcal_pat_resume_r2;
end
always @(posedge clk) begin
if (rst)
tap_inc_wait_cnt <= #TCQ 'd0;
else if ((cal2_state_r == CAL2_DQ_IDEL_DEC) ||
(cal2_state_r == CAL2_IFIFO_RESET) ||
(cal2_state_r == CAL2_SANITY_WAIT))
tap_inc_wait_cnt <= #TCQ tap_inc_wait_cnt + 1;
else
tap_inc_wait_cnt <= #TCQ 'd0;
end
always @(posedge clk) begin
if (rst)
not_empty_wait_cnt <= #TCQ 'd0;
else if ((cal2_state_r == CAL2_READ_WAIT) && wrcal_rd_wait)
not_empty_wait_cnt <= #TCQ not_empty_wait_cnt + 1;
else
not_empty_wait_cnt <= #TCQ 'd0;
end
always @(posedge clk)
cal2_state_r1 <= #TCQ cal2_state_r;
//*****************************************************************
// Write Calibration state machine
//*****************************************************************
  // While calibrating, check whether the expected pattern is received.
  // Otherwise, delay DQS to align to the correct CK edge.
  // NOTES:
  //  1. An error condition can occur for one of two reasons:
// a. If the matching logic does not receive the expected data
// pattern. However, the error may be "recoverable" because
// the write calibration is still in progress. If an error is
// found the write calibration logic delays DQS by an additional
// clock cycle and restarts the pattern detection process.
// By design, if the write path timing is incorrect, the correct
// data pattern will never be detected.
// b. Valid data not found even after incrementing Phaser_Out
// coarse delay line.
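  // Nominal state flow: CAL2_IDLE -> CAL2_READ_WAIT -> CAL2_NEXT_DQS (once
  // per DQS group) -> CAL2_DONE. Early writes detour through CAL2_WRLVL_WAIT
  // and CAL2_IFIFO_RESET, a late read (MPR IDELAY value still loaded) detours
  // through CAL2_DQ_IDEL_DEC, and unrecoverable mismatches or a read timeout
  // terminate in CAL2_ERR.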
always @(posedge clk) begin
if (rst) begin
wrcal_dqs_cnt_r <= #TCQ 'b0;
cal2_done_r <= #TCQ 1'b0;
cal2_prech_req_r <= #TCQ 1'b0;
cal2_state_r <= #TCQ CAL2_IDLE;
wrcal_pat_err <= #TCQ 1'b0;
wrcal_pat_resume_r <= #TCQ 1'b0;
wrcal_act_req <= #TCQ 1'b0;
cal2_if_reset <= #TCQ 1'b0;
temp_wrcal_done <= #TCQ 1'b0;
wrlvl_byte_redo <= #TCQ 1'b0;
early1_data <= #TCQ 1'b0;
early2_data <= #TCQ 1'b0;
idelay_ld <= #TCQ 1'b0;
idelay_ld_done <= #TCQ 1'b0;
pat1_detect <= #TCQ 1'b0;
early1_detect <= #TCQ 1'b0;
wrcal_sanity_chk_done <= #TCQ 1'b0;
wrcal_sanity_chk_err <= #TCQ 1'b0;
end else begin
cal2_prech_req_r <= #TCQ 1'b0;
case (cal2_state_r)
CAL2_IDLE: begin
wrcal_pat_err <= #TCQ 1'b0;
if (wrcal_start) begin
cal2_if_reset <= #TCQ 1'b0;
if (SIM_CAL_OPTION == "SKIP_CAL")
              // If skipping write calibration, proceed directly to the end
cal2_state_r <= #TCQ CAL2_DONE;
else
cal2_state_r <= #TCQ CAL2_READ_WAIT;
end
end
// General wait state to wait for read data to be output by the
// IN_FIFO
CAL2_READ_WAIT: begin
wrcal_pat_resume_r <= #TCQ 1'b0;
cal2_if_reset <= #TCQ 1'b0;
// Wait until read data is received, and pattern matching
// calculation is complete. NOTE: Need to add a timeout here
// in case for some reason data is never received (or rather
          // the PHASER_IN and IN_FIFO think they never receive data)
if (pat_data_match_valid_r && (nCK_PER_CLK == 4)) begin
if (pat_data_match_r)
// If found data match, then move on to next DQS group
cal2_state_r <= #TCQ CAL2_NEXT_DQS;
else begin
if (wrcal_sanity_chk_r)
cal2_state_r <= #TCQ CAL2_ERR;
// If writes are one or two cycles early then redo
// write leveling for the byte
else if (early1_data_match_r) begin
early1_data <= #TCQ 1'b1;
early2_data <= #TCQ 1'b0;
wrlvl_byte_redo <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_WRLVL_WAIT;
end else if (early2_data_match_r) begin
early1_data <= #TCQ 1'b0;
early2_data <= #TCQ 1'b1;
wrlvl_byte_redo <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_WRLVL_WAIT;
// Read late due to incorrect MPR idelay value
              // Decrement IDELAY to '0' for the current byte
end else if (~idelay_ld_done) begin
cal2_state_r <= #TCQ CAL2_DQ_IDEL_DEC;
idelay_ld <= #TCQ 1'b1;
end else
cal2_state_r <= #TCQ CAL2_ERR;
end
end else if (pat_data_match_valid_r && (nCK_PER_CLK == 2)) begin
if ((pat1_data_match_r1 && pat2_data_match_r) ||
(pat1_detect && pat2_data_match_r))
// If found data match, then move on to next DQS group
cal2_state_r <= #TCQ CAL2_NEXT_DQS;
else if (pat1_data_match_r1 && ~pat2_data_match_r) begin
cal2_state_r <= #TCQ CAL2_READ_WAIT;
pat1_detect <= #TCQ 1'b1;
end else begin
// If writes are one or two cycles early then redo
// write leveling for the byte
if (wrcal_sanity_chk_r)
cal2_state_r <= #TCQ CAL2_ERR;
else if ((early1_data_match_r1 && early2_data_match_r) ||
(early1_detect && early2_data_match_r)) begin
early1_data <= #TCQ 1'b1;
early2_data <= #TCQ 1'b0;
wrlvl_byte_redo <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_WRLVL_WAIT;
end else if (early1_data_match_r1 && ~early2_data_match_r) begin
early1_detect <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_READ_WAIT;
// Read late due to incorrect MPR idelay value
              // Decrement IDELAY to '0' for the current byte
end else if (~idelay_ld_done) begin
cal2_state_r <= #TCQ CAL2_DQ_IDEL_DEC;
idelay_ld <= #TCQ 1'b1;
end else
cal2_state_r <= #TCQ CAL2_ERR;
end
end else if (not_empty_wait_cnt == 'd31)
cal2_state_r <= #TCQ CAL2_ERR;
end
CAL2_WRLVL_WAIT: begin
early1_detect <= #TCQ 1'b0;
if (wrlvl_byte_done && ~wrlvl_byte_done_r)
wrlvl_byte_redo <= #TCQ 1'b0;
if (wrlvl_byte_done) begin
if (rd_active_r1 && ~rd_active_r) begin
cal2_state_r <= #TCQ CAL2_IFIFO_RESET;
cal2_if_reset <= #TCQ 1'b1;
early1_data <= #TCQ 1'b0;
early2_data <= #TCQ 1'b0;
end
end
end
CAL2_DQ_IDEL_DEC: begin
if (tap_inc_wait_cnt == 'd4) begin
idelay_ld <= #TCQ 1'b0;
cal2_state_r <= #TCQ CAL2_IFIFO_RESET;
cal2_if_reset <= #TCQ 1'b1;
idelay_ld_done <= #TCQ 1'b1;
end
end
CAL2_IFIFO_RESET: begin
if (tap_inc_wait_cnt == 'd15) begin
cal2_if_reset <= #TCQ 1'b0;
if (wrcal_sanity_chk_r)
cal2_state_r <= #TCQ CAL2_DONE;
else if (idelay_ld_done) begin
wrcal_pat_resume_r <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_READ_WAIT;
end else
cal2_state_r <= #TCQ CAL2_IDLE;
end
end
// Final processing for current DQS group. Move on to next group
CAL2_NEXT_DQS: begin
// At this point, we've just found the correct pattern for the
// current DQS group.
// Request bank/row precharge, and wait for its completion. Always
// precharge after each DQS group to avoid tRAS(max) violation
//verilint STARC-2.2.3.3 off
if (wrcal_sanity_chk_r && (wrcal_dqs_cnt_r != DQS_WIDTH-1)) begin
cal2_prech_req_r <= #TCQ 1'b0;
wrcal_dqs_cnt_r <= #TCQ wrcal_dqs_cnt_r + 1;
cal2_state_r <= #TCQ CAL2_SANITY_WAIT;
end else
cal2_prech_req_r <= #TCQ 1'b1;
idelay_ld_done <= #TCQ 1'b0;
pat1_detect <= #TCQ 1'b0;
if (prech_done)
if (((DQS_WIDTH == 1) || (SIM_CAL_OPTION == "FAST_CAL")) ||
(wrcal_dqs_cnt_r == DQS_WIDTH-1)) begin
// If either FAST_CAL is enabled and first DQS group is
// finished, or if the last DQS group was just finished,
// then end of write calibration
if (wrcal_sanity_chk_r) begin
cal2_if_reset <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_IFIFO_RESET;
end else
cal2_state_r <= #TCQ CAL2_DONE;
end else begin
// Continue to next DQS group
wrcal_dqs_cnt_r <= #TCQ wrcal_dqs_cnt_r + 1;
cal2_state_r <= #TCQ CAL2_READ_WAIT;
end
end
//verilint STARC-2.2.3.3 on
CAL2_SANITY_WAIT: begin
if (tap_inc_wait_cnt == 'd15) begin
cal2_state_r <= #TCQ CAL2_READ_WAIT;
wrcal_pat_resume_r <= #TCQ 1'b1;
end
end
        // Finished with write calibration
CAL2_DONE: begin
if (wrcal_sanity_chk && ~wrcal_sanity_chk_r) begin
cal2_done_r <= #TCQ 1'b0;
wrcal_dqs_cnt_r <= #TCQ 'd0;
cal2_state_r <= #TCQ CAL2_IDLE;
end else
cal2_done_r <= #TCQ 1'b1;
cal2_prech_req_r <= #TCQ 1'b0;
cal2_if_reset <= #TCQ 1'b0;
if (wrcal_sanity_chk_r)
wrcal_sanity_chk_done <= #TCQ 1'b1;
end
        // Assert error signal indicating that write timing is incorrect
CAL2_ERR: begin
wrcal_pat_resume_r <= #TCQ 1'b0;
if (wrcal_sanity_chk_r)
wrcal_sanity_chk_err <= #TCQ 1'b1;
else
wrcal_pat_err <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_ERR;
end
endcase
end
end
// Delay assertion of wrcal_done for write calibration by a few cycles after
// we've reached CAL2_DONE
always @(posedge clk)
if (rst)
cal2_done_r1 <= #TCQ 1'b0;
else
cal2_done_r1 <= #TCQ cal2_done_r;
always @(posedge clk)
if (rst || (wrcal_sanity_chk && ~wrcal_sanity_chk_r))
wrcal_done <= #TCQ 1'b0;
else if (cal2_done_r)
wrcal_done <= #TCQ 1'b1;
endmodule
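// A minimal, simulation-only smoke-test sketch for the module above. The
// testbench name, clock half-period, and stimulus values are illustrative
// assumptions, not part of the original design. It drives only reset and the
// wrcal_start handshake with idle read data, so calibration will not
// complete; it is intended for waveform inspection of the CAL2 state machine
// entry into CAL2_READ_WAIT.
module tb_ddr_phy_wrcal_smoke;
  localparam nCK_PER_CLK = 2;
  localparam DQ_WIDTH    = 64;
  localparam DQS_WIDTH   = 8;

  reg                               clk = 1'b0;
  reg                               rst = 1'b1;
  reg                               wrcal_start = 1'b0;
  reg [2*nCK_PER_CLK*DQ_WIDTH-1:0]  rd_data = 'b0;
  wire                              wrcal_done;
  wire                              wrcal_pat_err;

  // Free-running clock; half-period in simulator time units (illustrative)
  always #2500 clk = ~clk;

  mig_7series_v2_3_ddr_phy_wrcal #(
    .nCK_PER_CLK (nCK_PER_CLK),
    .DQ_WIDTH    (DQ_WIDTH),
    .DQS_WIDTH   (DQS_WIDTH)
  ) u_wrcal (
    .clk                 (clk),
    .rst                 (rst),
    .wrcal_start         (wrcal_start),
    .wrcal_rd_wait       (1'b0),
    .wrcal_sanity_chk    (1'b0),
    .dqsfound_retry_done (1'b0),
    .phy_rddata_en       (1'b0),
    .prech_done          (1'b0),
    .rd_data             (rd_data),
    .wl_po_coarse_cnt    ({3*DQS_WIDTH{1'b0}}),
    .wl_po_fine_cnt      ({6*DQS_WIDTH{1'b0}}),
    .wrlvl_byte_done     (1'b0),
    .wrcal_done          (wrcal_done),
    .wrcal_pat_err       (wrcal_pat_err)
    // Remaining outputs are left unconnected for this sketch
  );

  initial begin
    repeat (10) @(posedge clk);
    rst = 1'b0;
    repeat (10) @(posedge clk);
    wrcal_start = 1'b1;   // FSM should leave CAL2_IDLE on the next cycle
    repeat (100) @(posedge clk);
    $finish;
  end
endmodule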
|
module mig_7series_v2_3_ddr_phy_wrcal #
(
parameter TCQ = 100, // clk->out delay (sim only)
parameter nCK_PER_CLK = 2, // # of memory clocks per CLK
parameter CLK_PERIOD = 2500,
parameter DQ_WIDTH = 64, // # of DQ (data)
parameter DQS_CNT_WIDTH = 3, // = ceil(log2(DQS_WIDTH))
parameter DQS_WIDTH = 8, // # of DQS (strobe)
parameter DRAM_WIDTH = 8, // # of DQ per DQS
parameter PRE_REV3ES = "OFF", // Delay O/Ps using Phaser_Out fine dly
parameter SIM_CAL_OPTION = "NONE" // Skip various calibration steps
)
(
input clk,
input rst,
// Calibration status, control signals
input wrcal_start,
input wrcal_rd_wait,
input wrcal_sanity_chk,
input dqsfound_retry_done,
input phy_rddata_en,
output dqsfound_retry,
output wrcal_read_req,
output reg wrcal_act_req,
output reg wrcal_done,
output reg wrcal_pat_err,
output reg wrcal_prech_req,
output reg temp_wrcal_done,
output reg wrcal_sanity_chk_done,
input prech_done,
// Captured data in resync clock domain
input [2*nCK_PER_CLK*DQ_WIDTH-1:0] rd_data,
// Write level values of Phaser_Out coarse and fine
// delay taps required to load Phaser_Out register
input [3*DQS_WIDTH-1:0] wl_po_coarse_cnt,
input [6*DQS_WIDTH-1:0] wl_po_fine_cnt,
input wrlvl_byte_done,
output reg wrlvl_byte_redo,
output reg early1_data,
output reg early2_data,
// DQ IDELAY
output reg idelay_ld,
output reg wrcal_pat_resume, // to phy_init for write
output reg [DQS_CNT_WIDTH:0] po_stg2_wrcal_cnt,
output phy_if_reset,
// Debug Port
output [6*DQS_WIDTH-1:0] dbg_final_po_fine_tap_cnt,
output [3*DQS_WIDTH-1:0] dbg_final_po_coarse_tap_cnt,
output [99:0] dbg_phy_wrcal
);
// Length of calibration sequence (in # of words)
//localparam CAL_PAT_LEN = 8;
// Read data shift register length
localparam RD_SHIFT_LEN = 1; //(nCK_PER_CLK == 4) ? 1 : 2;
// # of reads for reliable read capture
localparam NUM_READS = 2;
// # of cycles to wait after changing RDEN count value
localparam RDEN_WAIT_CNT = 12;
localparam COARSE_CNT = (CLK_PERIOD/nCK_PER_CLK <= 2500) ? 3 : 6;
localparam FINE_CNT = (CLK_PERIOD/nCK_PER_CLK <= 2500) ? 22 : 44;
localparam CAL2_IDLE = 4'h0;
localparam CAL2_READ_WAIT = 4'h1;
localparam CAL2_NEXT_DQS = 4'h2;
localparam CAL2_WRLVL_WAIT = 4'h3;
localparam CAL2_IFIFO_RESET = 4'h4;
localparam CAL2_DQ_IDEL_DEC = 4'h5;
localparam CAL2_DONE = 4'h6;
localparam CAL2_SANITY_WAIT = 4'h7;
localparam CAL2_ERR = 4'h8;
integer i,j,k,l,m,p,q,d;
reg [2:0] po_coarse_tap_cnt [0:DQS_WIDTH-1];
reg [3*DQS_WIDTH-1:0] po_coarse_tap_cnt_w;
reg [5:0] po_fine_tap_cnt [0:DQS_WIDTH-1];
reg [6*DQS_WIDTH-1:0] po_fine_tap_cnt_w;
reg [DQS_CNT_WIDTH:0] wrcal_dqs_cnt_r/* synthesis syn_maxfan = 10 */;
reg [4:0] not_empty_wait_cnt;
reg [3:0] tap_inc_wait_cnt;
reg cal2_done_r;
reg cal2_done_r1;
reg cal2_prech_req_r;
reg [3:0] cal2_state_r;
reg [3:0] cal2_state_r1;
reg [2:0] wl_po_coarse_cnt_w [0:DQS_WIDTH-1];
reg [5:0] wl_po_fine_cnt_w [0:DQS_WIDTH-1];
reg cal2_if_reset;
reg wrcal_pat_resume_r;
reg wrcal_pat_resume_r1;
reg wrcal_pat_resume_r2;
reg wrcal_pat_resume_r3;
reg [DRAM_WIDTH-1:0] mux_rd_fall0_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall1_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise0_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise1_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall2_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall3_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise2_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise3_r;
reg pat_data_match_r;
reg pat1_data_match_r;
reg pat1_data_match_r1;
reg pat2_data_match_r;
reg pat_data_match_valid_r;
wire [RD_SHIFT_LEN-1:0] pat_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat2_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat2_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] early_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] early_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] early_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] early_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] early1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] early1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] early2_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] early2_fall1 [3:0];
reg [DRAM_WIDTH-1:0] pat_match_fall0_r;
reg pat_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] pat_match_fall1_r;
reg pat_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] pat_match_fall2_r;
reg pat_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] pat_match_fall3_r;
reg pat_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] pat_match_rise0_r;
reg pat_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] pat_match_rise1_r;
reg pat_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] pat_match_rise2_r;
reg pat_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] pat_match_rise3_r;
reg pat_match_rise3_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise0_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise1_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall0_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall1_r;
reg [DRAM_WIDTH-1:0] pat2_match_rise0_r;
reg [DRAM_WIDTH-1:0] pat2_match_rise1_r;
reg [DRAM_WIDTH-1:0] pat2_match_fall0_r;
reg [DRAM_WIDTH-1:0] pat2_match_fall1_r;
reg pat1_match_rise0_and_r;
reg pat1_match_rise1_and_r;
reg pat1_match_fall0_and_r;
reg pat1_match_fall1_and_r;
reg pat2_match_rise0_and_r;
reg pat2_match_rise1_and_r;
reg pat2_match_fall0_and_r;
reg pat2_match_fall1_and_r;
reg early1_data_match_r;
reg early1_data_match_r1;
reg [DRAM_WIDTH-1:0] early1_match_fall0_r;
reg early1_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] early1_match_fall1_r;
reg early1_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] early1_match_fall2_r;
reg early1_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] early1_match_fall3_r;
reg early1_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] early1_match_rise0_r;
reg early1_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] early1_match_rise1_r;
reg early1_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] early1_match_rise2_r;
reg early1_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] early1_match_rise3_r;
reg early1_match_rise3_and_r;
reg early2_data_match_r;
reg [DRAM_WIDTH-1:0] early2_match_fall0_r;
reg early2_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] early2_match_fall1_r;
reg early2_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] early2_match_fall2_r;
reg early2_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] early2_match_fall3_r;
reg early2_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] early2_match_rise0_r;
reg early2_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] early2_match_rise1_r;
reg early2_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] early2_match_rise2_r;
reg early2_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] early2_match_rise3_r;
reg early2_match_rise3_and_r;
wire [RD_SHIFT_LEN-1:0] pat_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat2_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat2_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] early_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] early_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] early_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] early_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] early1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] early1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] early2_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] early2_rise1 [3:0];
wire [DQ_WIDTH-1:0] rd_data_rise0;
wire [DQ_WIDTH-1:0] rd_data_fall0;
wire [DQ_WIDTH-1:0] rd_data_rise1;
wire [DQ_WIDTH-1:0] rd_data_fall1;
wire [DQ_WIDTH-1:0] rd_data_rise2;
wire [DQ_WIDTH-1:0] rd_data_fall2;
wire [DQ_WIDTH-1:0] rd_data_rise3;
wire [DQ_WIDTH-1:0] rd_data_fall3;
reg [DQS_CNT_WIDTH:0] rd_mux_sel_r;
reg rd_active_posedge_r;
reg rd_active_r;
reg rd_active_r1;
reg rd_active_r2;
reg rd_active_r3;
reg rd_active_r4;
reg rd_active_r5;
reg [RD_SHIFT_LEN-1:0] sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise3_r [DRAM_WIDTH-1:0];
reg wrlvl_byte_done_r;
reg idelay_ld_done;
reg pat1_detect;
reg early1_detect;
reg wrcal_sanity_chk_r;
reg wrcal_sanity_chk_err;
//***************************************************************************
// Debug
//***************************************************************************
always @(*) begin
for (d = 0; d < DQS_WIDTH; d = d + 1) begin
po_fine_tap_cnt_w[(6*d)+:6] = po_fine_tap_cnt[d];
po_coarse_tap_cnt_w[(3*d)+:3] = po_coarse_tap_cnt[d];
end
end
assign dbg_final_po_fine_tap_cnt = po_fine_tap_cnt_w;
assign dbg_final_po_coarse_tap_cnt = po_coarse_tap_cnt_w;
assign dbg_phy_wrcal[0] = pat_data_match_r;
assign dbg_phy_wrcal[4:1] = cal2_state_r1[3:0];
assign dbg_phy_wrcal[5] = wrcal_sanity_chk_err;
assign dbg_phy_wrcal[6] = wrcal_start;
assign dbg_phy_wrcal[7] = wrcal_done;
assign dbg_phy_wrcal[8] = pat_data_match_valid_r;
assign dbg_phy_wrcal[13+:DQS_CNT_WIDTH]= wrcal_dqs_cnt_r;
assign dbg_phy_wrcal[17+:5] = not_empty_wait_cnt;
assign dbg_phy_wrcal[22] = early1_data;
assign dbg_phy_wrcal[23] = early2_data;
assign dbg_phy_wrcal[24+:8] = mux_rd_rise0_r;
assign dbg_phy_wrcal[32+:8] = mux_rd_fall0_r;
assign dbg_phy_wrcal[40+:8] = mux_rd_rise1_r;
assign dbg_phy_wrcal[48+:8] = mux_rd_fall1_r;
assign dbg_phy_wrcal[56+:8] = mux_rd_rise2_r;
assign dbg_phy_wrcal[64+:8] = mux_rd_fall2_r;
assign dbg_phy_wrcal[72+:8] = mux_rd_rise3_r;
assign dbg_phy_wrcal[80+:8] = mux_rd_fall3_r;
assign dbg_phy_wrcal[88] = early1_data_match_r;
assign dbg_phy_wrcal[89] = early2_data_match_r;
assign dbg_phy_wrcal[90] = wrcal_sanity_chk_r & pat_data_match_valid_r;
assign dbg_phy_wrcal[91] = wrcal_sanity_chk_r;
assign dbg_phy_wrcal[92] = wrcal_sanity_chk_done;
assign dqsfound_retry = 1'b0;
assign wrcal_read_req = 1'b0;
assign phy_if_reset = cal2_if_reset;
//**************************************************************************
// DQS count to hard PHY during write calibration using Phaser_OUT Stage2
// coarse delay
//**************************************************************************
always @(posedge clk) begin
po_stg2_wrcal_cnt <= #TCQ wrcal_dqs_cnt_r;
wrlvl_byte_done_r <= #TCQ wrlvl_byte_done;
wrcal_sanity_chk_r <= #TCQ wrcal_sanity_chk;
end
//***************************************************************************
// Data mux to route appropriate byte to calibration logic - i.e. calibration
// is done sequentially, one byte (or DQS group) at a time
//***************************************************************************
generate
if (nCK_PER_CLK == 4) begin: gen_rd_data_div4
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
assign rd_data_rise2 = rd_data[5*DQ_WIDTH-1:4*DQ_WIDTH];
assign rd_data_fall2 = rd_data[6*DQ_WIDTH-1:5*DQ_WIDTH];
assign rd_data_rise3 = rd_data[7*DQ_WIDTH-1:6*DQ_WIDTH];
assign rd_data_fall3 = rd_data[8*DQ_WIDTH-1:7*DQ_WIDTH];
end else if (nCK_PER_CLK == 2) begin: gen_rd_data_div2
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
end
endgenerate
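// Example: with DQ_WIDTH = 64 (the default) and nCK_PER_CLK = 4, rd_data
// is 2*4*64 = 512 bits wide and is sliced into eight 64-bit words ordered
// {fall3, rise3, fall2, rise2, fall1, rise1, fall0, rise0} from MSB to
// LSB, i.e. rd_data_rise0 = rd_data[63:0], rd_data_fall0 = rd_data[127:64],
// ... up to rd_data_fall3 = rd_data[511:448].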
//**************************************************************************
// Final Phaser OUT coarse and fine delay taps after write calibration
// Sum of taps used during write leveling taps and write calibration
//**************************************************************************
always @(*) begin
for (m = 0; m < DQS_WIDTH; m = m + 1) begin
wl_po_coarse_cnt_w[m] = wl_po_coarse_cnt[3*m+:3];
wl_po_fine_cnt_w[m] = wl_po_fine_cnt[6*m+:6];
end
end
always @(posedge clk) begin
if (rst) begin
for (p = 0; p < DQS_WIDTH; p = p + 1) begin
po_coarse_tap_cnt[p] <= #TCQ {3{1'b0}};
po_fine_tap_cnt[p] <= #TCQ {6{1'b0}};
end
end else if (cal2_done_r && ~cal2_done_r1) begin
for (q = 0; q < DQS_WIDTH; q = q + 1) begin
po_coarse_tap_cnt[q] <= #TCQ wl_po_coarse_cnt_w[q];
po_fine_tap_cnt[q] <= #TCQ wl_po_fine_cnt_w[q];
end
end
end
always @(posedge clk) begin
rd_mux_sel_r <= #TCQ wrcal_dqs_cnt_r;
end
// Register outputs for improved timing.
// NOTE: Will need to change when per-bit DQ deskew is supported.
// Currently all bits in the DQS group are checked in aggregate
generate
genvar mux_i;
if (nCK_PER_CLK == 4) begin: gen_mux_rd_div4
for (mux_i = 0; mux_i < DRAM_WIDTH; mux_i = mux_i + 1) begin: gen_mux_rd
always @(posedge clk) begin
mux_rd_rise0_r[mux_i] <= #TCQ rd_data_rise0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall0_r[mux_i] <= #TCQ rd_data_fall0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_rise1_r[mux_i] <= #TCQ rd_data_rise1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall1_r[mux_i] <= #TCQ rd_data_fall1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_rise2_r[mux_i] <= #TCQ rd_data_rise2[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall2_r[mux_i] <= #TCQ rd_data_fall2[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_rise3_r[mux_i] <= #TCQ rd_data_rise3[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall3_r[mux_i] <= #TCQ rd_data_fall3[DRAM_WIDTH*rd_mux_sel_r + mux_i];
end
end
end else if (nCK_PER_CLK == 2) begin: gen_mux_rd_div2
for (mux_i = 0; mux_i < DRAM_WIDTH; mux_i = mux_i + 1) begin: gen_mux_rd
always @(posedge clk) begin
mux_rd_rise0_r[mux_i] <= #TCQ rd_data_rise0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall0_r[mux_i] <= #TCQ rd_data_fall0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_rise1_r[mux_i] <= #TCQ rd_data_rise1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall1_r[mux_i] <= #TCQ rd_data_fall1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
end
end
end
endgenerate
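// Example: with DRAM_WIDTH = 8 (the default), rd_mux_sel_r = 2 selects
// bits [23:16] of each rd_data_* word above, i.e. only the byte belonging
// to DQS group 2 is captured, so one DQS group is compared at a time.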
//***************************************************************************
// Generate request to PHY_INIT logic to issue a precharge. Required when
// calibration can take a long time (during which there are only constant
// reads present on this bus). In this case we need to issue periodic
// precharges to avoid tRAS violation. This signal must meet the following
// requirements: (1) only transition from 0->1 when prech is first needed,
// (2) stay at 1 and only transition 1->0 when RDLVL_PRECH_DONE asserted
//***************************************************************************
always @(posedge clk)
if (rst)
wrcal_prech_req <= #TCQ 1'b0;
else
// Combine requests from all stages here
wrcal_prech_req <= #TCQ cal2_prech_req_r;
//***************************************************************************
// Shift register to store the last RD_SHIFT_LEN cycles of data from the ISERDES
// NOTE: Written using discrete flops, but SRL can be used if the matching
// logic does the comparison sequentially, rather than parallel
//***************************************************************************
generate
genvar rd_i;
if (nCK_PER_CLK == 4) begin: gen_sr_div4
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
sr_rise0_r[rd_i] <= #TCQ mux_rd_rise0_r[rd_i];
sr_fall0_r[rd_i] <= #TCQ mux_rd_fall0_r[rd_i];
sr_rise1_r[rd_i] <= #TCQ mux_rd_rise1_r[rd_i];
sr_fall1_r[rd_i] <= #TCQ mux_rd_fall1_r[rd_i];
sr_rise2_r[rd_i] <= #TCQ mux_rd_rise2_r[rd_i];
sr_fall2_r[rd_i] <= #TCQ mux_rd_fall2_r[rd_i];
sr_rise3_r[rd_i] <= #TCQ mux_rd_rise3_r[rd_i];
sr_fall3_r[rd_i] <= #TCQ mux_rd_fall3_r[rd_i];
end
end
end else if (nCK_PER_CLK == 2) begin: gen_sr_div2
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
sr_rise0_r[rd_i] <= #TCQ mux_rd_rise0_r[rd_i];
sr_fall0_r[rd_i] <= #TCQ mux_rd_fall0_r[rd_i];
sr_rise1_r[rd_i] <= #TCQ mux_rd_rise1_r[rd_i];
sr_fall1_r[rd_i] <= #TCQ mux_rd_fall1_r[rd_i];
end
end
end
endgenerate
//***************************************************************************
// Write calibration:
// During write leveling DQS is aligned to the nearest CK edge that may not
// be the correct CK edge. Write calibration is required to align the DQS to
// the correct CK edge that clocks the write command.
// The Phaser_Out coarse delay line is adjusted if required to add a memory
// clock cycle of delay in order to read back the expected pattern.
//***************************************************************************
always @(posedge clk) begin
rd_active_r <= #TCQ phy_rddata_en;
rd_active_r1 <= #TCQ rd_active_r;
rd_active_r2 <= #TCQ rd_active_r1;
rd_active_r3 <= #TCQ rd_active_r2;
rd_active_r4 <= #TCQ rd_active_r3;
rd_active_r5 <= #TCQ rd_active_r4;
end
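// The rd_active_r* chain pipelines phy_rddata_en alongside the read-data
// compare path (byte mux -> shift register -> per-bit compare -> AND
// reduce); pat_data_match_valid_r below is derived from a tap of this
// chain so that it lines up with the registered match results.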
//*****************************************************************
// Expected data pattern when properly received by read capture
// logic:
// Based on pattern of ({rise,fall}) =
// 0xF, 0x0, 0xA, 0x5, 0x5, 0xA, 0x9, 0x6
// Each nibble will look like:
// bit3: 1, 0, 1, 0, 0, 1, 1, 0
// bit2: 1, 0, 0, 1, 1, 0, 0, 1
// bit1: 1, 0, 1, 0, 0, 1, 0, 1
// bit0: 1, 0, 0, 1, 1, 0, 1, 0
// Change the hard-coded pattern below accordingly as RD_SHIFT_LEN
// and the actual training pattern contents change
//*****************************************************************
generate
if (nCK_PER_CLK == 4) begin: gen_pat_div4
// FF00AA5555AA9966
assign pat_rise0[3] = 1'b1;
assign pat_fall0[3] = 1'b0;
assign pat_rise1[3] = 1'b1;
assign pat_fall1[3] = 1'b0;
assign pat_rise2[3] = 1'b0;
assign pat_fall2[3] = 1'b1;
assign pat_rise3[3] = 1'b1;
assign pat_fall3[3] = 1'b0;
assign pat_rise0[2] = 1'b1;
assign pat_fall0[2] = 1'b0;
assign pat_rise1[2] = 1'b0;
assign pat_fall1[2] = 1'b1;
assign pat_rise2[2] = 1'b1;
assign pat_fall2[2] = 1'b0;
assign pat_rise3[2] = 1'b0;
assign pat_fall3[2] = 1'b1;
assign pat_rise0[1] = 1'b1;
assign pat_fall0[1] = 1'b0;
assign pat_rise1[1] = 1'b1;
assign pat_fall1[1] = 1'b0;
assign pat_rise2[1] = 1'b0;
assign pat_fall2[1] = 1'b1;
assign pat_rise3[1] = 1'b0;
assign pat_fall3[1] = 1'b1;
assign pat_rise0[0] = 1'b1;
assign pat_fall0[0] = 1'b0;
assign pat_rise1[0] = 1'b0;
assign pat_fall1[0] = 1'b1;
assign pat_rise2[0] = 1'b1;
assign pat_fall2[0] = 1'b0;
assign pat_rise3[0] = 1'b1;
assign pat_fall3[0] = 1'b0;
// Pattern to distinguish between early write and incorrect read
// BB11EE4444EEDD88
assign early_rise0[3] = 1'b1;
assign early_fall0[3] = 1'b0;
assign early_rise1[3] = 1'b1;
assign early_fall1[3] = 1'b0;
assign early_rise2[3] = 1'b0;
assign early_fall2[3] = 1'b1;
assign early_rise3[3] = 1'b1;
assign early_fall3[3] = 1'b1;
assign early_rise0[2] = 1'b0;
assign early_fall0[2] = 1'b0;
assign early_rise1[2] = 1'b1;
assign early_fall1[2] = 1'b1;
assign early_rise2[2] = 1'b1;
assign early_fall2[2] = 1'b1;
assign early_rise3[2] = 1'b1;
assign early_fall3[2] = 1'b0;
assign early_rise0[1] = 1'b1;
assign early_fall0[1] = 1'b0;
assign early_rise1[1] = 1'b1;
assign early_fall1[1] = 1'b0;
assign early_rise2[1] = 1'b0;
assign early_fall2[1] = 1'b1;
assign early_rise3[1] = 1'b0;
assign early_fall3[1] = 1'b0;
assign early_rise0[0] = 1'b1;
assign early_fall0[0] = 1'b1;
assign early_rise1[0] = 1'b0;
assign early_fall1[0] = 1'b0;
assign early_rise2[0] = 1'b0;
assign early_fall2[0] = 1'b0;
assign early_rise3[0] = 1'b1;
assign early_fall3[0] = 1'b0;
end else if (nCK_PER_CLK == 2) begin: gen_pat_div2
// First cycle pattern FF00AA55
assign pat1_rise0[3] = 1'b1;
assign pat1_fall0[3] = 1'b0;
assign pat1_rise1[3] = 1'b1;
assign pat1_fall1[3] = 1'b0;
assign pat1_rise0[2] = 1'b1;
assign pat1_fall0[2] = 1'b0;
assign pat1_rise1[2] = 1'b0;
assign pat1_fall1[2] = 1'b1;
assign pat1_rise0[1] = 1'b1;
assign pat1_fall0[1] = 1'b0;
assign pat1_rise1[1] = 1'b1;
assign pat1_fall1[1] = 1'b0;
assign pat1_rise0[0] = 1'b1;
assign pat1_fall0[0] = 1'b0;
assign pat1_rise1[0] = 1'b0;
assign pat1_fall1[0] = 1'b1;
// Second cycle pattern 55AA9966
assign pat2_rise0[3] = 1'b0;
assign pat2_fall0[3] = 1'b1;
assign pat2_rise1[3] = 1'b1;
assign pat2_fall1[3] = 1'b0;
assign pat2_rise0[2] = 1'b1;
assign pat2_fall0[2] = 1'b0;
assign pat2_rise1[2] = 1'b0;
assign pat2_fall1[2] = 1'b1;
assign pat2_rise0[1] = 1'b0;
assign pat2_fall0[1] = 1'b1;
assign pat2_rise1[1] = 1'b0;
assign pat2_fall1[1] = 1'b1;
assign pat2_rise0[0] = 1'b1;
assign pat2_fall0[0] = 1'b0;
assign pat2_rise1[0] = 1'b1;
assign pat2_fall1[0] = 1'b0;
//Pattern to distinguish between early write and incorrect read
// First cycle pattern AA5555AA
assign early1_rise0[3] = 1'b1;
assign early1_fall0[3] = 1'b0;
assign early1_rise1[3] = 1'b0;
assign early1_fall1[3] = 1'b1;
assign early1_rise0[2] = 1'b0;
assign early1_fall0[2] = 1'b1;
assign early1_rise1[2] = 1'b1;
assign early1_fall1[2] = 1'b0;
assign early1_rise0[1] = 1'b1;
assign early1_fall0[1] = 1'b0;
assign early1_rise1[1] = 1'b0;
assign early1_fall1[1] = 1'b1;
assign early1_rise0[0] = 1'b0;
assign early1_fall0[0] = 1'b1;
assign early1_rise1[0] = 1'b1;
assign early1_fall1[0] = 1'b0;
// Second cycle pattern 9966BB11
assign early2_rise0[3] = 1'b1;
assign early2_fall0[3] = 1'b0;
assign early2_rise1[3] = 1'b1;
assign early2_fall1[3] = 1'b0;
assign early2_rise0[2] = 1'b0;
assign early2_fall0[2] = 1'b1;
assign early2_rise1[2] = 1'b0;
assign early2_fall1[2] = 1'b0;
assign early2_rise0[1] = 1'b0;
assign early2_fall0[1] = 1'b1;
assign early2_rise1[1] = 1'b1;
assign early2_fall1[1] = 1'b0;
assign early2_rise0[0] = 1'b1;
assign early2_fall0[0] = 1'b0;
assign early2_rise1[0] = 1'b1;
assign early2_fall1[0] = 1'b1;
end
endgenerate
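// The early1/early2 comparisons below check whether the returned burst
// matches the expected pattern arriving early: in 4:1 mode early1 shifts
// the compare by one memory clock (e.g. rise0/fall0 against
// pat_rise1/pat_fall1) and early2 by two, with the tail checked against
// the start of the "early" pattern above. A hit means the write was
// launched early, so write leveling is redone for that byte rather than
// treating it as a read error.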
// Each bit of each byte is compared to the expected pattern.
// This is done to drastically decrease the chance that invalid data
// clocked in while the DQ bus is tri-stated (along with a combination of
// the correct data) will resemble the expected data pattern. A better fix
// for this is to change the training pattern and/or make the pattern
// longer.
generate
genvar pt_i;
if (nCK_PER_CLK == 4) begin: gen_pat_match_div4
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat_rise0[pt_i%4])
pat_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat_fall0[pt_i%4])
pat_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat_rise1[pt_i%4])
pat_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat_fall1[pt_i%4])
pat_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat_rise2[pt_i%4])
pat_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
pat_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat_fall2[pt_i%4])
pat_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
pat_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == pat_rise3[pt_i%4])
pat_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
pat_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == pat_fall3[pt_i%4])
pat_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
pat_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat_rise1[pt_i%4])
early1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat_fall1[pt_i%4])
early1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat_rise2[pt_i%4])
early1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat_fall2[pt_i%4])
early1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat_rise3[pt_i%4])
early1_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat_fall3[pt_i%4])
early1_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == early_rise0[pt_i%4])
early1_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == early_fall0[pt_i%4])
early1_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat_rise2[pt_i%4])
early2_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat_fall2[pt_i%4])
early2_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat_rise3[pt_i%4])
early2_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat_fall3[pt_i%4])
early2_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == early_rise0[pt_i%4])
early2_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == early_fall0[pt_i%4])
early2_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == early_rise1[pt_i%4])
early2_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == early_fall1[pt_i%4])
early2_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
end
always @(posedge clk) begin
pat_match_rise0_and_r <= #TCQ &pat_match_rise0_r;
pat_match_fall0_and_r <= #TCQ &pat_match_fall0_r;
pat_match_rise1_and_r <= #TCQ &pat_match_rise1_r;
pat_match_fall1_and_r <= #TCQ &pat_match_fall1_r;
pat_match_rise2_and_r <= #TCQ &pat_match_rise2_r;
pat_match_fall2_and_r <= #TCQ &pat_match_fall2_r;
pat_match_rise3_and_r <= #TCQ &pat_match_rise3_r;
pat_match_fall3_and_r <= #TCQ &pat_match_fall3_r;
pat_data_match_r <= #TCQ (pat_match_rise0_and_r &&
pat_match_fall0_and_r &&
pat_match_rise1_and_r &&
pat_match_fall1_and_r &&
pat_match_rise2_and_r &&
pat_match_fall2_and_r &&
pat_match_rise3_and_r &&
pat_match_fall3_and_r);
pat_data_match_valid_r <= #TCQ rd_active_r3;
end
always @(posedge clk) begin
early1_match_rise0_and_r <= #TCQ &early1_match_rise0_r;
early1_match_fall0_and_r <= #TCQ &early1_match_fall0_r;
early1_match_rise1_and_r <= #TCQ &early1_match_rise1_r;
early1_match_fall1_and_r <= #TCQ &early1_match_fall1_r;
early1_match_rise2_and_r <= #TCQ &early1_match_rise2_r;
early1_match_fall2_and_r <= #TCQ &early1_match_fall2_r;
early1_match_rise3_and_r <= #TCQ &early1_match_rise3_r;
early1_match_fall3_and_r <= #TCQ &early1_match_fall3_r;
early1_data_match_r <= #TCQ (early1_match_rise0_and_r &&
early1_match_fall0_and_r &&
early1_match_rise1_and_r &&
early1_match_fall1_and_r &&
early1_match_rise2_and_r &&
early1_match_fall2_and_r &&
early1_match_rise3_and_r &&
early1_match_fall3_and_r);
end
always @(posedge clk) begin
early2_match_rise0_and_r <= #TCQ &early2_match_rise0_r;
early2_match_fall0_and_r <= #TCQ &early2_match_fall0_r;
early2_match_rise1_and_r <= #TCQ &early2_match_rise1_r;
early2_match_fall1_and_r <= #TCQ &early2_match_fall1_r;
early2_match_rise2_and_r <= #TCQ &early2_match_rise2_r;
early2_match_fall2_and_r <= #TCQ &early2_match_fall2_r;
early2_match_rise3_and_r <= #TCQ &early2_match_rise3_r;
early2_match_fall3_and_r <= #TCQ &early2_match_fall3_r;
early2_data_match_r <= #TCQ (early2_match_rise0_and_r &&
early2_match_fall0_and_r &&
early2_match_rise1_and_r &&
early2_match_fall1_and_r &&
early2_match_rise2_and_r &&
early2_match_fall2_and_r &&
early2_match_rise3_and_r &&
early2_match_fall3_and_r);
end
end else if (nCK_PER_CLK == 2) begin: gen_pat_match_div2
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat1_rise0[pt_i%4])
pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat1_fall0[pt_i%4])
pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat1_rise1[pt_i%4])
pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat1_fall1[pt_i%4])
pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat2_rise0[pt_i%4])
pat2_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat2_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat2_fall0[pt_i%4])
pat2_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat2_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat2_rise1[pt_i%4])
pat2_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat2_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat2_fall1[pt_i%4])
pat2_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat2_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == early1_rise0[pt_i%4])
early1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == early1_fall0[pt_i%4])
early1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == early1_rise1[pt_i%4])
early1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == early1_fall1[pt_i%4])
early1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
// Note: in 2:1 mode "early2" does not mean 2 cycles early;
// it refers to the second cycle of read data
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == early2_rise0[pt_i%4])
early2_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == early2_fall0[pt_i%4])
early2_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == early2_rise1[pt_i%4])
early2_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == early2_fall1[pt_i%4])
early2_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
end
always @(posedge clk) begin
pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r;
pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r;
pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r;
pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r;
pat1_data_match_r <= #TCQ (pat1_match_rise0_and_r &&
pat1_match_fall0_and_r &&
pat1_match_rise1_and_r &&
pat1_match_fall1_and_r);
pat1_data_match_r1 <= #TCQ pat1_data_match_r;
pat2_match_rise0_and_r <= #TCQ &pat2_match_rise0_r && rd_active_r3;
pat2_match_fall0_and_r <= #TCQ &pat2_match_fall0_r && rd_active_r3;
pat2_match_rise1_and_r <= #TCQ &pat2_match_rise1_r && rd_active_r3;
pat2_match_fall1_and_r <= #TCQ &pat2_match_fall1_r && rd_active_r3;
pat2_data_match_r <= #TCQ (pat2_match_rise0_and_r &&
pat2_match_fall0_and_r &&
pat2_match_rise1_and_r &&
pat2_match_fall1_and_r);
// For 2:1 mode, read valid is asserted for 2 clock cycles -
// here we generate a "match valid" pulse that is only 1 clock
// cycle wide and coincides with the completion of the match
// calculation
pat_data_match_valid_r <= #TCQ rd_active_r4 & ~rd_active_r5;
end
always @(posedge clk) begin
early1_match_rise0_and_r <= #TCQ &early1_match_rise0_r;
early1_match_fall0_and_r <= #TCQ &early1_match_fall0_r;
early1_match_rise1_and_r <= #TCQ &early1_match_rise1_r;
early1_match_fall1_and_r <= #TCQ &early1_match_fall1_r;
early1_data_match_r <= #TCQ (early1_match_rise0_and_r &&
early1_match_fall0_and_r &&
early1_match_rise1_and_r &&
early1_match_fall1_and_r);
early1_data_match_r1 <= #TCQ early1_data_match_r;
early2_match_rise0_and_r <= #TCQ &early2_match_rise0_r && rd_active_r3;
early2_match_fall0_and_r <= #TCQ &early2_match_fall0_r && rd_active_r3;
early2_match_rise1_and_r <= #TCQ &early2_match_rise1_r && rd_active_r3;
early2_match_fall1_and_r <= #TCQ &early2_match_fall1_r && rd_active_r3;
early2_data_match_r <= #TCQ (early2_match_rise0_and_r &&
early2_match_fall0_and_r &&
early2_match_rise1_and_r &&
early2_match_fall1_and_r);
end
end
endgenerate
// Delay wrcal_pat_resume by 3 cycles to allow the Phaser_Out
// coarse delay to take effect before issuing a write command
always @(posedge clk) begin
wrcal_pat_resume_r1 <= #TCQ wrcal_pat_resume_r;
wrcal_pat_resume_r2 <= #TCQ wrcal_pat_resume_r1;
wrcal_pat_resume <= #TCQ wrcal_pat_resume_r2;
end
always @(posedge clk) begin
if (rst)
tap_inc_wait_cnt <= #TCQ 'd0;
else if ((cal2_state_r == CAL2_DQ_IDEL_DEC) ||
(cal2_state_r == CAL2_IFIFO_RESET) ||
(cal2_state_r == CAL2_SANITY_WAIT))
tap_inc_wait_cnt <= #TCQ tap_inc_wait_cnt + 1;
else
tap_inc_wait_cnt <= #TCQ 'd0;
end
always @(posedge clk) begin
if (rst)
not_empty_wait_cnt <= #TCQ 'd0;
else if ((cal2_state_r == CAL2_READ_WAIT) && wrcal_rd_wait)
not_empty_wait_cnt <= #TCQ not_empty_wait_cnt + 1;
else
not_empty_wait_cnt <= #TCQ 'd0;
end
always @(posedge clk)
cal2_state_r1 <= #TCQ cal2_state_r;
//*****************************************************************
// Write Calibration state machine
//*****************************************************************
// When calibrating, check to see if the expected pattern is received.
// Otherwise delay DQS to align to the correct CK edge.
// NOTES:
// 1. An error condition can occur for two reasons:
//    a. The matching logic does not receive the expected data
//       pattern. However, the error may be "recoverable" because
//       write calibration is still in progress. If an error is
//       found, the write calibration logic delays DQS by an additional
//       clock cycle and restarts the pattern detection process.
//       By design, if the write path timing is incorrect, the correct
//       data pattern will never be detected.
//    b. Valid data is not found even after incrementing the Phaser_Out
//       coarse delay line.
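// State flow overview (details in the case statement below):
//   CAL2_IDLE        -> CAL2_READ_WAIT on wrcal_start (or directly to
//                       CAL2_DONE when SIM_CAL_OPTION == "SKIP_CAL")
//   CAL2_READ_WAIT   -> CAL2_NEXT_DQS on a pattern match,
//                       CAL2_WRLVL_WAIT when the write is detected early,
//                       CAL2_DQ_IDEL_DEC when the read appears late,
//                       CAL2_ERR on timeout or unrecoverable mismatch
//   CAL2_WRLVL_WAIT  -> CAL2_IFIFO_RESET once write leveling is redone
//   CAL2_DQ_IDEL_DEC -> CAL2_IFIFO_RESET after the IDELAY load
//   CAL2_IFIFO_RESET -> CAL2_READ_WAIT, CAL2_IDLE or CAL2_DONE
//   CAL2_NEXT_DQS    -> CAL2_READ_WAIT for the next byte, or
//                       CAL2_SANITY_WAIT / CAL2_IFIFO_RESET / CAL2_DONE
//   CAL2_SANITY_WAIT -> CAL2_READ_WAIT
//   CAL2_DONE        -> asserts cal2_done_r (re-entered for sanity check)
//   CAL2_ERR         -> terminal; flags wrcal_pat_err or the sanity error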
always @(posedge clk) begin
if (rst) begin
wrcal_dqs_cnt_r <= #TCQ 'b0;
cal2_done_r <= #TCQ 1'b0;
cal2_prech_req_r <= #TCQ 1'b0;
cal2_state_r <= #TCQ CAL2_IDLE;
wrcal_pat_err <= #TCQ 1'b0;
wrcal_pat_resume_r <= #TCQ 1'b0;
wrcal_act_req <= #TCQ 1'b0;
cal2_if_reset <= #TCQ 1'b0;
temp_wrcal_done <= #TCQ 1'b0;
wrlvl_byte_redo <= #TCQ 1'b0;
early1_data <= #TCQ 1'b0;
early2_data <= #TCQ 1'b0;
idelay_ld <= #TCQ 1'b0;
idelay_ld_done <= #TCQ 1'b0;
pat1_detect <= #TCQ 1'b0;
early1_detect <= #TCQ 1'b0;
wrcal_sanity_chk_done <= #TCQ 1'b0;
wrcal_sanity_chk_err <= #TCQ 1'b0;
end else begin
cal2_prech_req_r <= #TCQ 1'b0;
case (cal2_state_r)
CAL2_IDLE: begin
wrcal_pat_err <= #TCQ 1'b0;
if (wrcal_start) begin
cal2_if_reset <= #TCQ 1'b0;
if (SIM_CAL_OPTION == "SKIP_CAL")
// If skipping write calibration, proceed directly to the end.
cal2_state_r <= #TCQ CAL2_DONE;
else
cal2_state_r <= #TCQ CAL2_READ_WAIT;
end
end
// General wait state to wait for read data to be output by the
// IN_FIFO
CAL2_READ_WAIT: begin
wrcal_pat_resume_r <= #TCQ 1'b0;
cal2_if_reset <= #TCQ 1'b0;
// Wait until read data is received, and pattern matching
// calculation is complete. NOTE: Need to add a timeout here
// in case for some reason data is never received (or rather
// the PHASER_IN and IN_FIFO think they never receive data)
if (pat_data_match_valid_r && (nCK_PER_CLK == 4)) begin
if (pat_data_match_r)
// If found data match, then move on to next DQS group
cal2_state_r <= #TCQ CAL2_NEXT_DQS;
else begin
if (wrcal_sanity_chk_r)
cal2_state_r <= #TCQ CAL2_ERR;
// If writes are one or two cycles early then redo
// write leveling for the byte
else if (early1_data_match_r) begin
early1_data <= #TCQ 1'b1;
early2_data <= #TCQ 1'b0;
wrlvl_byte_redo <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_WRLVL_WAIT;
end else if (early2_data_match_r) begin
early1_data <= #TCQ 1'b0;
early2_data <= #TCQ 1'b1;
wrlvl_byte_redo <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_WRLVL_WAIT;
// Read late due to incorrect MPR idelay value
// Decrement IDELAY to '0' for the current byte
end else if (~idelay_ld_done) begin
cal2_state_r <= #TCQ CAL2_DQ_IDEL_DEC;
idelay_ld <= #TCQ 1'b1;
end else
cal2_state_r <= #TCQ CAL2_ERR;
end
end else if (pat_data_match_valid_r && (nCK_PER_CLK == 2)) begin
if ((pat1_data_match_r1 && pat2_data_match_r) ||
(pat1_detect && pat2_data_match_r))
// If found data match, then move on to next DQS group
cal2_state_r <= #TCQ CAL2_NEXT_DQS;
else if (pat1_data_match_r1 && ~pat2_data_match_r) begin
cal2_state_r <= #TCQ CAL2_READ_WAIT;
pat1_detect <= #TCQ 1'b1;
end else begin
// If writes are one or two cycles early then redo
// write leveling for the byte
if (wrcal_sanity_chk_r)
cal2_state_r <= #TCQ CAL2_ERR;
else if ((early1_data_match_r1 && early2_data_match_r) ||
(early1_detect && early2_data_match_r)) begin
early1_data <= #TCQ 1'b1;
early2_data <= #TCQ 1'b0;
wrlvl_byte_redo <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_WRLVL_WAIT;
end else if (early1_data_match_r1 && ~early2_data_match_r) begin
early1_detect <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_READ_WAIT;
// Read late due to incorrect MPR idelay value
// Decrement IDELAY to '0' for the current byte
end else if (~idelay_ld_done) begin
cal2_state_r <= #TCQ CAL2_DQ_IDEL_DEC;
idelay_ld <= #TCQ 1'b1;
end else
cal2_state_r <= #TCQ CAL2_ERR;
end
end else if (not_empty_wait_cnt == 'd31)
cal2_state_r <= #TCQ CAL2_ERR;
end
CAL2_WRLVL_WAIT: begin
early1_detect <= #TCQ 1'b0;
if (wrlvl_byte_done && ~wrlvl_byte_done_r)
wrlvl_byte_redo <= #TCQ 1'b0;
if (wrlvl_byte_done) begin
if (rd_active_r1 && ~rd_active_r) begin
cal2_state_r <= #TCQ CAL2_IFIFO_RESET;
cal2_if_reset <= #TCQ 1'b1;
early1_data <= #TCQ 1'b0;
early2_data <= #TCQ 1'b0;
end
end
end
CAL2_DQ_IDEL_DEC: begin
if (tap_inc_wait_cnt == 'd4) begin
idelay_ld <= #TCQ 1'b0;
cal2_state_r <= #TCQ CAL2_IFIFO_RESET;
cal2_if_reset <= #TCQ 1'b1;
idelay_ld_done <= #TCQ 1'b1;
end
end
CAL2_IFIFO_RESET: begin
if (tap_inc_wait_cnt == 'd15) begin
cal2_if_reset <= #TCQ 1'b0;
if (wrcal_sanity_chk_r)
cal2_state_r <= #TCQ CAL2_DONE;
else if (idelay_ld_done) begin
wrcal_pat_resume_r <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_READ_WAIT;
end else
cal2_state_r <= #TCQ CAL2_IDLE;
end
end
// Final processing for current DQS group. Move on to next group
CAL2_NEXT_DQS: begin
// At this point, we've just found the correct pattern for the
// current DQS group.
// Request bank/row precharge, and wait for its completion. Always
// precharge after each DQS group to avoid tRAS(max) violation
//verilint STARC-2.2.3.3 off
if (wrcal_sanity_chk_r && (wrcal_dqs_cnt_r != DQS_WIDTH-1)) begin
cal2_prech_req_r <= #TCQ 1'b0;
wrcal_dqs_cnt_r <= #TCQ wrcal_dqs_cnt_r + 1;
cal2_state_r <= #TCQ CAL2_SANITY_WAIT;
end else
cal2_prech_req_r <= #TCQ 1'b1;
idelay_ld_done <= #TCQ 1'b0;
pat1_detect <= #TCQ 1'b0;
if (prech_done)
if (((DQS_WIDTH == 1) || (SIM_CAL_OPTION == "FAST_CAL")) ||
(wrcal_dqs_cnt_r == DQS_WIDTH-1)) begin
// If either FAST_CAL is enabled and first DQS group is
// finished, or if the last DQS group was just finished,
// then end of write calibration
if (wrcal_sanity_chk_r) begin
cal2_if_reset <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_IFIFO_RESET;
end else
cal2_state_r <= #TCQ CAL2_DONE;
end else begin
// Continue to next DQS group
wrcal_dqs_cnt_r <= #TCQ wrcal_dqs_cnt_r + 1;
cal2_state_r <= #TCQ CAL2_READ_WAIT;
end
end
//verilint STARC-2.2.3.3 on
CAL2_SANITY_WAIT: begin
if (tap_inc_wait_cnt == 'd15) begin
cal2_state_r <= #TCQ CAL2_READ_WAIT;
wrcal_pat_resume_r <= #TCQ 1'b1;
end
end
// Finished with write calibration
CAL2_DONE: begin
if (wrcal_sanity_chk && ~wrcal_sanity_chk_r) begin
cal2_done_r <= #TCQ 1'b0;
wrcal_dqs_cnt_r <= #TCQ 'd0;
cal2_state_r <= #TCQ CAL2_IDLE;
end else
cal2_done_r <= #TCQ 1'b1;
cal2_prech_req_r <= #TCQ 1'b0;
cal2_if_reset <= #TCQ 1'b0;
if (wrcal_sanity_chk_r)
wrcal_sanity_chk_done <= #TCQ 1'b1;
end
// Assert error signal indicating that write timing is incorrect
CAL2_ERR: begin
wrcal_pat_resume_r <= #TCQ 1'b0;
if (wrcal_sanity_chk_r)
wrcal_sanity_chk_err <= #TCQ 1'b1;
else
wrcal_pat_err <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_ERR;
end
endcase
end
end
// Delay assertion of wrcal_done for write calibration by a few cycles after
// we've reached CAL2_DONE
always @(posedge clk)
if (rst)
cal2_done_r1 <= #TCQ 1'b0;
else
cal2_done_r1 <= #TCQ cal2_done_r;
always @(posedge clk)
if (rst || (wrcal_sanity_chk && ~wrcal_sanity_chk_r))
wrcal_done <= #TCQ 1'b0;
else if (cal2_done_r)
wrcal_done <= #TCQ 1'b1;
endmodule
|
module mig_7series_v2_3_ddr_phy_wrcal #
(
parameter TCQ = 100, // clk->out delay (sim only)
parameter nCK_PER_CLK = 2, // # of memory clocks per CLK
parameter CLK_PERIOD = 2500,
parameter DQ_WIDTH = 64, // # of DQ (data)
parameter DQS_CNT_WIDTH = 3, // = ceil(log2(DQS_WIDTH))
parameter DQS_WIDTH = 8, // # of DQS (strobe)
parameter DRAM_WIDTH = 8, // # of DQ per DQS
parameter PRE_REV3ES = "OFF", // Delay O/Ps using Phaser_Out fine dly
parameter SIM_CAL_OPTION = "NONE" // Skip various calibration steps
)
(
input clk,
input rst,
// Calibration status, control signals
input wrcal_start,
input wrcal_rd_wait,
input wrcal_sanity_chk,
input dqsfound_retry_done,
input phy_rddata_en,
output dqsfound_retry,
output wrcal_read_req,
output reg wrcal_act_req,
output reg wrcal_done,
output reg wrcal_pat_err,
output reg wrcal_prech_req,
output reg temp_wrcal_done,
output reg wrcal_sanity_chk_done,
input prech_done,
// Captured data in resync clock domain
input [2*nCK_PER_CLK*DQ_WIDTH-1:0] rd_data,
// Write level values of Phaser_Out coarse and fine
// delay taps required to load Phaser_Out register
input [3*DQS_WIDTH-1:0] wl_po_coarse_cnt,
input [6*DQS_WIDTH-1:0] wl_po_fine_cnt,
input wrlvl_byte_done,
output reg wrlvl_byte_redo,
output reg early1_data,
output reg early2_data,
// DQ IDELAY
output reg idelay_ld,
output reg wrcal_pat_resume, // to phy_init for write
output reg [DQS_CNT_WIDTH:0] po_stg2_wrcal_cnt,
output phy_if_reset,
// Debug Port
output [6*DQS_WIDTH-1:0] dbg_final_po_fine_tap_cnt,
output [3*DQS_WIDTH-1:0] dbg_final_po_coarse_tap_cnt,
output [99:0] dbg_phy_wrcal
);
// Length of calibration sequence (in # of words)
//localparam CAL_PAT_LEN = 8;
// Read data shift register length
localparam RD_SHIFT_LEN = 1; //(nCK_PER_CLK == 4) ? 1 : 2;
// # of reads for reliable read capture
localparam NUM_READS = 2;
// # of cycles to wait after changing RDEN count value
localparam RDEN_WAIT_CNT = 12;
localparam COARSE_CNT = (CLK_PERIOD/nCK_PER_CLK <= 2500) ? 3 : 6;
localparam FINE_CNT = (CLK_PERIOD/nCK_PER_CLK <= 2500) ? 22 : 44;
localparam CAL2_IDLE = 4'h0;
localparam CAL2_READ_WAIT = 4'h1;
localparam CAL2_NEXT_DQS = 4'h2;
localparam CAL2_WRLVL_WAIT = 4'h3;
localparam CAL2_IFIFO_RESET = 4'h4;
localparam CAL2_DQ_IDEL_DEC = 4'h5;
localparam CAL2_DONE = 4'h6;
localparam CAL2_SANITY_WAIT = 4'h7;
localparam CAL2_ERR = 4'h8;
integer i,j,k,l,m,p,q,d;
reg [2:0] po_coarse_tap_cnt [0:DQS_WIDTH-1];
reg [3*DQS_WIDTH-1:0] po_coarse_tap_cnt_w;
reg [5:0] po_fine_tap_cnt [0:DQS_WIDTH-1];
reg [6*DQS_WIDTH-1:0] po_fine_tap_cnt_w;
reg [DQS_CNT_WIDTH:0] wrcal_dqs_cnt_r/* synthesis syn_maxfan = 10 */;
reg [4:0] not_empty_wait_cnt;
reg [3:0] tap_inc_wait_cnt;
reg cal2_done_r;
reg cal2_done_r1;
reg cal2_prech_req_r;
reg [3:0] cal2_state_r;
reg [3:0] cal2_state_r1;
reg [2:0] wl_po_coarse_cnt_w [0:DQS_WIDTH-1];
reg [5:0] wl_po_fine_cnt_w [0:DQS_WIDTH-1];
reg cal2_if_reset;
reg wrcal_pat_resume_r;
reg wrcal_pat_resume_r1;
reg wrcal_pat_resume_r2;
reg wrcal_pat_resume_r3;
reg [DRAM_WIDTH-1:0] mux_rd_fall0_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall1_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise0_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise1_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall2_r;
reg [DRAM_WIDTH-1:0] mux_rd_fall3_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise2_r;
reg [DRAM_WIDTH-1:0] mux_rd_rise3_r;
reg pat_data_match_r;
reg pat1_data_match_r;
reg pat1_data_match_r1;
reg pat2_data_match_r;
reg pat_data_match_valid_r;
wire [RD_SHIFT_LEN-1:0] pat_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat2_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat2_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] early_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] early_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] early_fall2 [3:0];
wire [RD_SHIFT_LEN-1:0] early_fall3 [3:0];
wire [RD_SHIFT_LEN-1:0] early1_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] early1_fall1 [3:0];
wire [RD_SHIFT_LEN-1:0] early2_fall0 [3:0];
wire [RD_SHIFT_LEN-1:0] early2_fall1 [3:0];
reg [DRAM_WIDTH-1:0] pat_match_fall0_r;
reg pat_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] pat_match_fall1_r;
reg pat_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] pat_match_fall2_r;
reg pat_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] pat_match_fall3_r;
reg pat_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] pat_match_rise0_r;
reg pat_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] pat_match_rise1_r;
reg pat_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] pat_match_rise2_r;
reg pat_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] pat_match_rise3_r;
reg pat_match_rise3_and_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise0_r;
reg [DRAM_WIDTH-1:0] pat1_match_rise1_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall0_r;
reg [DRAM_WIDTH-1:0] pat1_match_fall1_r;
reg [DRAM_WIDTH-1:0] pat2_match_rise0_r;
reg [DRAM_WIDTH-1:0] pat2_match_rise1_r;
reg [DRAM_WIDTH-1:0] pat2_match_fall0_r;
reg [DRAM_WIDTH-1:0] pat2_match_fall1_r;
reg pat1_match_rise0_and_r;
reg pat1_match_rise1_and_r;
reg pat1_match_fall0_and_r;
reg pat1_match_fall1_and_r;
reg pat2_match_rise0_and_r;
reg pat2_match_rise1_and_r;
reg pat2_match_fall0_and_r;
reg pat2_match_fall1_and_r;
reg early1_data_match_r;
reg early1_data_match_r1;
reg [DRAM_WIDTH-1:0] early1_match_fall0_r;
reg early1_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] early1_match_fall1_r;
reg early1_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] early1_match_fall2_r;
reg early1_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] early1_match_fall3_r;
reg early1_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] early1_match_rise0_r;
reg early1_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] early1_match_rise1_r;
reg early1_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] early1_match_rise2_r;
reg early1_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] early1_match_rise3_r;
reg early1_match_rise3_and_r;
reg early2_data_match_r;
reg [DRAM_WIDTH-1:0] early2_match_fall0_r;
reg early2_match_fall0_and_r;
reg [DRAM_WIDTH-1:0] early2_match_fall1_r;
reg early2_match_fall1_and_r;
reg [DRAM_WIDTH-1:0] early2_match_fall2_r;
reg early2_match_fall2_and_r;
reg [DRAM_WIDTH-1:0] early2_match_fall3_r;
reg early2_match_fall3_and_r;
reg [DRAM_WIDTH-1:0] early2_match_rise0_r;
reg early2_match_rise0_and_r;
reg [DRAM_WIDTH-1:0] early2_match_rise1_r;
reg early2_match_rise1_and_r;
reg [DRAM_WIDTH-1:0] early2_match_rise2_r;
reg early2_match_rise2_and_r;
reg [DRAM_WIDTH-1:0] early2_match_rise3_r;
reg early2_match_rise3_and_r;
wire [RD_SHIFT_LEN-1:0] pat_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] pat_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] pat2_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] pat2_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] early_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] early_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] early_rise2 [3:0];
wire [RD_SHIFT_LEN-1:0] early_rise3 [3:0];
wire [RD_SHIFT_LEN-1:0] early1_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] early1_rise1 [3:0];
wire [RD_SHIFT_LEN-1:0] early2_rise0 [3:0];
wire [RD_SHIFT_LEN-1:0] early2_rise1 [3:0];
wire [DQ_WIDTH-1:0] rd_data_rise0;
wire [DQ_WIDTH-1:0] rd_data_fall0;
wire [DQ_WIDTH-1:0] rd_data_rise1;
wire [DQ_WIDTH-1:0] rd_data_fall1;
wire [DQ_WIDTH-1:0] rd_data_rise2;
wire [DQ_WIDTH-1:0] rd_data_fall2;
wire [DQ_WIDTH-1:0] rd_data_rise3;
wire [DQ_WIDTH-1:0] rd_data_fall3;
reg [DQS_CNT_WIDTH:0] rd_mux_sel_r;
reg rd_active_posedge_r;
reg rd_active_r;
reg rd_active_r1;
reg rd_active_r2;
reg rd_active_r3;
reg rd_active_r4;
reg rd_active_r5;
reg [RD_SHIFT_LEN-1:0] sr_fall0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise0_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise1_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_fall3_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise2_r [DRAM_WIDTH-1:0];
reg [RD_SHIFT_LEN-1:0] sr_rise3_r [DRAM_WIDTH-1:0];
reg wrlvl_byte_done_r;
reg idelay_ld_done;
reg pat1_detect;
reg early1_detect;
reg wrcal_sanity_chk_r;
reg wrcal_sanity_chk_err;
//***************************************************************************
// Debug
//***************************************************************************
always @(*) begin
for (d = 0; d < DQS_WIDTH; d = d + 1) begin
po_fine_tap_cnt_w[(6*d)+:6] = po_fine_tap_cnt[d];
po_coarse_tap_cnt_w[(3*d)+:3] = po_coarse_tap_cnt[d];
end
end
assign dbg_final_po_fine_tap_cnt = po_fine_tap_cnt_w;
assign dbg_final_po_coarse_tap_cnt = po_coarse_tap_cnt_w;
assign dbg_phy_wrcal[0] = pat_data_match_r;
assign dbg_phy_wrcal[4:1] = cal2_state_r1[3:0];
assign dbg_phy_wrcal[5] = wrcal_sanity_chk_err;
assign dbg_phy_wrcal[6] = wrcal_start;
assign dbg_phy_wrcal[7] = wrcal_done;
assign dbg_phy_wrcal[8] = pat_data_match_valid_r;
assign dbg_phy_wrcal[13+:DQS_CNT_WIDTH]= wrcal_dqs_cnt_r;
assign dbg_phy_wrcal[17+:5] = not_empty_wait_cnt;
assign dbg_phy_wrcal[22] = early1_data;
assign dbg_phy_wrcal[23] = early2_data;
assign dbg_phy_wrcal[24+:8] = mux_rd_rise0_r;
assign dbg_phy_wrcal[32+:8] = mux_rd_fall0_r;
assign dbg_phy_wrcal[40+:8] = mux_rd_rise1_r;
assign dbg_phy_wrcal[48+:8] = mux_rd_fall1_r;
assign dbg_phy_wrcal[56+:8] = mux_rd_rise2_r;
assign dbg_phy_wrcal[64+:8] = mux_rd_fall2_r;
assign dbg_phy_wrcal[72+:8] = mux_rd_rise3_r;
assign dbg_phy_wrcal[80+:8] = mux_rd_fall3_r;
assign dbg_phy_wrcal[88] = early1_data_match_r;
assign dbg_phy_wrcal[89] = early2_data_match_r;
assign dbg_phy_wrcal[90] = wrcal_sanity_chk_r & pat_data_match_valid_r;
assign dbg_phy_wrcal[91] = wrcal_sanity_chk_r;
assign dbg_phy_wrcal[92] = wrcal_sanity_chk_done;
assign dqsfound_retry = 1'b0;
assign wrcal_read_req = 1'b0;
assign phy_if_reset = cal2_if_reset;
//**************************************************************************
// DQS count to hard PHY during write calibration using Phaser_OUT Stage2
// coarse delay
//**************************************************************************
always @(posedge clk) begin
po_stg2_wrcal_cnt <= #TCQ wrcal_dqs_cnt_r;
wrlvl_byte_done_r <= #TCQ wrlvl_byte_done;
wrcal_sanity_chk_r <= #TCQ wrcal_sanity_chk;
end
//***************************************************************************
// Data mux to route appropriate byte to calibration logic - i.e. calibration
// is done sequentially, one byte (or DQS group) at a time
//***************************************************************************
generate
if (nCK_PER_CLK == 4) begin: gen_rd_data_div4
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
assign rd_data_rise2 = rd_data[5*DQ_WIDTH-1:4*DQ_WIDTH];
assign rd_data_fall2 = rd_data[6*DQ_WIDTH-1:5*DQ_WIDTH];
assign rd_data_rise3 = rd_data[7*DQ_WIDTH-1:6*DQ_WIDTH];
assign rd_data_fall3 = rd_data[8*DQ_WIDTH-1:7*DQ_WIDTH];
end else if (nCK_PER_CLK == 2) begin: gen_rd_data_div2
assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
end
endgenerate
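// Example: with DQ_WIDTH = 64 (the default) and nCK_PER_CLK = 4, rd_data
// is 2*4*64 = 512 bits wide and is sliced into eight 64-bit words ordered
// {fall3, rise3, fall2, rise2, fall1, rise1, fall0, rise0} from MSB to
// LSB, i.e. rd_data_rise0 = rd_data[63:0], rd_data_fall0 = rd_data[127:64],
// ... up to rd_data_fall3 = rd_data[511:448].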
//**************************************************************************
// Final Phaser OUT coarse and fine delay taps after write calibration
// Sum of taps used during write leveling taps and write calibration
//**************************************************************************
always @(*) begin
for (m = 0; m < DQS_WIDTH; m = m + 1) begin
wl_po_coarse_cnt_w[m] = wl_po_coarse_cnt[3*m+:3];
wl_po_fine_cnt_w[m] = wl_po_fine_cnt[6*m+:6];
end
end
always @(posedge clk) begin
if (rst) begin
for (p = 0; p < DQS_WIDTH; p = p + 1) begin
po_coarse_tap_cnt[p] <= #TCQ {3{1'b0}};
po_fine_tap_cnt[p] <= #TCQ {6{1'b0}};
end
end else if (cal2_done_r && ~cal2_done_r1) begin
for (q = 0; q < DQS_WIDTH; q = q + 1) begin
po_coarse_tap_cnt[q] <= #TCQ wl_po_coarse_cnt_w[q];
po_fine_tap_cnt[q] <= #TCQ wl_po_fine_cnt_w[q];
end
end
end
always @(posedge clk) begin
rd_mux_sel_r <= #TCQ wrcal_dqs_cnt_r;
end
// Register outputs for improved timing.
// NOTE: Will need to change when per-bit DQ deskew is supported.
// Currently all bits in the DQS group are checked in aggregate
generate
genvar mux_i;
if (nCK_PER_CLK == 4) begin: gen_mux_rd_div4
for (mux_i = 0; mux_i < DRAM_WIDTH; mux_i = mux_i + 1) begin: gen_mux_rd
always @(posedge clk) begin
mux_rd_rise0_r[mux_i] <= #TCQ rd_data_rise0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall0_r[mux_i] <= #TCQ rd_data_fall0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_rise1_r[mux_i] <= #TCQ rd_data_rise1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall1_r[mux_i] <= #TCQ rd_data_fall1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_rise2_r[mux_i] <= #TCQ rd_data_rise2[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall2_r[mux_i] <= #TCQ rd_data_fall2[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_rise3_r[mux_i] <= #TCQ rd_data_rise3[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall3_r[mux_i] <= #TCQ rd_data_fall3[DRAM_WIDTH*rd_mux_sel_r + mux_i];
end
end
end else if (nCK_PER_CLK == 2) begin: gen_mux_rd_div2
for (mux_i = 0; mux_i < DRAM_WIDTH; mux_i = mux_i + 1) begin: gen_mux_rd
always @(posedge clk) begin
mux_rd_rise0_r[mux_i] <= #TCQ rd_data_rise0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall0_r[mux_i] <= #TCQ rd_data_fall0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_rise1_r[mux_i] <= #TCQ rd_data_rise1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
mux_rd_fall1_r[mux_i] <= #TCQ rd_data_fall1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
end
end
end
endgenerate
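// Example: with DRAM_WIDTH = 8 (the default), rd_mux_sel_r = 2 selects
// bits [23:16] of each rd_data_* word above, i.e. only the byte belonging
// to DQS group 2 is captured, so one DQS group is compared at a time.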
//***************************************************************************
// Generate request to PHY_INIT logic to issue a precharge. Required when
// calibration can take a long time (during which there are only constant
// reads present on this bus). In this case we need to issue periodic
// precharges to avoid tRAS violation. This signal must meet the following
// requirements: (1) only transition from 0->1 when prech is first needed,
// (2) stay at 1 and only transition 1->0 when RDLVL_PRECH_DONE asserted
//***************************************************************************
always @(posedge clk)
if (rst)
wrcal_prech_req <= #TCQ 1'b0;
else
// Combine requests from all stages here
wrcal_prech_req <= #TCQ cal2_prech_req_r;
//***************************************************************************
// Shift register to store the last RD_SHIFT_LEN cycles of data from the ISERDES
// NOTE: Written using discrete flops, but SRL can be used if the matching
// logic does the comparison sequentially, rather than parallel
//***************************************************************************
generate
genvar rd_i;
if (nCK_PER_CLK == 4) begin: gen_sr_div4
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
sr_rise0_r[rd_i] <= #TCQ mux_rd_rise0_r[rd_i];
sr_fall0_r[rd_i] <= #TCQ mux_rd_fall0_r[rd_i];
sr_rise1_r[rd_i] <= #TCQ mux_rd_rise1_r[rd_i];
sr_fall1_r[rd_i] <= #TCQ mux_rd_fall1_r[rd_i];
sr_rise2_r[rd_i] <= #TCQ mux_rd_rise2_r[rd_i];
sr_fall2_r[rd_i] <= #TCQ mux_rd_fall2_r[rd_i];
sr_rise3_r[rd_i] <= #TCQ mux_rd_rise3_r[rd_i];
sr_fall3_r[rd_i] <= #TCQ mux_rd_fall3_r[rd_i];
end
end
end else if (nCK_PER_CLK == 2) begin: gen_sr_div2
for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr
always @(posedge clk) begin
sr_rise0_r[rd_i] <= #TCQ mux_rd_rise0_r[rd_i];
sr_fall0_r[rd_i] <= #TCQ mux_rd_fall0_r[rd_i];
sr_rise1_r[rd_i] <= #TCQ mux_rd_rise1_r[rd_i];
sr_fall1_r[rd_i] <= #TCQ mux_rd_fall1_r[rd_i];
end
end
end
endgenerate
//***************************************************************************
// Write calibration:
// During write leveling DQS is aligned to the nearest CK edge that may not
// be the correct CK edge. Write calibration is required to align the DQS to
// the correct CK edge that clocks the write command.
// The Phaser_Out coarse delay line is adjusted if required to add a memory
// clock cycle of delay in order to read back the expected pattern.
//***************************************************************************
always @(posedge clk) begin
rd_active_r <= #TCQ phy_rddata_en;
rd_active_r1 <= #TCQ rd_active_r;
rd_active_r2 <= #TCQ rd_active_r1;
rd_active_r3 <= #TCQ rd_active_r2;
rd_active_r4 <= #TCQ rd_active_r3;
rd_active_r5 <= #TCQ rd_active_r4;
end
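// The rd_active_r* chain pipelines phy_rddata_en alongside the read-data
// compare path (byte mux -> shift register -> per-bit compare -> AND
// reduce); pat_data_match_valid_r below is derived from a tap of this
// chain so that it lines up with the registered match results.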
//*****************************************************************
// Expected data pattern when properly received by read capture
// logic:
// Based on pattern of ({rise,fall}) =
// 0xF, 0x0, 0xA, 0x5, 0x5, 0xA, 0x9, 0x6
// Each nibble will look like:
// bit3: 1, 0, 1, 0, 0, 1, 1, 0
// bit2: 1, 0, 0, 1, 1, 0, 0, 1
// bit1: 1, 0, 1, 0, 0, 1, 0, 1
// bit0: 1, 0, 0, 1, 1, 0, 1, 0
// Change the hard-coded pattern below accordingly as RD_SHIFT_LEN
// and the actual training pattern contents change
//*****************************************************************
generate
if (nCK_PER_CLK == 4) begin: gen_pat_div4
// FF00AA5555AA9966
assign pat_rise0[3] = 1'b1;
assign pat_fall0[3] = 1'b0;
assign pat_rise1[3] = 1'b1;
assign pat_fall1[3] = 1'b0;
assign pat_rise2[3] = 1'b0;
assign pat_fall2[3] = 1'b1;
assign pat_rise3[3] = 1'b1;
assign pat_fall3[3] = 1'b0;
assign pat_rise0[2] = 1'b1;
assign pat_fall0[2] = 1'b0;
assign pat_rise1[2] = 1'b0;
assign pat_fall1[2] = 1'b1;
assign pat_rise2[2] = 1'b1;
assign pat_fall2[2] = 1'b0;
assign pat_rise3[2] = 1'b0;
assign pat_fall3[2] = 1'b1;
assign pat_rise0[1] = 1'b1;
assign pat_fall0[1] = 1'b0;
assign pat_rise1[1] = 1'b1;
assign pat_fall1[1] = 1'b0;
assign pat_rise2[1] = 1'b0;
assign pat_fall2[1] = 1'b1;
assign pat_rise3[1] = 1'b0;
assign pat_fall3[1] = 1'b1;
assign pat_rise0[0] = 1'b1;
assign pat_fall0[0] = 1'b0;
assign pat_rise1[0] = 1'b0;
assign pat_fall1[0] = 1'b1;
assign pat_rise2[0] = 1'b1;
assign pat_fall2[0] = 1'b0;
assign pat_rise3[0] = 1'b1;
assign pat_fall3[0] = 1'b0;
// Pattern to distinguish between early write and incorrect read
// BB11EE4444EEDD88
assign early_rise0[3] = 1'b1;
assign early_fall0[3] = 1'b0;
assign early_rise1[3] = 1'b1;
assign early_fall1[3] = 1'b0;
assign early_rise2[3] = 1'b0;
assign early_fall2[3] = 1'b1;
assign early_rise3[3] = 1'b1;
assign early_fall3[3] = 1'b1;
assign early_rise0[2] = 1'b0;
assign early_fall0[2] = 1'b0;
assign early_rise1[2] = 1'b1;
assign early_fall1[2] = 1'b1;
assign early_rise2[2] = 1'b1;
assign early_fall2[2] = 1'b1;
assign early_rise3[2] = 1'b1;
assign early_fall3[2] = 1'b0;
assign early_rise0[1] = 1'b1;
assign early_fall0[1] = 1'b0;
assign early_rise1[1] = 1'b1;
assign early_fall1[1] = 1'b0;
assign early_rise2[1] = 1'b0;
assign early_fall2[1] = 1'b1;
assign early_rise3[1] = 1'b0;
assign early_fall3[1] = 1'b0;
assign early_rise0[0] = 1'b1;
assign early_fall0[0] = 1'b1;
assign early_rise1[0] = 1'b0;
assign early_fall1[0] = 1'b0;
assign early_rise2[0] = 1'b0;
assign early_fall2[0] = 1'b0;
assign early_rise3[0] = 1'b1;
assign early_fall3[0] = 1'b0;
end else if (nCK_PER_CLK == 2) begin: gen_pat_div2
// First cycle pattern FF00AA55
assign pat1_rise0[3] = 1'b1;
assign pat1_fall0[3] = 1'b0;
assign pat1_rise1[3] = 1'b1;
assign pat1_fall1[3] = 1'b0;
assign pat1_rise0[2] = 1'b1;
assign pat1_fall0[2] = 1'b0;
assign pat1_rise1[2] = 1'b0;
assign pat1_fall1[2] = 1'b1;
assign pat1_rise0[1] = 1'b1;
assign pat1_fall0[1] = 1'b0;
assign pat1_rise1[1] = 1'b1;
assign pat1_fall1[1] = 1'b0;
assign pat1_rise0[0] = 1'b1;
assign pat1_fall0[0] = 1'b0;
assign pat1_rise1[0] = 1'b0;
assign pat1_fall1[0] = 1'b1;
// Second cycle pattern 55AA9966
assign pat2_rise0[3] = 1'b0;
assign pat2_fall0[3] = 1'b1;
assign pat2_rise1[3] = 1'b1;
assign pat2_fall1[3] = 1'b0;
assign pat2_rise0[2] = 1'b1;
assign pat2_fall0[2] = 1'b0;
assign pat2_rise1[2] = 1'b0;
assign pat2_fall1[2] = 1'b1;
assign pat2_rise0[1] = 1'b0;
assign pat2_fall0[1] = 1'b1;
assign pat2_rise1[1] = 1'b0;
assign pat2_fall1[1] = 1'b1;
assign pat2_rise0[0] = 1'b1;
assign pat2_fall0[0] = 1'b0;
assign pat2_rise1[0] = 1'b1;
assign pat2_fall1[0] = 1'b0;
//Pattern to distinguish between early write and incorrect read
// First cycle pattern AA5555AA
assign early1_rise0[3] = 1'b1;
assign early1_fall0[3] = 1'b0;
assign early1_rise1[3] = 1'b0;
assign early1_fall1[3] = 1'b1;
assign early1_rise0[2] = 1'b0;
assign early1_fall0[2] = 1'b1;
assign early1_rise1[2] = 1'b1;
assign early1_fall1[2] = 1'b0;
assign early1_rise0[1] = 1'b1;
assign early1_fall0[1] = 1'b0;
assign early1_rise1[1] = 1'b0;
assign early1_fall1[1] = 1'b1;
assign early1_rise0[0] = 1'b0;
assign early1_fall0[0] = 1'b1;
assign early1_rise1[0] = 1'b1;
assign early1_fall1[0] = 1'b0;
// Second cycle pattern 9966BB11
assign early2_rise0[3] = 1'b1;
assign early2_fall0[3] = 1'b0;
assign early2_rise1[3] = 1'b1;
assign early2_fall1[3] = 1'b0;
assign early2_rise0[2] = 1'b0;
assign early2_fall0[2] = 1'b1;
assign early2_rise1[2] = 1'b0;
assign early2_fall1[2] = 1'b0;
assign early2_rise0[1] = 1'b0;
assign early2_fall0[1] = 1'b1;
assign early2_rise1[1] = 1'b1;
assign early2_fall1[1] = 1'b0;
assign early2_rise0[0] = 1'b1;
assign early2_fall0[0] = 1'b0;
assign early2_rise1[0] = 1'b1;
assign early2_fall1[0] = 1'b1;
end
endgenerate
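// The early1/early2 comparisons below check whether the returned burst
// matches the expected pattern arriving early: in 4:1 mode early1 shifts
// the compare by one memory clock (e.g. rise0/fall0 against
// pat_rise1/pat_fall1) and early2 by two, with the tail checked against
// the start of the "early" pattern above. A hit means the write was
// launched early, so write leveling is redone for that byte rather than
// treating it as a read error.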
// Each bit of each byte is compared to the expected pattern.
// This is done to drastically decrease the chance that invalid data
// clocked in while the DQ bus is tri-stated (along with a combination of
// the correct data) will resemble the expected data pattern. A better fix
// for this is to change the training pattern and/or make the pattern
// longer.
generate
genvar pt_i;
if (nCK_PER_CLK == 4) begin: gen_pat_match_div4
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat_rise0[pt_i%4])
pat_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat_fall0[pt_i%4])
pat_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat_rise1[pt_i%4])
pat_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat_fall1[pt_i%4])
pat_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat_rise2[pt_i%4])
pat_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
pat_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat_fall2[pt_i%4])
pat_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
pat_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == pat_rise3[pt_i%4])
pat_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
pat_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == pat_fall3[pt_i%4])
pat_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
pat_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat_rise1[pt_i%4])
early1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat_fall1[pt_i%4])
early1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat_rise2[pt_i%4])
early1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat_fall2[pt_i%4])
early1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == pat_rise3[pt_i%4])
early1_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == pat_fall3[pt_i%4])
early1_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == early_rise0[pt_i%4])
early1_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == early_fall0[pt_i%4])
early1_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat_rise2[pt_i%4])
early2_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat_fall2[pt_i%4])
early2_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat_rise3[pt_i%4])
early2_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat_fall3[pt_i%4])
early2_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall1_r[pt_i] <= #TCQ 1'b0;
if (sr_rise2_r[pt_i] == early_rise0[pt_i%4])
early2_match_rise2_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise2_r[pt_i] <= #TCQ 1'b0;
if (sr_fall2_r[pt_i] == early_fall0[pt_i%4])
early2_match_fall2_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall2_r[pt_i] <= #TCQ 1'b0;
if (sr_rise3_r[pt_i] == early_rise1[pt_i%4])
early2_match_rise3_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise3_r[pt_i] <= #TCQ 1'b0;
if (sr_fall3_r[pt_i] == early_fall1[pt_i%4])
early2_match_fall3_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall3_r[pt_i] <= #TCQ 1'b0;
end
end
always @(posedge clk) begin
pat_match_rise0_and_r <= #TCQ &pat_match_rise0_r;
pat_match_fall0_and_r <= #TCQ &pat_match_fall0_r;
pat_match_rise1_and_r <= #TCQ &pat_match_rise1_r;
pat_match_fall1_and_r <= #TCQ &pat_match_fall1_r;
pat_match_rise2_and_r <= #TCQ &pat_match_rise2_r;
pat_match_fall2_and_r <= #TCQ &pat_match_fall2_r;
pat_match_rise3_and_r <= #TCQ &pat_match_rise3_r;
pat_match_fall3_and_r <= #TCQ &pat_match_fall3_r;
pat_data_match_r <= #TCQ (pat_match_rise0_and_r &&
pat_match_fall0_and_r &&
pat_match_rise1_and_r &&
pat_match_fall1_and_r &&
pat_match_rise2_and_r &&
pat_match_fall2_and_r &&
pat_match_rise3_and_r &&
pat_match_fall3_and_r);
pat_data_match_valid_r <= #TCQ rd_active_r3;
end
always @(posedge clk) begin
early1_match_rise0_and_r <= #TCQ &early1_match_rise0_r;
early1_match_fall0_and_r <= #TCQ &early1_match_fall0_r;
early1_match_rise1_and_r <= #TCQ &early1_match_rise1_r;
early1_match_fall1_and_r <= #TCQ &early1_match_fall1_r;
early1_match_rise2_and_r <= #TCQ &early1_match_rise2_r;
early1_match_fall2_and_r <= #TCQ &early1_match_fall2_r;
early1_match_rise3_and_r <= #TCQ &early1_match_rise3_r;
early1_match_fall3_and_r <= #TCQ &early1_match_fall3_r;
early1_data_match_r <= #TCQ (early1_match_rise0_and_r &&
early1_match_fall0_and_r &&
early1_match_rise1_and_r &&
early1_match_fall1_and_r &&
early1_match_rise2_and_r &&
early1_match_fall2_and_r &&
early1_match_rise3_and_r &&
early1_match_fall3_and_r);
end
always @(posedge clk) begin
early2_match_rise0_and_r <= #TCQ &early2_match_rise0_r;
early2_match_fall0_and_r <= #TCQ &early2_match_fall0_r;
early2_match_rise1_and_r <= #TCQ &early2_match_rise1_r;
early2_match_fall1_and_r <= #TCQ &early2_match_fall1_r;
early2_match_rise2_and_r <= #TCQ &early2_match_rise2_r;
early2_match_fall2_and_r <= #TCQ &early2_match_fall2_r;
early2_match_rise3_and_r <= #TCQ &early2_match_rise3_r;
early2_match_fall3_and_r <= #TCQ &early2_match_fall3_r;
early2_data_match_r <= #TCQ (early2_match_rise0_and_r &&
early2_match_fall0_and_r &&
early2_match_rise1_and_r &&
early2_match_fall1_and_r &&
early2_match_rise2_and_r &&
early2_match_fall2_and_r &&
early2_match_rise3_and_r &&
early2_match_fall3_and_r);
end
end else if (nCK_PER_CLK == 2) begin: gen_pat_match_div2
for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat1_rise0[pt_i%4])
pat1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat1_fall0[pt_i%4])
pat1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat1_rise1[pt_i%4])
pat1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat1_fall1[pt_i%4])
pat1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == pat2_rise0[pt_i%4])
pat2_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
pat2_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == pat2_fall0[pt_i%4])
pat2_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
pat2_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == pat2_rise1[pt_i%4])
pat2_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
pat2_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == pat2_fall1[pt_i%4])
pat2_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
pat2_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == early1_rise0[pt_i%4])
early1_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == early1_fall0[pt_i%4])
early1_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == early1_rise1[pt_i%4])
early1_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
early1_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == early1_fall1[pt_i%4])
early1_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
early1_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
// early2 in this case does not mean 2 cycles early but
// the second cycle of read data in 2:1 mode
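        // (the early1_*/early2_* nibbles compared here are the
        // AA5555AA / 9966BB11 patterns assigned in gen_pat_div2 above)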
always @(posedge clk) begin
if (sr_rise0_r[pt_i] == early2_rise0[pt_i%4])
early2_match_rise0_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise0_r[pt_i] <= #TCQ 1'b0;
if (sr_fall0_r[pt_i] == early2_fall0[pt_i%4])
early2_match_fall0_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall0_r[pt_i] <= #TCQ 1'b0;
if (sr_rise1_r[pt_i] == early2_rise1[pt_i%4])
early2_match_rise1_r[pt_i] <= #TCQ 1'b1;
else
early2_match_rise1_r[pt_i] <= #TCQ 1'b0;
if (sr_fall1_r[pt_i] == early2_fall1[pt_i%4])
early2_match_fall1_r[pt_i] <= #TCQ 1'b1;
else
early2_match_fall1_r[pt_i] <= #TCQ 1'b0;
end
end
always @(posedge clk) begin
pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r;
pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r;
pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r;
pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r;
pat1_data_match_r <= #TCQ (pat1_match_rise0_and_r &&
pat1_match_fall0_and_r &&
pat1_match_rise1_and_r &&
pat1_match_fall1_and_r);
pat1_data_match_r1 <= #TCQ pat1_data_match_r;
pat2_match_rise0_and_r <= #TCQ &pat2_match_rise0_r && rd_active_r3;
pat2_match_fall0_and_r <= #TCQ &pat2_match_fall0_r && rd_active_r3;
pat2_match_rise1_and_r <= #TCQ &pat2_match_rise1_r && rd_active_r3;
pat2_match_fall1_and_r <= #TCQ &pat2_match_fall1_r && rd_active_r3;
pat2_data_match_r <= #TCQ (pat2_match_rise0_and_r &&
pat2_match_fall0_and_r &&
pat2_match_rise1_and_r &&
pat2_match_fall1_and_r);
        // For 2:1 mode, read valid is asserted for 2 clock cycles -
        // here we generate a "match valid" pulse that is only 1 clock
        // cycle wide and coincides with completion of the match
        // calculation
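        // (rd_active_r4 & ~rd_active_r5 rises for exactly one cycle at the
        // start of the delayed 2-cycle read-valid window)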
pat_data_match_valid_r <= #TCQ rd_active_r4 & ~rd_active_r5;
end
always @(posedge clk) begin
early1_match_rise0_and_r <= #TCQ &early1_match_rise0_r;
early1_match_fall0_and_r <= #TCQ &early1_match_fall0_r;
early1_match_rise1_and_r <= #TCQ &early1_match_rise1_r;
early1_match_fall1_and_r <= #TCQ &early1_match_fall1_r;
early1_data_match_r <= #TCQ (early1_match_rise0_and_r &&
early1_match_fall0_and_r &&
early1_match_rise1_and_r &&
early1_match_fall1_and_r);
early1_data_match_r1 <= #TCQ early1_data_match_r;
early2_match_rise0_and_r <= #TCQ &early2_match_rise0_r && rd_active_r3;
early2_match_fall0_and_r <= #TCQ &early2_match_fall0_r && rd_active_r3;
early2_match_rise1_and_r <= #TCQ &early2_match_rise1_r && rd_active_r3;
early2_match_fall1_and_r <= #TCQ &early2_match_fall1_r && rd_active_r3;
early2_data_match_r <= #TCQ (early2_match_rise0_and_r &&
early2_match_fall0_and_r &&
early2_match_rise1_and_r &&
early2_match_fall1_and_r);
end
end
endgenerate
// Need to delay it by 3 cycles in order to wait for Phaser_Out
// coarse delay to take effect before issuing a write command
always @(posedge clk) begin
wrcal_pat_resume_r1 <= #TCQ wrcal_pat_resume_r;
wrcal_pat_resume_r2 <= #TCQ wrcal_pat_resume_r1;
wrcal_pat_resume <= #TCQ wrcal_pat_resume_r2;
end
always @(posedge clk) begin
if (rst)
tap_inc_wait_cnt <= #TCQ 'd0;
else if ((cal2_state_r == CAL2_DQ_IDEL_DEC) ||
(cal2_state_r == CAL2_IFIFO_RESET) ||
(cal2_state_r == CAL2_SANITY_WAIT))
tap_inc_wait_cnt <= #TCQ tap_inc_wait_cnt + 1;
else
tap_inc_wait_cnt <= #TCQ 'd0;
end
always @(posedge clk) begin
if (rst)
not_empty_wait_cnt <= #TCQ 'd0;
else if ((cal2_state_r == CAL2_READ_WAIT) && wrcal_rd_wait)
not_empty_wait_cnt <= #TCQ not_empty_wait_cnt + 1;
else
not_empty_wait_cnt <= #TCQ 'd0;
end
always @(posedge clk)
cal2_state_r1 <= #TCQ cal2_state_r;
//*****************************************************************
// Write Calibration state machine
//*****************************************************************
// when calibrating, check to see if the expected pattern is received.
// Otherwise delay DQS to align to correct CK edge.
// NOTES:
// 1. An error condition can occur due to two reasons:
// a. If the matching logic does not receive the expected data
// pattern. However, the error may be "recoverable" because
// the write calibration is still in progress. If an error is
// found the write calibration logic delays DQS by an additional
// clock cycle and restarts the pattern detection process.
// By design, if the write path timing is incorrect, the correct
// data pattern will never be detected.
// b. Valid data not found even after incrementing Phaser_Out
// coarse delay line.
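  // Typical flow through the states below: CAL2_IDLE -> CAL2_READ_WAIT;
  // on a pattern match CAL2_NEXT_DQS advances to the next byte (with a
  // precharge in between) until all bytes are done (CAL2_DONE). On a
  // mismatch the FSM either redoes write leveling for the byte
  // (CAL2_WRLVL_WAIT), clears the byte's IDELAY (CAL2_DQ_IDEL_DEC ->
  // CAL2_IFIFO_RESET) and retries, or gives up (CAL2_ERR).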
always @(posedge clk) begin
if (rst) begin
wrcal_dqs_cnt_r <= #TCQ 'b0;
cal2_done_r <= #TCQ 1'b0;
cal2_prech_req_r <= #TCQ 1'b0;
cal2_state_r <= #TCQ CAL2_IDLE;
wrcal_pat_err <= #TCQ 1'b0;
wrcal_pat_resume_r <= #TCQ 1'b0;
wrcal_act_req <= #TCQ 1'b0;
cal2_if_reset <= #TCQ 1'b0;
temp_wrcal_done <= #TCQ 1'b0;
wrlvl_byte_redo <= #TCQ 1'b0;
early1_data <= #TCQ 1'b0;
early2_data <= #TCQ 1'b0;
idelay_ld <= #TCQ 1'b0;
idelay_ld_done <= #TCQ 1'b0;
pat1_detect <= #TCQ 1'b0;
early1_detect <= #TCQ 1'b0;
wrcal_sanity_chk_done <= #TCQ 1'b0;
wrcal_sanity_chk_err <= #TCQ 1'b0;
end else begin
cal2_prech_req_r <= #TCQ 1'b0;
case (cal2_state_r)
CAL2_IDLE: begin
wrcal_pat_err <= #TCQ 1'b0;
if (wrcal_start) begin
cal2_if_reset <= #TCQ 1'b0;
if (SIM_CAL_OPTION == "SKIP_CAL")
// If skip write calibration, then proceed to end.
cal2_state_r <= #TCQ CAL2_DONE;
else
cal2_state_r <= #TCQ CAL2_READ_WAIT;
end
end
// General wait state to wait for read data to be output by the
// IN_FIFO
CAL2_READ_WAIT: begin
wrcal_pat_resume_r <= #TCQ 1'b0;
cal2_if_reset <= #TCQ 1'b0;
// Wait until read data is received, and pattern matching
// calculation is complete. NOTE: Need to add a timeout here
// in case for some reason data is never received (or rather
            // the PHASER_IN and IN_FIFO think they never receive data)
if (pat_data_match_valid_r && (nCK_PER_CLK == 4)) begin
if (pat_data_match_r)
// If found data match, then move on to next DQS group
cal2_state_r <= #TCQ CAL2_NEXT_DQS;
else begin
if (wrcal_sanity_chk_r)
cal2_state_r <= #TCQ CAL2_ERR;
// If writes are one or two cycles early then redo
// write leveling for the byte
else if (early1_data_match_r) begin
early1_data <= #TCQ 1'b1;
early2_data <= #TCQ 1'b0;
wrlvl_byte_redo <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_WRLVL_WAIT;
end else if (early2_data_match_r) begin
early1_data <= #TCQ 1'b0;
early2_data <= #TCQ 1'b1;
wrlvl_byte_redo <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_WRLVL_WAIT;
// Read late due to incorrect MPR idelay value
              // Decrement Idelay to '0' for the current byte
end else if (~idelay_ld_done) begin
cal2_state_r <= #TCQ CAL2_DQ_IDEL_DEC;
idelay_ld <= #TCQ 1'b1;
end else
cal2_state_r <= #TCQ CAL2_ERR;
end
end else if (pat_data_match_valid_r && (nCK_PER_CLK == 2)) begin
if ((pat1_data_match_r1 && pat2_data_match_r) ||
(pat1_detect && pat2_data_match_r))
// If found data match, then move on to next DQS group
cal2_state_r <= #TCQ CAL2_NEXT_DQS;
else if (pat1_data_match_r1 && ~pat2_data_match_r) begin
cal2_state_r <= #TCQ CAL2_READ_WAIT;
pat1_detect <= #TCQ 1'b1;
end else begin
// If writes are one or two cycles early then redo
// write leveling for the byte
if (wrcal_sanity_chk_r)
cal2_state_r <= #TCQ CAL2_ERR;
else if ((early1_data_match_r1 && early2_data_match_r) ||
(early1_detect && early2_data_match_r)) begin
early1_data <= #TCQ 1'b1;
early2_data <= #TCQ 1'b0;
wrlvl_byte_redo <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_WRLVL_WAIT;
end else if (early1_data_match_r1 && ~early2_data_match_r) begin
early1_detect <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_READ_WAIT;
// Read late due to incorrect MPR idelay value
              // Decrement Idelay to '0' for the current byte
end else if (~idelay_ld_done) begin
cal2_state_r <= #TCQ CAL2_DQ_IDEL_DEC;
idelay_ld <= #TCQ 1'b1;
end else
cal2_state_r <= #TCQ CAL2_ERR;
end
end else if (not_empty_wait_cnt == 'd31)
cal2_state_r <= #TCQ CAL2_ERR;
end
CAL2_WRLVL_WAIT: begin
early1_detect <= #TCQ 1'b0;
if (wrlvl_byte_done && ~wrlvl_byte_done_r)
wrlvl_byte_redo <= #TCQ 1'b0;
if (wrlvl_byte_done) begin
if (rd_active_r1 && ~rd_active_r) begin
cal2_state_r <= #TCQ CAL2_IFIFO_RESET;
cal2_if_reset <= #TCQ 1'b1;
early1_data <= #TCQ 1'b0;
early2_data <= #TCQ 1'b0;
end
end
end
CAL2_DQ_IDEL_DEC: begin
if (tap_inc_wait_cnt == 'd4) begin
idelay_ld <= #TCQ 1'b0;
cal2_state_r <= #TCQ CAL2_IFIFO_RESET;
cal2_if_reset <= #TCQ 1'b1;
idelay_ld_done <= #TCQ 1'b1;
end
end
CAL2_IFIFO_RESET: begin
if (tap_inc_wait_cnt == 'd15) begin
cal2_if_reset <= #TCQ 1'b0;
if (wrcal_sanity_chk_r)
cal2_state_r <= #TCQ CAL2_DONE;
else if (idelay_ld_done) begin
wrcal_pat_resume_r <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_READ_WAIT;
end else
cal2_state_r <= #TCQ CAL2_IDLE;
end
end
// Final processing for current DQS group. Move on to next group
CAL2_NEXT_DQS: begin
// At this point, we've just found the correct pattern for the
// current DQS group.
// Request bank/row precharge, and wait for its completion. Always
// precharge after each DQS group to avoid tRAS(max) violation
//verilint STARC-2.2.3.3 off
if (wrcal_sanity_chk_r && (wrcal_dqs_cnt_r != DQS_WIDTH-1)) begin
cal2_prech_req_r <= #TCQ 1'b0;
wrcal_dqs_cnt_r <= #TCQ wrcal_dqs_cnt_r + 1;
cal2_state_r <= #TCQ CAL2_SANITY_WAIT;
end else
cal2_prech_req_r <= #TCQ 1'b1;
idelay_ld_done <= #TCQ 1'b0;
pat1_detect <= #TCQ 1'b0;
if (prech_done)
if (((DQS_WIDTH == 1) || (SIM_CAL_OPTION == "FAST_CAL")) ||
(wrcal_dqs_cnt_r == DQS_WIDTH-1)) begin
// If either FAST_CAL is enabled and first DQS group is
// finished, or if the last DQS group was just finished,
// then end of write calibration
if (wrcal_sanity_chk_r) begin
cal2_if_reset <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_IFIFO_RESET;
end else
cal2_state_r <= #TCQ CAL2_DONE;
end else begin
// Continue to next DQS group
wrcal_dqs_cnt_r <= #TCQ wrcal_dqs_cnt_r + 1;
cal2_state_r <= #TCQ CAL2_READ_WAIT;
end
end
//verilint STARC-2.2.3.3 on
CAL2_SANITY_WAIT: begin
if (tap_inc_wait_cnt == 'd15) begin
cal2_state_r <= #TCQ CAL2_READ_WAIT;
wrcal_pat_resume_r <= #TCQ 1'b1;
end
end
        // Finished with write calibration
CAL2_DONE: begin
if (wrcal_sanity_chk && ~wrcal_sanity_chk_r) begin
cal2_done_r <= #TCQ 1'b0;
wrcal_dqs_cnt_r <= #TCQ 'd0;
cal2_state_r <= #TCQ CAL2_IDLE;
end else
cal2_done_r <= #TCQ 1'b1;
cal2_prech_req_r <= #TCQ 1'b0;
cal2_if_reset <= #TCQ 1'b0;
if (wrcal_sanity_chk_r)
wrcal_sanity_chk_done <= #TCQ 1'b1;
end
        // Assert error signal indicating that write timing is incorrect
CAL2_ERR: begin
wrcal_pat_resume_r <= #TCQ 1'b0;
if (wrcal_sanity_chk_r)
wrcal_sanity_chk_err <= #TCQ 1'b1;
else
wrcal_pat_err <= #TCQ 1'b1;
cal2_state_r <= #TCQ CAL2_ERR;
end
endcase
end
end
// Delay assertion of wrcal_done for write calibration by a few cycles after
// we've reached CAL2_DONE
always @(posedge clk)
if (rst)
cal2_done_r1 <= #TCQ 1'b0;
else
cal2_done_r1 <= #TCQ cal2_done_r;
always @(posedge clk)
if (rst || (wrcal_sanity_chk && ~wrcal_sanity_chk_r))
wrcal_done <= #TCQ 1'b0;
else if (cal2_done_r)
wrcal_done <= #TCQ 1'b1;
endmodule
|
module mig_7series_v2_3_ddr_phy_wrlvl #
(
parameter TCQ = 100,
parameter DQS_CNT_WIDTH = 3,
parameter DQ_WIDTH = 64,
parameter DQS_WIDTH = 2,
parameter DRAM_WIDTH = 8,
parameter RANKS = 1,
parameter nCK_PER_CLK = 4,
parameter CLK_PERIOD = 4,
parameter SIM_CAL_OPTION = "NONE"
)
(
input clk,
input rst,
input phy_ctl_ready,
input wr_level_start,
input wl_sm_start,
input wrlvl_final,
input wrlvl_byte_redo,
input [DQS_CNT_WIDTH:0] wrcal_cnt,
input early1_data,
input early2_data,
input [DQS_CNT_WIDTH:0] oclkdelay_calib_cnt,
input oclkdelay_calib_done,
input [(DQ_WIDTH)-1:0] rd_data_rise0,
output reg wrlvl_byte_done,
output reg dqs_po_dec_done /* synthesis syn_maxfan = 2 */,
output phy_ctl_rdy_dly,
output reg wr_level_done /* synthesis syn_maxfan = 2 */,
// to phy_init for cs logic
output wrlvl_rank_done,
output done_dqs_tap_inc,
output [DQS_CNT_WIDTH:0] po_stg2_wl_cnt,
// Fine delay line used only during write leveling
// Inc/dec Phaser_Out fine delay line
output reg dqs_po_stg2_f_incdec,
// Enable Phaser_Out fine delay inc/dec
output reg dqs_po_en_stg2_f,
// Coarse delay line used during write leveling
// only if 64 taps of fine delay line were not
// sufficient to detect a 0->1 transition
// Inc Phaser_Out coarse delay line
output reg dqs_wl_po_stg2_c_incdec,
// Enable Phaser_Out coarse delay inc/dec
output reg dqs_wl_po_en_stg2_c,
// Read Phaser_Out delay value
input [8:0] po_counter_read_val,
// output reg dqs_wl_po_stg2_load,
// output reg [8:0] dqs_wl_po_stg2_reg_l,
// CK edge undetected
output reg wrlvl_err,
output reg [3*DQS_WIDTH-1:0] wl_po_coarse_cnt,
output reg [6*DQS_WIDTH-1:0] wl_po_fine_cnt,
// Debug ports
output [5:0] dbg_wl_tap_cnt,
output dbg_wl_edge_detect_valid,
output [(DQS_WIDTH)-1:0] dbg_rd_data_edge_detect,
output [DQS_CNT_WIDTH:0] dbg_dqs_count,
output [4:0] dbg_wl_state,
output [6*DQS_WIDTH-1:0] dbg_wrlvl_fine_tap_cnt,
output [3*DQS_WIDTH-1:0] dbg_wrlvl_coarse_tap_cnt,
output [255:0] dbg_phy_wrlvl
);
localparam WL_IDLE = 5'h0;
localparam WL_INIT = 5'h1;
localparam WL_INIT_FINE_INC = 5'h2;
localparam WL_INIT_FINE_INC_WAIT1= 5'h3;
localparam WL_INIT_FINE_INC_WAIT = 5'h4;
localparam WL_INIT_FINE_DEC = 5'h5;
localparam WL_INIT_FINE_DEC_WAIT = 5'h6;
localparam WL_FINE_INC = 5'h7;
localparam WL_WAIT = 5'h8;
localparam WL_EDGE_CHECK = 5'h9;
localparam WL_DQS_CHECK = 5'hA;
localparam WL_DQS_CNT = 5'hB;
localparam WL_2RANK_TAP_DEC = 5'hC;
localparam WL_2RANK_DQS_CNT = 5'hD;
localparam WL_FINE_DEC = 5'hE;
localparam WL_FINE_DEC_WAIT = 5'hF;
localparam WL_CORSE_INC = 5'h10;
localparam WL_CORSE_INC_WAIT = 5'h11;
localparam WL_CORSE_INC_WAIT1 = 5'h12;
localparam WL_CORSE_INC_WAIT2 = 5'h13;
localparam WL_CORSE_DEC = 5'h14;
localparam WL_CORSE_DEC_WAIT = 5'h15;
localparam WL_CORSE_DEC_WAIT1 = 5'h16;
localparam WL_FINE_INC_WAIT = 5'h17;
localparam WL_2RANK_FINAL_TAP = 5'h18;
localparam WL_INIT_FINE_DEC_WAIT1= 5'h19;
localparam WL_FINE_DEC_WAIT1 = 5'h1A;
localparam WL_CORSE_INC_WAIT_TMP = 5'h1B;
localparam COARSE_TAPS = 7;
localparam FAST_CAL_FINE = (CLK_PERIOD/nCK_PER_CLK <= 2500) ? 45 : 48;
localparam FAST_CAL_COARSE = (CLK_PERIOD/nCK_PER_CLK <= 2500) ? 1 : 2;
localparam REDO_COARSE = (CLK_PERIOD/nCK_PER_CLK <= 2500) ? 2 : 5;
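  // The CLK_PERIOD/nCK_PER_CLK ratio above works out to the memory clock
  // (tCK) period, presumably in ps, so the 2500 comparison separates
  // tCK <= 2.5 ns designs from slower ones.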
integer i, j, k, l, p, q, r, s, t, m, n, u, v, w, x,y;
reg phy_ctl_ready_r1;
reg phy_ctl_ready_r2;
reg phy_ctl_ready_r3;
reg phy_ctl_ready_r4;
reg phy_ctl_ready_r5;
reg phy_ctl_ready_r6;
(* max_fanout = 50 *) reg [DQS_CNT_WIDTH:0] dqs_count_r;
reg [1:0] rank_cnt_r;
reg [DQS_WIDTH-1:0] rd_data_rise_wl_r;
reg [DQS_WIDTH-1:0] rd_data_previous_r;
reg [DQS_WIDTH-1:0] rd_data_edge_detect_r;
reg wr_level_done_r;
reg wrlvl_rank_done_r;
reg wr_level_start_r;
reg [4:0] wl_state_r, wl_state_r1;
reg inhibit_edge_detect_r;
reg wl_edge_detect_valid_r;
reg [5:0] wl_tap_count_r;
reg [5:0] fine_dec_cnt;
reg [5:0] fine_inc[0:DQS_WIDTH-1]; // DQS_WIDTH number of counters 6-bit each
reg [2:0] corse_dec[0:DQS_WIDTH-1];
reg [2:0] corse_inc[0:DQS_WIDTH-1];
reg dq_cnt_inc;
reg [3:0] stable_cnt;
reg flag_ck_negedge;
//reg past_negedge;
reg flag_init;
reg [2:0] corse_cnt[0:DQS_WIDTH-1];
reg [3*DQS_WIDTH-1:0] corse_cnt_dbg;
reg [2:0] wl_corse_cnt[0:RANKS-1][0:DQS_WIDTH-1];
//reg [3*DQS_WIDTH-1:0] coarse_tap_inc;
reg [2:0] final_coarse_tap[0:DQS_WIDTH-1];
reg [5:0] add_smallest[0:DQS_WIDTH-1];
reg [5:0] add_largest[0:DQS_WIDTH-1];
//reg [6*DQS_WIDTH-1:0] fine_tap_inc;
//reg [6*DQS_WIDTH-1:0] fine_tap_dec;
reg wr_level_done_r1;
reg wr_level_done_r2;
reg wr_level_done_r3;
reg wr_level_done_r4;
reg wr_level_done_r5;
reg [5:0] wl_dqs_tap_count_r[0:RANKS-1][0:DQS_WIDTH-1];
reg [5:0] smallest[0:DQS_WIDTH-1];
reg [5:0] largest[0:DQS_WIDTH-1];
reg [5:0] final_val[0:DQS_WIDTH-1];
reg [5:0] po_dec_cnt[0:DQS_WIDTH-1];
reg done_dqs_dec;
reg [8:0] po_rdval_cnt;
reg po_cnt_dec;
reg po_dec_done;
reg dual_rnk_dec;
wire [DQS_CNT_WIDTH+2:0] dqs_count_w;
reg [5:0] fast_cal_fine_cnt;
reg [2:0] fast_cal_coarse_cnt;
reg wrlvl_byte_redo_r;
reg [2:0] wrlvl_redo_corse_inc;
reg wrlvl_final_r;
reg final_corse_dec;
wire [DQS_CNT_WIDTH+2:0] oclk_count_w;
reg wrlvl_tap_done_r ;
reg [3:0] wait_cnt;
reg [3:0] incdec_wait_cnt;
// Debug ports
assign dbg_wl_edge_detect_valid = wl_edge_detect_valid_r;
assign dbg_rd_data_edge_detect = rd_data_edge_detect_r;
assign dbg_wl_tap_cnt = wl_tap_count_r;
assign dbg_dqs_count = dqs_count_r;
assign dbg_wl_state = wl_state_r;
assign dbg_wrlvl_fine_tap_cnt = wl_po_fine_cnt;
assign dbg_wrlvl_coarse_tap_cnt = wl_po_coarse_cnt;
always @(*) begin
for (v = 0; v < DQS_WIDTH; v = v + 1)
corse_cnt_dbg[3*v+:3] = corse_cnt[v];
end
assign dbg_phy_wrlvl[0+:27] = corse_cnt_dbg;
assign dbg_phy_wrlvl[27+:5] = wl_state_r;
assign dbg_phy_wrlvl[32+:4] = dqs_count_r;
assign dbg_phy_wrlvl[36+:9] = rd_data_rise_wl_r;
assign dbg_phy_wrlvl[45+:9] = rd_data_previous_r;
assign dbg_phy_wrlvl[54+:4] = stable_cnt;
assign dbg_phy_wrlvl[58] = 'd0;
assign dbg_phy_wrlvl[59] = flag_ck_negedge;
assign dbg_phy_wrlvl [60] = wl_edge_detect_valid_r;
assign dbg_phy_wrlvl [61+:6] = wl_tap_count_r;
assign dbg_phy_wrlvl [67+:9] = rd_data_edge_detect_r;
assign dbg_phy_wrlvl [76+:54] = wl_po_fine_cnt;
assign dbg_phy_wrlvl [130+:27] = wl_po_coarse_cnt;
//**************************************************************************
// DQS count to hard PHY during write leveling using Phaser_OUT Stage2 delay
//**************************************************************************
assign po_stg2_wl_cnt = dqs_count_r;
assign wrlvl_rank_done = wrlvl_rank_done_r;
assign done_dqs_tap_inc = done_dqs_dec;
assign phy_ctl_rdy_dly = phy_ctl_ready_r6;
always @(posedge clk) begin
phy_ctl_ready_r1 <= #TCQ phy_ctl_ready;
phy_ctl_ready_r2 <= #TCQ phy_ctl_ready_r1;
phy_ctl_ready_r3 <= #TCQ phy_ctl_ready_r2;
phy_ctl_ready_r4 <= #TCQ phy_ctl_ready_r3;
phy_ctl_ready_r5 <= #TCQ phy_ctl_ready_r4;
phy_ctl_ready_r6 <= #TCQ phy_ctl_ready_r5;
wrlvl_byte_redo_r <= #TCQ wrlvl_byte_redo;
wrlvl_final_r <= #TCQ wrlvl_final;
if ((wrlvl_byte_redo && ~wrlvl_byte_redo_r) ||
(wrlvl_final && ~wrlvl_final_r))
wr_level_done <= #TCQ 1'b0;
else
wr_level_done <= #TCQ done_dqs_dec;
end
// Status signal that will be asserted once the first
// pass of write leveling is done.
always @(posedge clk) begin
if(rst) begin
wrlvl_tap_done_r <= #TCQ 1'b0 ;
end else begin
if(wrlvl_tap_done_r == 1'b0) begin
if(oclkdelay_calib_done) begin
wrlvl_tap_done_r <= #TCQ 1'b1 ;
end
end
end
end
always @(posedge clk) begin
if (rst || po_cnt_dec)
wait_cnt <= #TCQ 'd8;
else if (phy_ctl_ready_r6 && (wait_cnt > 'd0))
wait_cnt <= #TCQ wait_cnt - 1;
end
always @(posedge clk) begin
if (rst) begin
po_rdval_cnt <= #TCQ 'd0;
end else if (phy_ctl_ready_r5 && ~phy_ctl_ready_r6) begin
po_rdval_cnt <= #TCQ po_counter_read_val;
end else if (po_rdval_cnt > 'd0) begin
if (po_cnt_dec)
po_rdval_cnt <= #TCQ po_rdval_cnt - 1;
else
po_rdval_cnt <= #TCQ po_rdval_cnt;
end else if (po_rdval_cnt == 'd0) begin
po_rdval_cnt <= #TCQ po_rdval_cnt;
end
end
always @(posedge clk) begin
if (rst || (po_rdval_cnt == 'd0))
po_cnt_dec <= #TCQ 1'b0;
else if (phy_ctl_ready_r6 && (po_rdval_cnt > 'd0) && (wait_cnt == 'd1))
po_cnt_dec <= #TCQ 1'b1;
else
po_cnt_dec <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (rst)
po_dec_done <= #TCQ 1'b0;
else if (((po_cnt_dec == 'd1) && (po_rdval_cnt == 'd1)) ||
(phy_ctl_ready_r6 && (po_rdval_cnt == 'd0))) begin
po_dec_done <= #TCQ 1'b1;
end
end
always @(posedge clk) begin
dqs_po_dec_done <= #TCQ po_dec_done;
wr_level_done_r1 <= #TCQ wr_level_done_r;
wr_level_done_r2 <= #TCQ wr_level_done_r1;
wr_level_done_r3 <= #TCQ wr_level_done_r2;
wr_level_done_r4 <= #TCQ wr_level_done_r3;
wr_level_done_r5 <= #TCQ wr_level_done_r4;
for (l = 0; l < DQS_WIDTH; l = l + 1) begin
wl_po_coarse_cnt[3*l+:3] <= #TCQ final_coarse_tap[l];
if ((RANKS == 1) || ~oclkdelay_calib_done)
wl_po_fine_cnt[6*l+:6] <= #TCQ smallest[l];
else
wl_po_fine_cnt[6*l+:6] <= #TCQ final_val[l];
end
end
generate
if (RANKS == 2) begin: dual_rank
always @(posedge clk) begin
if (rst || (wrlvl_byte_redo && ~wrlvl_byte_redo_r) ||
(wrlvl_final && ~wrlvl_final_r))
done_dqs_dec <= #TCQ 1'b0;
else if ((SIM_CAL_OPTION == "FAST_CAL") || ~oclkdelay_calib_done)
done_dqs_dec <= #TCQ wr_level_done_r;
else if (wr_level_done_r5 && (wl_state_r == WL_IDLE))
done_dqs_dec <= #TCQ 1'b1;
end
end else begin: single_rank
always @(posedge clk) begin
if (rst || (wrlvl_byte_redo && ~wrlvl_byte_redo_r) ||
(wrlvl_final && ~wrlvl_final_r))
done_dqs_dec <= #TCQ 1'b0;
else if (~oclkdelay_calib_done)
done_dqs_dec <= #TCQ wr_level_done_r;
else if (wr_level_done_r3 && ~wr_level_done_r4)
done_dqs_dec <= #TCQ 1'b1;
end
end
endgenerate
always @(posedge clk)
if (rst || (wrlvl_byte_redo && ~wrlvl_byte_redo_r))
wrlvl_byte_done <= #TCQ 1'b0;
else if (wrlvl_byte_redo && wr_level_done_r3 && ~wr_level_done_r4)
wrlvl_byte_done <= #TCQ 1'b1;
// Storing DQS tap values at the end of each DQS write leveling
always @(posedge clk) begin
if (rst) begin
for (k = 0; k < RANKS; k = k + 1) begin: rst_wl_dqs_tap_count_loop
for (n = 0; n < DQS_WIDTH; n = n + 1) begin
wl_corse_cnt[k][n] <= #TCQ 'b0;
wl_dqs_tap_count_r[k][n] <= #TCQ 'b0;
end
end
end else if ((wl_state_r == WL_DQS_CNT) | (wl_state_r == WL_WAIT) |
(wl_state_r == WL_FINE_DEC_WAIT1) |
(wl_state_r == WL_2RANK_TAP_DEC)) begin
wl_dqs_tap_count_r[rank_cnt_r][dqs_count_r] <= #TCQ wl_tap_count_r;
wl_corse_cnt[rank_cnt_r][dqs_count_r] <= #TCQ corse_cnt[dqs_count_r];
end else if ((SIM_CAL_OPTION == "FAST_CAL") & (wl_state_r == WL_DQS_CHECK)) begin
for (p = 0; p < RANKS; p = p +1) begin: dqs_tap_rank_cnt
for(q = 0; q < DQS_WIDTH; q = q +1) begin: dqs_tap_dqs_cnt
wl_dqs_tap_count_r[p][q] <= #TCQ wl_tap_count_r;
wl_corse_cnt[p][q] <= #TCQ corse_cnt[0];
end
end
end
end
  // Convert coarse delay to fine taps in case of an unequal number of coarse
  // taps between ranks. A difference of one coarse tap between ranks is
  // assumed. A common fine and coarse tap value must be used for both ranks
  // because Phaser_Out has only one rank register.
  // Coarse tap1       = period(ps)*93/360  = 34 fine taps
  // Other coarse taps = period(ps)*103/360 = 38 fine taps
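  // Example: if rank 0 settled at 2 coarse taps and rank 1 at 3, the common
  // final_coarse_tap becomes 2 and add_largest is set to 38 fine taps, which
  // is folded into the 'largest' value in the final_val computation below.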
generate
genvar cnt;
if (RANKS == 2) begin // Dual rank
for(cnt = 0; cnt < DQS_WIDTH; cnt = cnt +1) begin: coarse_dqs_cnt
always @(posedge clk) begin
if (rst) begin
//coarse_tap_inc[3*cnt+:3] <= #TCQ 'b0;
add_smallest[cnt] <= #TCQ 'd0;
add_largest[cnt] <= #TCQ 'd0;
final_coarse_tap[cnt] <= #TCQ 'd0;
end else if (wr_level_done_r1 & ~wr_level_done_r2) begin
if (~oclkdelay_calib_done) begin
for(y = 0 ; y < DQS_WIDTH; y = y+1) begin
final_coarse_tap[y] <= #TCQ wl_corse_cnt[0][y];
add_smallest[y] <= #TCQ 'd0;
add_largest[y] <= #TCQ 'd0;
end
end else
if (wl_corse_cnt[0][cnt] == wl_corse_cnt[1][cnt]) begin
                // Both ranks use the same number of coarse delay taps,
                // so no conversion of coarse taps to fine taps is required.
//coarse_tap_inc[3*cnt+:3] <= #TCQ wl_corse_cnt[1][3*cnt+:3];
final_coarse_tap[cnt] <= #TCQ wl_corse_cnt[1][cnt];
add_smallest[cnt] <= #TCQ 'd0;
add_largest[cnt] <= #TCQ 'd0;
end else if (wl_corse_cnt[0][cnt] < wl_corse_cnt[1][cnt]) begin
              // Rank 0 uses fewer coarse delay taps than rank 1, so
              // conversion of a coarse tap to fine taps is required for
              // rank 1. The final coarse count will be the smaller value.
//coarse_tap_inc[3*cnt+:3] <= #TCQ wl_corse_cnt[1][3*cnt+:3] - 1;
final_coarse_tap[cnt] <= #TCQ wl_corse_cnt[1][cnt] - 1;
if (|wl_corse_cnt[0][cnt])
// Coarse tap 2 or higher being converted to fine taps
// This will be added to 'largest' value in final_val
// computation
add_largest[cnt] <= #TCQ 'd38;
else
// Coarse tap 1 being converted to fine taps
// This will be added to 'largest' value in final_val
// computation
add_largest[cnt] <= #TCQ 'd34;
end else if (wl_corse_cnt[0][cnt] > wl_corse_cnt[1][cnt]) begin
              // This may be an unlikely scenario in a real system:
              // rank 0 uses more coarse delay taps than rank 1, so
              // coarse-to-fine conversion is required.
//coarse_tap_inc[3*cnt+:3] <= #TCQ 'd0;
final_coarse_tap[cnt] <= #TCQ wl_corse_cnt[1][cnt];
if (|wl_corse_cnt[1][cnt])
// Coarse tap 2 or higher being converted to fine taps
// This will be added to 'smallest' value in final_val
// computation
add_smallest[cnt] <= #TCQ 'd38;
else
// Coarse tap 1 being converted to fine taps
// This will be added to 'smallest' value in
// final_val computation
add_smallest[cnt] <= #TCQ 'd34;
end
end
end
end
end else begin
// Single rank
always @(posedge clk) begin
//coarse_tap_inc <= #TCQ 'd0;
for(w = 0; w < DQS_WIDTH; w = w + 1) begin
final_coarse_tap[w] <= #TCQ wl_corse_cnt[0][w];
add_smallest[w] <= #TCQ 'd0;
add_largest[w] <= #TCQ 'd0;
end
end
end
endgenerate
  // Determine delay value for DQS in a multi-rank system.
  // Assumes the smallest delay value belongs to the rank 0 DQS and the
  // largest delay value to the last rank (RANKS-1) DQS.
  // Set to smallest + ((largest-smallest)/2)
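  // e.g. smallest = 10 fine taps and largest = 20 fine taps (with no
  // coarse-to-fine adjustment) give final_val = 10 + (20-10)/2 = 15.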
always @(posedge clk) begin
if (rst) begin
for(x = 0; x < DQS_WIDTH; x = x +1) begin
smallest[x] <= #TCQ 'b0;
largest[x] <= #TCQ 'b0;
end
end else if ((wl_state_r == WL_DQS_CNT) & wrlvl_byte_redo) begin
smallest[dqs_count_r] <= #TCQ wl_dqs_tap_count_r[0][dqs_count_r];
largest[dqs_count_r] <= #TCQ wl_dqs_tap_count_r[0][dqs_count_r];
end else if ((wl_state_r == WL_DQS_CNT) |
(wl_state_r == WL_2RANK_TAP_DEC)) begin
smallest[dqs_count_r] <= #TCQ wl_dqs_tap_count_r[0][dqs_count_r];
largest[dqs_count_r] <= #TCQ wl_dqs_tap_count_r[RANKS-1][dqs_count_r];
end else if (((SIM_CAL_OPTION == "FAST_CAL") |
(~oclkdelay_calib_done & ~wrlvl_byte_redo)) &
wr_level_done_r1 & ~wr_level_done_r2) begin
for(i = 0; i < DQS_WIDTH; i = i +1) begin: smallest_dqs
smallest[i] <= #TCQ wl_dqs_tap_count_r[0][i];
largest[i] <= #TCQ wl_dqs_tap_count_r[0][i];
end
end
end
// final_val to be used for all DQSs in all ranks
genvar wr_i;
generate
for (wr_i = 0; wr_i < DQS_WIDTH; wr_i = wr_i +1) begin: gen_final_tap
always @(posedge clk) begin
if (rst)
final_val[wr_i] <= #TCQ 'b0;
else if (wr_level_done_r2 && ~wr_level_done_r3) begin
if (~oclkdelay_calib_done)
final_val[wr_i] <= #TCQ (smallest[wr_i] + add_smallest[wr_i]);
else if ((smallest[wr_i] + add_smallest[wr_i]) <
(largest[wr_i] + add_largest[wr_i]))
final_val[wr_i] <= #TCQ ((smallest[wr_i] + add_smallest[wr_i]) +
(((largest[wr_i] + add_largest[wr_i]) -
(smallest[wr_i] + add_smallest[wr_i]))/2));
else if ((smallest[wr_i] + add_smallest[wr_i]) >
(largest[wr_i] + add_largest[wr_i]))
final_val[wr_i] <= #TCQ ((largest[wr_i] + add_largest[wr_i]) +
(((smallest[wr_i] + add_smallest[wr_i]) -
(largest[wr_i] + add_largest[wr_i]))/2));
else if ((smallest[wr_i] + add_smallest[wr_i]) ==
(largest[wr_i] + add_largest[wr_i]))
final_val[wr_i] <= #TCQ (largest[wr_i] + add_largest[wr_i]);
end
end
end
endgenerate
// // fine tap inc/dec value for all DQSs in all ranks
// genvar dqs_i;
// generate
// for (dqs_i = 0; dqs_i < DQS_WIDTH; dqs_i = dqs_i +1) begin: gen_fine_tap
// always @(posedge clk) begin
// if (rst)
// fine_tap_inc[6*dqs_i+:6] <= #TCQ 'd0;
// //fine_tap_dec[6*dqs_i+:6] <= #TCQ 'd0;
// else if (wr_level_done_r3 && ~wr_level_done_r4) begin
// fine_tap_inc[6*dqs_i+:6] <= #TCQ final_val[6*dqs_i+:6];
// //fine_tap_dec[6*dqs_i+:6] <= #TCQ 'd0;
// end
// end
// endgenerate
// Inc/Dec Phaser_Out stage 2 fine delay line
always @(posedge clk) begin
if (rst) begin
// Fine delay line used only during write leveling
dqs_po_stg2_f_incdec <= #TCQ 1'b0;
dqs_po_en_stg2_f <= #TCQ 1'b0;
      // Dec Phaser_Out fine delay (1) before write leveling,
      // (2) if no 0 to 1 transition is detected within 63 fine delay taps, or
      // (3) dual rank case where fine taps for the first rank need to be 0
end else if (po_cnt_dec || (wl_state_r == WL_INIT_FINE_DEC) ||
(wl_state_r == WL_FINE_DEC)) begin
dqs_po_stg2_f_incdec <= #TCQ 1'b0;
dqs_po_en_stg2_f <= #TCQ 1'b1;
// Inc Phaser_Out fine delay during write leveling
end else if ((wl_state_r == WL_INIT_FINE_INC) ||
(wl_state_r == WL_FINE_INC)) begin
dqs_po_stg2_f_incdec <= #TCQ 1'b1;
dqs_po_en_stg2_f <= #TCQ 1'b1;
end else begin
dqs_po_stg2_f_incdec <= #TCQ 1'b0;
dqs_po_en_stg2_f <= #TCQ 1'b0;
end
end
// Inc Phaser_Out stage 2 Coarse delay line
always @(posedge clk) begin
if (rst) begin
      // Coarse delay line used during write leveling
      // only if no 0->1 transition was detected within 64
      // fine delay line taps
dqs_wl_po_stg2_c_incdec <= #TCQ 1'b0;
dqs_wl_po_en_stg2_c <= #TCQ 1'b0;
end else if (wl_state_r == WL_CORSE_INC) begin
// Inc Phaser_Out coarse delay during write leveling
dqs_wl_po_stg2_c_incdec <= #TCQ 1'b1;
dqs_wl_po_en_stg2_c <= #TCQ 1'b1;
end else begin
dqs_wl_po_stg2_c_incdec <= #TCQ 1'b0;
dqs_wl_po_en_stg2_c <= #TCQ 1'b0;
end
end
  // Only the rise data is stored for checking. The data coming back during
  // write leveling will be a static value, so checking just the rise data is
  // enough.
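      // e.g. with DRAM_WIDTH = 8, byte rd_i reduces
      // rd_data_rise0[rd_i*8+7 : rd_i*8] to a single bit via the reduction OR.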
genvar rd_i;
generate
for(rd_i = 0; rd_i < DQS_WIDTH; rd_i = rd_i +1)begin: gen_rd
always @(posedge clk)
rd_data_rise_wl_r[rd_i] <=
#TCQ |rd_data_rise0[(rd_i*DRAM_WIDTH)+DRAM_WIDTH-1:rd_i*DRAM_WIDTH];
end
endgenerate
// storing the previous data for checking later.
always @(posedge clk)begin
if ((wl_state_r == WL_INIT) || //(wl_state_r == WL_INIT_FINE_INC_WAIT) ||
//(wl_state_r == WL_INIT_FINE_INC_WAIT1) ||
((wl_state_r1 == WL_INIT_FINE_INC_WAIT) & (wl_state_r == WL_INIT_FINE_INC)) ||
(wl_state_r == WL_FINE_DEC) || (wl_state_r == WL_FINE_DEC_WAIT1) || (wl_state_r == WL_FINE_DEC_WAIT) ||
(wl_state_r == WL_CORSE_INC) || (wl_state_r == WL_CORSE_INC_WAIT) || (wl_state_r == WL_CORSE_INC_WAIT_TMP) ||
(wl_state_r == WL_CORSE_INC_WAIT1) || (wl_state_r == WL_CORSE_INC_WAIT2) ||
((wl_state_r == WL_EDGE_CHECK) & (wl_edge_detect_valid_r)))
rd_data_previous_r <= #TCQ rd_data_rise_wl_r;
end
  // Stable count threshold increased because of fine tap resolution
  // (the checks below use a count of 14)
always @(posedge clk)begin
if (rst | (wl_state_r == WL_DQS_CNT) |
(wl_state_r == WL_2RANK_TAP_DEC) |
(wl_state_r == WL_FINE_DEC) |
(rd_data_previous_r[dqs_count_r] != rd_data_rise_wl_r[dqs_count_r]) |
(wl_state_r1 == WL_INIT_FINE_DEC))
stable_cnt <= #TCQ 'd0;
else if ((wl_tap_count_r > 6'd0) &
(((wl_state_r == WL_EDGE_CHECK) & (wl_edge_detect_valid_r)) |
((wl_state_r1 == WL_INIT_FINE_INC_WAIT) & (wl_state_r == WL_INIT_FINE_INC)))) begin
if ((rd_data_previous_r[dqs_count_r] == rd_data_rise_wl_r[dqs_count_r])
& (stable_cnt < 'd14))
stable_cnt <= #TCQ stable_cnt + 1;
end
end
// Signal to ensure that flag_ck_negedge does not incorrectly assert
// when DQS is very close to CK rising edge
//always @(posedge clk) begin
// if (rst | (wl_state_r == WL_DQS_CNT) |
// (wl_state_r == WL_DQS_CHECK) | wr_level_done_r)
// past_negedge <= #TCQ 1'b0;
// else if (~flag_ck_negedge && ~rd_data_previous_r[dqs_count_r] &&
// (stable_cnt == 'd0) && ((wl_state_r == WL_CORSE_INC_WAIT1) |
// (wl_state_r == WL_CORSE_INC_WAIT2)))
// past_negedge <= #TCQ 1'b1;
//end
// Flag to indicate negedge of CK detected and ignore 0->1 transitions
// in this region
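  // flag_ck_negedge feeds the reset term of rd_data_edge_detect_r further
  // below, so apparent 0->1 edges seen in this region are discarded.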
always @(posedge clk)begin
if (rst | (wl_state_r == WL_DQS_CNT) |
(wl_state_r == WL_DQS_CHECK) | wr_level_done_r |
(wl_state_r1 == WL_INIT_FINE_DEC))
flag_ck_negedge <= #TCQ 1'd0;
else if ((rd_data_previous_r[dqs_count_r] && ((stable_cnt > 'd0) |
(wl_state_r == WL_FINE_DEC) | (wl_state_r == WL_FINE_DEC_WAIT) | (wl_state_r == WL_FINE_DEC_WAIT1))) |
(wl_state_r == WL_CORSE_INC))
flag_ck_negedge <= #TCQ 1'd1;
else if (~rd_data_previous_r[dqs_count_r] && (stable_cnt == 'd14))
//&& flag_ck_negedge)
flag_ck_negedge <= #TCQ 1'd0;
end
// Flag to inhibit rd_data_edge_detect_r before stable DQ
always @(posedge clk) begin
if (rst)
flag_init <= #TCQ 1'b1;
else if ((wl_state_r == WL_WAIT) && ((wl_state_r1 == WL_INIT_FINE_INC_WAIT) ||
(wl_state_r1 == WL_INIT_FINE_DEC_WAIT)))
flag_init <= #TCQ 1'b0;
end
//checking for transition from 0 to 1
always @(posedge clk)begin
if (rst | flag_ck_negedge | flag_init | (wl_tap_count_r < 'd1) |
inhibit_edge_detect_r)
rd_data_edge_detect_r <= #TCQ {DQS_WIDTH{1'b0}};
else if (rd_data_edge_detect_r[dqs_count_r] == 1'b1) begin
if ((wl_state_r == WL_FINE_DEC) || (wl_state_r == WL_FINE_DEC_WAIT) || (wl_state_r == WL_FINE_DEC_WAIT1) ||
(wl_state_r == WL_CORSE_INC) || (wl_state_r == WL_CORSE_INC_WAIT) || (wl_state_r == WL_CORSE_INC_WAIT_TMP) ||
(wl_state_r == WL_CORSE_INC_WAIT1) || (wl_state_r == WL_CORSE_INC_WAIT2))
rd_data_edge_detect_r <= #TCQ {DQS_WIDTH{1'b0}};
else
rd_data_edge_detect_r <= #TCQ rd_data_edge_detect_r;
end else if (rd_data_previous_r[dqs_count_r] && (stable_cnt < 'd14))
rd_data_edge_detect_r <= #TCQ {DQS_WIDTH{1'b0}};
else
rd_data_edge_detect_r <= #TCQ (~rd_data_previous_r & rd_data_rise_wl_r);
end
  // Registering the write level start signal
always@(posedge clk) begin
wr_level_start_r <= #TCQ wr_level_start;
end
// Assign dqs_count_r to dqs_count_w to perform the shift operation
// instead of multiply operation
assign dqs_count_w = {2'b00, dqs_count_r};
assign oclk_count_w = {2'b00, oclkdelay_calib_cnt};
always @(posedge clk) begin
if (rst)
incdec_wait_cnt <= #TCQ 'd0;
else if ((wl_state_r == WL_FINE_DEC_WAIT1) ||
(wl_state_r == WL_INIT_FINE_DEC_WAIT1) ||
(wl_state_r == WL_CORSE_INC_WAIT_TMP))
incdec_wait_cnt <= #TCQ incdec_wait_cnt + 1;
else
incdec_wait_cnt <= #TCQ 'd0;
end
  // State machine to initiate the write leveling sequence.
  // The state machine operates on one byte at a time.
  // It increments the delays to the DQS OSERDES
  // and samples the DQ from the memory. When it detects
  // a 0->1 transition, write leveling for that byte is considered
  // done.
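  // Typical per-byte flow: WL_INIT -> WL_INIT_FINE_INC(_WAIT) until the
  // sampled DQ is stable -> WL_WAIT/WL_EDGE_CHECK loop, incrementing fine
  // taps (falling back to WL_FINE_DEC/WL_CORSE_INC if no edge is found
  // within the fine tap range) -> on a detected edge WL_DQS_CNT stores the
  // tap counts -> WL_DQS_CHECK moves to the next byte or finishes the rank.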
always @(posedge clk) begin
if(rst)begin
wrlvl_err <= #TCQ 1'b0;
wr_level_done_r <= #TCQ 1'b0;
wrlvl_rank_done_r <= #TCQ 1'b0;
dqs_count_r <= #TCQ {DQS_CNT_WIDTH+1{1'b0}};
dq_cnt_inc <= #TCQ 1'b1;
rank_cnt_r <= #TCQ 2'b00;
wl_state_r <= #TCQ WL_IDLE;
wl_state_r1 <= #TCQ WL_IDLE;
inhibit_edge_detect_r <= #TCQ 1'b1;
wl_edge_detect_valid_r <= #TCQ 1'b0;
wl_tap_count_r <= #TCQ 6'd0;
fine_dec_cnt <= #TCQ 6'd0;
for (r = 0; r < DQS_WIDTH; r = r + 1) begin
fine_inc[r] <= #TCQ 6'b0;
corse_dec[r] <= #TCQ 3'b0;
corse_inc[r] <= #TCQ 3'b0;
corse_cnt[r] <= #TCQ 3'b0;
end
dual_rnk_dec <= #TCQ 1'b0;
fast_cal_fine_cnt <= #TCQ FAST_CAL_FINE;
fast_cal_coarse_cnt <= #TCQ FAST_CAL_COARSE;
final_corse_dec <= #TCQ 1'b0;
//zero_tran_r <= #TCQ 1'b0;
wrlvl_redo_corse_inc <= #TCQ 'd0;
end else begin
wl_state_r1 <= #TCQ wl_state_r;
case (wl_state_r)
WL_IDLE: begin
wrlvl_rank_done_r <= #TCQ 1'd0;
inhibit_edge_detect_r <= #TCQ 1'b1;
if (wrlvl_byte_redo && ~wrlvl_byte_redo_r) begin
wr_level_done_r <= #TCQ 1'b0;
dqs_count_r <= #TCQ wrcal_cnt;
corse_cnt[wrcal_cnt] <= #TCQ final_coarse_tap[wrcal_cnt];
wl_tap_count_r <= #TCQ smallest[wrcal_cnt];
if (early1_data &&
(((final_coarse_tap[wrcal_cnt] < 'd6) && (CLK_PERIOD/nCK_PER_CLK <= 2500)) ||
((final_coarse_tap[wrcal_cnt] < 'd3) && (CLK_PERIOD/nCK_PER_CLK > 2500))))
wrlvl_redo_corse_inc <= #TCQ REDO_COARSE;
else if (early2_data && (final_coarse_tap[wrcal_cnt] < 'd2))
wrlvl_redo_corse_inc <= #TCQ 3'd6;
else begin
wl_state_r <= #TCQ WL_IDLE;
wrlvl_err <= #TCQ 1'b1;
end
end else if (wrlvl_final && ~wrlvl_final_r) begin
wr_level_done_r <= #TCQ 1'b0;
dqs_count_r <= #TCQ 'd0;
end
// verilint STARC-2.2.3.3 off
if(!wr_level_done_r & wr_level_start_r & wl_sm_start) begin
if (SIM_CAL_OPTION == "FAST_CAL")
wl_state_r <= #TCQ WL_FINE_INC;
else
wl_state_r <= #TCQ WL_INIT;
end
end
// verilint STARC-2.2.3.3 on
WL_INIT: begin
wl_edge_detect_valid_r <= #TCQ 1'b0;
inhibit_edge_detect_r <= #TCQ 1'b1;
wrlvl_rank_done_r <= #TCQ 1'd0;
//zero_tran_r <= #TCQ 1'b0;
if (wrlvl_final)
corse_cnt[dqs_count_w ] <= #TCQ final_coarse_tap[dqs_count_w ];
if (wrlvl_byte_redo) begin
if (|wl_tap_count_r) begin
wl_state_r <= #TCQ WL_FINE_DEC;
fine_dec_cnt <= #TCQ wl_tap_count_r;
end else if ((corse_cnt[dqs_count_w] + wrlvl_redo_corse_inc) <= 'd7)
wl_state_r <= #TCQ WL_CORSE_INC;
else begin
wl_state_r <= #TCQ WL_IDLE;
wrlvl_err <= #TCQ 1'b1;
end
end else if(wl_sm_start)
wl_state_r <= #TCQ WL_INIT_FINE_INC;
end
// Initially Phaser_Out fine delay taps incremented
// until stable_cnt=14. A stable_cnt of 14 indicates
// that rd_data_rise_wl_r=rd_data_previous_r for 14 fine
// tap increments. This is done to inhibit false 0->1
// edge detection when DQS is initially aligned to the
// negedge of CK
WL_INIT_FINE_INC: begin
wl_state_r <= #TCQ WL_INIT_FINE_INC_WAIT1;
wl_tap_count_r <= #TCQ wl_tap_count_r + 1'b1;
final_corse_dec <= #TCQ 1'b0;
end
WL_INIT_FINE_INC_WAIT1: begin
if (wl_sm_start)
wl_state_r <= #TCQ WL_INIT_FINE_INC_WAIT;
end
// Case1: stable value of rd_data_previous_r=0 then
// proceed to 0->1 edge detection.
// Case2: stable value of rd_data_previous_r=1 then
// decrement fine taps to '0' and proceed to 0->1
// edge detection. Need to decrement in this case to
// make sure a valid 0->1 transition was not left
// undetected.
WL_INIT_FINE_INC_WAIT: begin
if (wl_sm_start) begin
if (stable_cnt < 'd14)
wl_state_r <= #TCQ WL_INIT_FINE_INC;
else if (~rd_data_previous_r[dqs_count_r]) begin
wl_state_r <= #TCQ WL_WAIT;
inhibit_edge_detect_r <= #TCQ 1'b0;
end else begin
wl_state_r <= #TCQ WL_INIT_FINE_DEC;
fine_dec_cnt <= #TCQ wl_tap_count_r;
end
end
end
// Case2: stable value of rd_data_previous_r=1 then
// decrement fine taps to '0' and proceed to 0->1
// edge detection. Need to decrement in this case to
// make sure a valid 0->1 transition was not left
// undetected.
WL_INIT_FINE_DEC: begin
wl_tap_count_r <= #TCQ 'd0;
wl_state_r <= #TCQ WL_INIT_FINE_DEC_WAIT1;
if (fine_dec_cnt > 6'd0)
fine_dec_cnt <= #TCQ fine_dec_cnt - 1;
else
fine_dec_cnt <= #TCQ fine_dec_cnt;
end
WL_INIT_FINE_DEC_WAIT1: begin
if (incdec_wait_cnt == 'd8)
wl_state_r <= #TCQ WL_INIT_FINE_DEC_WAIT;
end
WL_INIT_FINE_DEC_WAIT: begin
if (fine_dec_cnt > 6'd0) begin
wl_state_r <= #TCQ WL_INIT_FINE_DEC;
inhibit_edge_detect_r <= #TCQ 1'b1;
end else begin
wl_state_r <= #TCQ WL_WAIT;
inhibit_edge_detect_r <= #TCQ 1'b0;
end
end
// Inc DQS Phaser_Out Stage2 Fine Delay line
WL_FINE_INC: begin
wl_edge_detect_valid_r <= #TCQ 1'b0;
if (SIM_CAL_OPTION == "FAST_CAL") begin
wl_state_r <= #TCQ WL_FINE_INC_WAIT;
if (fast_cal_fine_cnt > 'd0)
fast_cal_fine_cnt <= #TCQ fast_cal_fine_cnt - 1;
else
fast_cal_fine_cnt <= #TCQ fast_cal_fine_cnt;
end else if (wr_level_done_r5) begin
wl_tap_count_r <= #TCQ 'd0;
wl_state_r <= #TCQ WL_FINE_INC_WAIT;
if (|fine_inc[dqs_count_w])
fine_inc[dqs_count_w] <= #TCQ fine_inc[dqs_count_w] - 1;
end else begin
wl_state_r <= #TCQ WL_WAIT;
wl_tap_count_r <= #TCQ wl_tap_count_r + 1'b1;
end
end
WL_FINE_INC_WAIT: begin
if (SIM_CAL_OPTION == "FAST_CAL") begin
if (fast_cal_fine_cnt > 'd0)
wl_state_r <= #TCQ WL_FINE_INC;
else if (fast_cal_coarse_cnt > 'd0)
wl_state_r <= #TCQ WL_CORSE_INC;
else
wl_state_r <= #TCQ WL_DQS_CNT;
end else if (|fine_inc[dqs_count_w])
wl_state_r <= #TCQ WL_FINE_INC;
else if (dqs_count_r == (DQS_WIDTH-1))
wl_state_r <= #TCQ WL_IDLE;
else begin
wl_state_r <= #TCQ WL_2RANK_FINAL_TAP;
dqs_count_r <= #TCQ dqs_count_r + 1;
end
end
WL_FINE_DEC: begin
wl_edge_detect_valid_r <= #TCQ 1'b0;
wl_tap_count_r <= #TCQ 'd0;
wl_state_r <= #TCQ WL_FINE_DEC_WAIT1;
if (fine_dec_cnt > 6'd0)
fine_dec_cnt <= #TCQ fine_dec_cnt - 1;
else
fine_dec_cnt <= #TCQ fine_dec_cnt;
end
WL_FINE_DEC_WAIT1: begin
if (incdec_wait_cnt == 'd8)
wl_state_r <= #TCQ WL_FINE_DEC_WAIT;
end
WL_FINE_DEC_WAIT: begin
if (fine_dec_cnt > 6'd0)
wl_state_r <= #TCQ WL_FINE_DEC;
//else if (zero_tran_r)
// wl_state_r <= #TCQ WL_DQS_CNT;
else if (dual_rnk_dec) begin
if (|corse_dec[dqs_count_r])
wl_state_r <= #TCQ WL_CORSE_DEC;
else
wl_state_r <= #TCQ WL_2RANK_DQS_CNT;
end else if (wrlvl_byte_redo) begin
if ((corse_cnt[dqs_count_w] + wrlvl_redo_corse_inc) <= 'd7)
wl_state_r <= #TCQ WL_CORSE_INC;
else begin
wl_state_r <= #TCQ WL_IDLE;
wrlvl_err <= #TCQ 1'b1;
end
end else
wl_state_r <= #TCQ WL_CORSE_INC;
end
WL_CORSE_DEC: begin
wl_state_r <= #TCQ WL_CORSE_DEC_WAIT;
dual_rnk_dec <= #TCQ 1'b0;
if (|corse_dec[dqs_count_r])
corse_dec[dqs_count_r] <= #TCQ corse_dec[dqs_count_r] - 1;
else
corse_dec[dqs_count_r] <= #TCQ corse_dec[dqs_count_r];
end
WL_CORSE_DEC_WAIT: begin
if (wl_sm_start) begin
//if (|corse_dec[dqs_count_r])
// wl_state_r <= #TCQ WL_CORSE_DEC;
if (|corse_dec[dqs_count_r])
wl_state_r <= #TCQ WL_CORSE_DEC_WAIT1;
else
wl_state_r <= #TCQ WL_2RANK_DQS_CNT;
end
end
WL_CORSE_DEC_WAIT1: begin
if (wl_sm_start)
wl_state_r <= #TCQ WL_CORSE_DEC;
end
WL_CORSE_INC: begin
wl_state_r <= #TCQ WL_CORSE_INC_WAIT_TMP;
if (SIM_CAL_OPTION == "FAST_CAL") begin
if (fast_cal_coarse_cnt > 'd0)
fast_cal_coarse_cnt <= #TCQ fast_cal_coarse_cnt - 1;
else
fast_cal_coarse_cnt <= #TCQ fast_cal_coarse_cnt;
end else if (wrlvl_byte_redo) begin
corse_cnt[dqs_count_w] <= #TCQ corse_cnt[dqs_count_w] + 1;
if (|wrlvl_redo_corse_inc)
wrlvl_redo_corse_inc <= #TCQ wrlvl_redo_corse_inc - 1;
end else if (~wr_level_done_r5)
corse_cnt[dqs_count_r] <= #TCQ corse_cnt[dqs_count_r] + 1;
else if (|corse_inc[dqs_count_w])
corse_inc[dqs_count_w] <= #TCQ corse_inc[dqs_count_w] - 1;
end
WL_CORSE_INC_WAIT_TMP: begin
if (incdec_wait_cnt == 'd8)
wl_state_r <= #TCQ WL_CORSE_INC_WAIT;
end
WL_CORSE_INC_WAIT: begin
if (SIM_CAL_OPTION == "FAST_CAL") begin
if (fast_cal_coarse_cnt > 'd0)
wl_state_r <= #TCQ WL_CORSE_INC;
else
wl_state_r <= #TCQ WL_DQS_CNT;
end else if (wrlvl_byte_redo) begin
if (|wrlvl_redo_corse_inc)
wl_state_r <= #TCQ WL_CORSE_INC;
else begin
wl_state_r <= #TCQ WL_INIT_FINE_INC;
inhibit_edge_detect_r <= #TCQ 1'b1;
end
end else if (~wr_level_done_r5 && wl_sm_start)
wl_state_r <= #TCQ WL_CORSE_INC_WAIT1;
else if (wr_level_done_r5) begin
if (|corse_inc[dqs_count_r])
wl_state_r <= #TCQ WL_CORSE_INC;
else if (|fine_inc[dqs_count_w])
wl_state_r <= #TCQ WL_FINE_INC;
else if (dqs_count_r == (DQS_WIDTH-1))
wl_state_r <= #TCQ WL_IDLE;
else begin
wl_state_r <= #TCQ WL_2RANK_FINAL_TAP;
dqs_count_r <= #TCQ dqs_count_r + 1;
end
end
end
WL_CORSE_INC_WAIT1: begin
if (wl_sm_start)
wl_state_r <= #TCQ WL_CORSE_INC_WAIT2;
end
WL_CORSE_INC_WAIT2: begin
if (wl_sm_start)
wl_state_r <= #TCQ WL_WAIT;
end
WL_WAIT: begin
if (wl_sm_start)
wl_state_r <= #TCQ WL_EDGE_CHECK;
end
WL_EDGE_CHECK: begin // Look for the edge
if (wl_edge_detect_valid_r == 1'b0) begin
wl_state_r <= #TCQ WL_WAIT;
wl_edge_detect_valid_r <= #TCQ 1'b1;
end
// 0->1 transition detected with DQS
else if(rd_data_edge_detect_r[dqs_count_r] &&
wl_edge_detect_valid_r)
begin
wl_tap_count_r <= #TCQ wl_tap_count_r;
if ((SIM_CAL_OPTION == "FAST_CAL") || (RANKS < 2) ||
~oclkdelay_calib_done)
wl_state_r <= #TCQ WL_DQS_CNT;
else
wl_state_r <= #TCQ WL_2RANK_TAP_DEC;
end
            // For initial writes check only up to 56 taps, reserving the
            // remaining taps for OCLK calibration.
else if((~wrlvl_tap_done_r) && (wl_tap_count_r > 6'd55)) begin
if (corse_cnt[dqs_count_r] < COARSE_TAPS) begin
wl_state_r <= #TCQ WL_FINE_DEC;
fine_dec_cnt <= #TCQ wl_tap_count_r;
end else begin
wrlvl_err <= #TCQ 1'b1;
wl_state_r <= #TCQ WL_IDLE;
end
end else begin
            if (wl_tap_count_r < 6'd56) // limited so wrlvl can be reused for complex OCAL
wl_state_r <= #TCQ WL_FINE_INC;
else if (corse_cnt[dqs_count_r] < COARSE_TAPS) begin
wl_state_r <= #TCQ WL_FINE_DEC;
fine_dec_cnt <= #TCQ wl_tap_count_r;
end else begin
wrlvl_err <= #TCQ 1'b1;
wl_state_r <= #TCQ WL_IDLE;
end
end
end
WL_2RANK_TAP_DEC: begin
wl_state_r <= #TCQ WL_FINE_DEC;
fine_dec_cnt <= #TCQ wl_tap_count_r;
for (m = 0; m < DQS_WIDTH; m = m + 1)
corse_dec[m] <= #TCQ corse_cnt[m];
wl_edge_detect_valid_r <= #TCQ 1'b0;
dual_rnk_dec <= #TCQ 1'b1;
end
WL_DQS_CNT: begin
if ((SIM_CAL_OPTION == "FAST_CAL") ||
(dqs_count_r == (DQS_WIDTH-1)) ||
wrlvl_byte_redo) begin
dqs_count_r <= #TCQ dqs_count_r;
dq_cnt_inc <= #TCQ 1'b0;
end else begin
dqs_count_r <= #TCQ dqs_count_r + 1'b1;
dq_cnt_inc <= #TCQ 1'b1;
end
wl_state_r <= #TCQ WL_DQS_CHECK;
wl_edge_detect_valid_r <= #TCQ 1'b0;
end
WL_2RANK_DQS_CNT: begin
if ((SIM_CAL_OPTION == "FAST_CAL") ||
(dqs_count_r == (DQS_WIDTH-1))) begin
dqs_count_r <= #TCQ dqs_count_r;
dq_cnt_inc <= #TCQ 1'b0;
end else begin
dqs_count_r <= #TCQ dqs_count_r + 1'b1;
dq_cnt_inc <= #TCQ 1'b1;
end
wl_state_r <= #TCQ WL_DQS_CHECK;
wl_edge_detect_valid_r <= #TCQ 1'b0;
dual_rnk_dec <= #TCQ 1'b0;
end
WL_DQS_CHECK: begin // check if all DQS have been calibrated
wl_tap_count_r <= #TCQ 'd0;
if (dq_cnt_inc == 1'b0)begin
wrlvl_rank_done_r <= #TCQ 1'd1;
for (t = 0; t < DQS_WIDTH; t = t + 1)
corse_cnt[t] <= #TCQ 3'b0;
if ((SIM_CAL_OPTION == "FAST_CAL") || (RANKS < 2) || ~oclkdelay_calib_done) begin
wl_state_r <= #TCQ WL_IDLE;
if (wrlvl_byte_redo)
dqs_count_r <= #TCQ dqs_count_r;
else
dqs_count_r <= #TCQ 'd0;
end else if (rank_cnt_r == RANKS-1) begin
dqs_count_r <= #TCQ dqs_count_r;
if (RANKS > 1)
wl_state_r <= #TCQ WL_2RANK_FINAL_TAP;
else
wl_state_r <= #TCQ WL_IDLE;
end else begin
wl_state_r <= #TCQ WL_INIT;
dqs_count_r <= #TCQ 'd0;
end
if ((SIM_CAL_OPTION == "FAST_CAL") ||
(rank_cnt_r == RANKS-1)) begin
wr_level_done_r <= #TCQ 1'd1;
rank_cnt_r <= #TCQ 2'b00;
end else begin
wr_level_done_r <= #TCQ 1'd0;
rank_cnt_r <= #TCQ rank_cnt_r + 1'b1;
end
end else
wl_state_r <= #TCQ WL_INIT;
end
WL_2RANK_FINAL_TAP: begin
if (wr_level_done_r4 && ~wr_level_done_r5) begin
for(u = 0; u < DQS_WIDTH; u = u + 1) begin
corse_inc[u] <= #TCQ final_coarse_tap[u];
fine_inc[u] <= #TCQ final_val[u];
end
dqs_count_r <= #TCQ 'd0;
end else if (wr_level_done_r5) begin
if (|corse_inc[dqs_count_r])
wl_state_r <= #TCQ WL_CORSE_INC;
else if (|fine_inc[dqs_count_w])
wl_state_r <= #TCQ WL_FINE_INC;
end
end
endcase
end
end // always @ (posedge clk)
endmodule
|
module mig_7series_v2_3_ddr_phy_wrlvl #
(
parameter TCQ = 100,
parameter DQS_CNT_WIDTH = 3,
parameter DQ_WIDTH = 64,
parameter DQS_WIDTH = 2,
parameter DRAM_WIDTH = 8,
parameter RANKS = 1,
parameter nCK_PER_CLK = 4,
parameter CLK_PERIOD = 4,
parameter SIM_CAL_OPTION = "NONE"
)
(
input clk,
input rst,
input phy_ctl_ready,
input wr_level_start,
input wl_sm_start,
input wrlvl_final,
input wrlvl_byte_redo,
input [DQS_CNT_WIDTH:0] wrcal_cnt,
input early1_data,
input early2_data,
input [DQS_CNT_WIDTH:0] oclkdelay_calib_cnt,
input oclkdelay_calib_done,
input [(DQ_WIDTH)-1:0] rd_data_rise0,
output reg wrlvl_byte_done,
output reg dqs_po_dec_done /* synthesis syn_maxfan = 2 */,
output phy_ctl_rdy_dly,
output reg wr_level_done /* synthesis syn_maxfan = 2 */,
// to phy_init for cs logic
output wrlvl_rank_done,
output done_dqs_tap_inc,
output [DQS_CNT_WIDTH:0] po_stg2_wl_cnt,
// Fine delay line used only during write leveling
// Inc/dec Phaser_Out fine delay line
output reg dqs_po_stg2_f_incdec,
// Enable Phaser_Out fine delay inc/dec
output reg dqs_po_en_stg2_f,
// Coarse delay line used during write leveling
// only if 64 taps of fine delay line were not
// sufficient to detect a 0->1 transition
// Inc Phaser_Out coarse delay line
output reg dqs_wl_po_stg2_c_incdec,
// Enable Phaser_Out coarse delay inc/dec
output reg dqs_wl_po_en_stg2_c,
// Read Phaser_Out delay value
input [8:0] po_counter_read_val,
// output reg dqs_wl_po_stg2_load,
// output reg [8:0] dqs_wl_po_stg2_reg_l,
// CK edge undetected
output reg wrlvl_err,
output reg [3*DQS_WIDTH-1:0] wl_po_coarse_cnt,
output reg [6*DQS_WIDTH-1:0] wl_po_fine_cnt,
// Debug ports
output [5:0] dbg_wl_tap_cnt,
output dbg_wl_edge_detect_valid,
output [(DQS_WIDTH)-1:0] dbg_rd_data_edge_detect,
output [DQS_CNT_WIDTH:0] dbg_dqs_count,
output [4:0] dbg_wl_state,
output [6*DQS_WIDTH-1:0] dbg_wrlvl_fine_tap_cnt,
output [3*DQS_WIDTH-1:0] dbg_wrlvl_coarse_tap_cnt,
output [255:0] dbg_phy_wrlvl
);
localparam WL_IDLE = 5'h0;
localparam WL_INIT = 5'h1;
localparam WL_INIT_FINE_INC = 5'h2;
localparam WL_INIT_FINE_INC_WAIT1= 5'h3;
localparam WL_INIT_FINE_INC_WAIT = 5'h4;
localparam WL_INIT_FINE_DEC = 5'h5;
localparam WL_INIT_FINE_DEC_WAIT = 5'h6;
localparam WL_FINE_INC = 5'h7;
localparam WL_WAIT = 5'h8;
localparam WL_EDGE_CHECK = 5'h9;
localparam WL_DQS_CHECK = 5'hA;
localparam WL_DQS_CNT = 5'hB;
localparam WL_2RANK_TAP_DEC = 5'hC;
localparam WL_2RANK_DQS_CNT = 5'hD;
localparam WL_FINE_DEC = 5'hE;
localparam WL_FINE_DEC_WAIT = 5'hF;
localparam WL_CORSE_INC = 5'h10;
localparam WL_CORSE_INC_WAIT = 5'h11;
localparam WL_CORSE_INC_WAIT1 = 5'h12;
localparam WL_CORSE_INC_WAIT2 = 5'h13;
localparam WL_CORSE_DEC = 5'h14;
localparam WL_CORSE_DEC_WAIT = 5'h15;
localparam WL_CORSE_DEC_WAIT1 = 5'h16;
localparam WL_FINE_INC_WAIT = 5'h17;
localparam WL_2RANK_FINAL_TAP = 5'h18;
localparam WL_INIT_FINE_DEC_WAIT1= 5'h19;
localparam WL_FINE_DEC_WAIT1 = 5'h1A;
localparam WL_CORSE_INC_WAIT_TMP = 5'h1B;
localparam COARSE_TAPS = 7;
localparam FAST_CAL_FINE = (CLK_PERIOD/nCK_PER_CLK <= 2500) ? 45 : 48;
localparam FAST_CAL_COARSE = (CLK_PERIOD/nCK_PER_CLK <= 2500) ? 1 : 2;
localparam REDO_COARSE = (CLK_PERIOD/nCK_PER_CLK <= 2500) ? 2 : 5;
integer i, j, k, l, p, q, r, s, t, m, n, u, v, w, x,y;
reg phy_ctl_ready_r1;
reg phy_ctl_ready_r2;
reg phy_ctl_ready_r3;
reg phy_ctl_ready_r4;
reg phy_ctl_ready_r5;
reg phy_ctl_ready_r6;
(* max_fanout = 50 *) reg [DQS_CNT_WIDTH:0] dqs_count_r;
reg [1:0] rank_cnt_r;
reg [DQS_WIDTH-1:0] rd_data_rise_wl_r;
reg [DQS_WIDTH-1:0] rd_data_previous_r;
reg [DQS_WIDTH-1:0] rd_data_edge_detect_r;
reg wr_level_done_r;
reg wrlvl_rank_done_r;
reg wr_level_start_r;
reg [4:0] wl_state_r, wl_state_r1;
reg inhibit_edge_detect_r;
reg wl_edge_detect_valid_r;
reg [5:0] wl_tap_count_r;
reg [5:0] fine_dec_cnt;
reg [5:0] fine_inc[0:DQS_WIDTH-1]; // DQS_WIDTH number of counters 6-bit each
reg [2:0] corse_dec[0:DQS_WIDTH-1];
reg [2:0] corse_inc[0:DQS_WIDTH-1];
reg dq_cnt_inc;
reg [3:0] stable_cnt;
reg flag_ck_negedge;
//reg past_negedge;
reg flag_init;
reg [2:0] corse_cnt[0:DQS_WIDTH-1];
reg [3*DQS_WIDTH-1:0] corse_cnt_dbg;
reg [2:0] wl_corse_cnt[0:RANKS-1][0:DQS_WIDTH-1];
//reg [3*DQS_WIDTH-1:0] coarse_tap_inc;
reg [2:0] final_coarse_tap[0:DQS_WIDTH-1];
reg [5:0] add_smallest[0:DQS_WIDTH-1];
reg [5:0] add_largest[0:DQS_WIDTH-1];
//reg [6*DQS_WIDTH-1:0] fine_tap_inc;
//reg [6*DQS_WIDTH-1:0] fine_tap_dec;
reg wr_level_done_r1;
reg wr_level_done_r2;
reg wr_level_done_r3;
reg wr_level_done_r4;
reg wr_level_done_r5;
reg [5:0] wl_dqs_tap_count_r[0:RANKS-1][0:DQS_WIDTH-1];
reg [5:0] smallest[0:DQS_WIDTH-1];
reg [5:0] largest[0:DQS_WIDTH-1];
reg [5:0] final_val[0:DQS_WIDTH-1];
reg [5:0] po_dec_cnt[0:DQS_WIDTH-1];
reg done_dqs_dec;
reg [8:0] po_rdval_cnt;
reg po_cnt_dec;
reg po_dec_done;
reg dual_rnk_dec;
wire [DQS_CNT_WIDTH+2:0] dqs_count_w;
reg [5:0] fast_cal_fine_cnt;
reg [2:0] fast_cal_coarse_cnt;
reg wrlvl_byte_redo_r;
reg [2:0] wrlvl_redo_corse_inc;
reg wrlvl_final_r;
reg final_corse_dec;
wire [DQS_CNT_WIDTH+2:0] oclk_count_w;
reg wrlvl_tap_done_r ;
reg [3:0] wait_cnt;
reg [3:0] incdec_wait_cnt;
// Debug ports
assign dbg_wl_edge_detect_valid = wl_edge_detect_valid_r;
assign dbg_rd_data_edge_detect = rd_data_edge_detect_r;
assign dbg_wl_tap_cnt = wl_tap_count_r;
assign dbg_dqs_count = dqs_count_r;
assign dbg_wl_state = wl_state_r;
assign dbg_wrlvl_fine_tap_cnt = wl_po_fine_cnt;
assign dbg_wrlvl_coarse_tap_cnt = wl_po_coarse_cnt;
always @(*) begin
for (v = 0; v < DQS_WIDTH; v = v + 1)
corse_cnt_dbg[3*v+:3] = corse_cnt[v];
end
assign dbg_phy_wrlvl[0+:27] = corse_cnt_dbg;
assign dbg_phy_wrlvl[27+:5] = wl_state_r;
assign dbg_phy_wrlvl[32+:4] = dqs_count_r;
assign dbg_phy_wrlvl[36+:9] = rd_data_rise_wl_r;
assign dbg_phy_wrlvl[45+:9] = rd_data_previous_r;
assign dbg_phy_wrlvl[54+:4] = stable_cnt;
assign dbg_phy_wrlvl[58] = 'd0;
assign dbg_phy_wrlvl[59] = flag_ck_negedge;
assign dbg_phy_wrlvl [60] = wl_edge_detect_valid_r;
assign dbg_phy_wrlvl [61+:6] = wl_tap_count_r;
assign dbg_phy_wrlvl [67+:9] = rd_data_edge_detect_r;
assign dbg_phy_wrlvl [76+:54] = wl_po_fine_cnt;
assign dbg_phy_wrlvl [130+:27] = wl_po_coarse_cnt;
//**************************************************************************
// DQS count to hard PHY during write leveling using Phaser_OUT Stage2 delay
//**************************************************************************
assign po_stg2_wl_cnt = dqs_count_r;
assign wrlvl_rank_done = wrlvl_rank_done_r;
assign done_dqs_tap_inc = done_dqs_dec;
assign phy_ctl_rdy_dly = phy_ctl_ready_r6;
always @(posedge clk) begin
phy_ctl_ready_r1 <= #TCQ phy_ctl_ready;
phy_ctl_ready_r2 <= #TCQ phy_ctl_ready_r1;
phy_ctl_ready_r3 <= #TCQ phy_ctl_ready_r2;
phy_ctl_ready_r4 <= #TCQ phy_ctl_ready_r3;
phy_ctl_ready_r5 <= #TCQ phy_ctl_ready_r4;
phy_ctl_ready_r6 <= #TCQ phy_ctl_ready_r5;
wrlvl_byte_redo_r <= #TCQ wrlvl_byte_redo;
wrlvl_final_r <= #TCQ wrlvl_final;
if ((wrlvl_byte_redo && ~wrlvl_byte_redo_r) ||
(wrlvl_final && ~wrlvl_final_r))
wr_level_done <= #TCQ 1'b0;
else
wr_level_done <= #TCQ done_dqs_dec;
end
// Status signal that will be asserted once the first
// pass of write leveling is done.
always @(posedge clk) begin
if(rst) begin
wrlvl_tap_done_r <= #TCQ 1'b0 ;
end else begin
if(wrlvl_tap_done_r == 1'b0) begin
if(oclkdelay_calib_done) begin
wrlvl_tap_done_r <= #TCQ 1'b1 ;
end
end
end
end
always @(posedge clk) begin
if (rst || po_cnt_dec)
wait_cnt <= #TCQ 'd8;
else if (phy_ctl_ready_r6 && (wait_cnt > 'd0))
wait_cnt <= #TCQ wait_cnt - 1;
end
always @(posedge clk) begin
if (rst) begin
po_rdval_cnt <= #TCQ 'd0;
end else if (phy_ctl_ready_r5 && ~phy_ctl_ready_r6) begin
po_rdval_cnt <= #TCQ po_counter_read_val;
end else if (po_rdval_cnt > 'd0) begin
if (po_cnt_dec)
po_rdval_cnt <= #TCQ po_rdval_cnt - 1;
else
po_rdval_cnt <= #TCQ po_rdval_cnt;
end else if (po_rdval_cnt == 'd0) begin
po_rdval_cnt <= #TCQ po_rdval_cnt;
end
end
always @(posedge clk) begin
if (rst || (po_rdval_cnt == 'd0))
po_cnt_dec <= #TCQ 1'b0;
else if (phy_ctl_ready_r6 && (po_rdval_cnt > 'd0) && (wait_cnt == 'd1))
po_cnt_dec <= #TCQ 1'b1;
else
po_cnt_dec <= #TCQ 1'b0;
end
always @(posedge clk) begin
if (rst)
po_dec_done <= #TCQ 1'b0;
else if (((po_cnt_dec == 'd1) && (po_rdval_cnt == 'd1)) ||
(phy_ctl_ready_r6 && (po_rdval_cnt == 'd0))) begin
po_dec_done <= #TCQ 1'b1;
end
end
always @(posedge clk) begin
dqs_po_dec_done <= #TCQ po_dec_done;
wr_level_done_r1 <= #TCQ wr_level_done_r;
wr_level_done_r2 <= #TCQ wr_level_done_r1;
wr_level_done_r3 <= #TCQ wr_level_done_r2;
wr_level_done_r4 <= #TCQ wr_level_done_r3;
wr_level_done_r5 <= #TCQ wr_level_done_r4;
for (l = 0; l < DQS_WIDTH; l = l + 1) begin
wl_po_coarse_cnt[3*l+:3] <= #TCQ final_coarse_tap[l];
if ((RANKS == 1) || ~oclkdelay_calib_done)
wl_po_fine_cnt[6*l+:6] <= #TCQ smallest[l];
else
wl_po_fine_cnt[6*l+:6] <= #TCQ final_val[l];
end
end
generate
if (RANKS == 2) begin: dual_rank
always @(posedge clk) begin
if (rst || (wrlvl_byte_redo && ~wrlvl_byte_redo_r) ||
(wrlvl_final && ~wrlvl_final_r))
done_dqs_dec <= #TCQ 1'b0;
else if ((SIM_CAL_OPTION == "FAST_CAL") || ~oclkdelay_calib_done)
done_dqs_dec <= #TCQ wr_level_done_r;
else if (wr_level_done_r5 && (wl_state_r == WL_IDLE))
done_dqs_dec <= #TCQ 1'b1;
end
end else begin: single_rank
always @(posedge clk) begin
if (rst || (wrlvl_byte_redo && ~wrlvl_byte_redo_r) ||
(wrlvl_final && ~wrlvl_final_r))
done_dqs_dec <= #TCQ 1'b0;
else if (~oclkdelay_calib_done)
done_dqs_dec <= #TCQ wr_level_done_r;
else if (wr_level_done_r3 && ~wr_level_done_r4)
done_dqs_dec <= #TCQ 1'b1;
end
end
endgenerate
always @(posedge clk)
if (rst || (wrlvl_byte_redo && ~wrlvl_byte_redo_r))
wrlvl_byte_done <= #TCQ 1'b0;
else if (wrlvl_byte_redo && wr_level_done_r3 && ~wr_level_done_r4)
wrlvl_byte_done <= #TCQ 1'b1;
// Storing DQS tap values at the end of each DQS write leveling
always @(posedge clk) begin
if (rst) begin
for (k = 0; k < RANKS; k = k + 1) begin: rst_wl_dqs_tap_count_loop
for (n = 0; n < DQS_WIDTH; n = n + 1) begin
wl_corse_cnt[k][n] <= #TCQ 'b0;
wl_dqs_tap_count_r[k][n] <= #TCQ 'b0;
end
end
end else if ((wl_state_r == WL_DQS_CNT) | (wl_state_r == WL_WAIT) |
(wl_state_r == WL_FINE_DEC_WAIT1) |
(wl_state_r == WL_2RANK_TAP_DEC)) begin
wl_dqs_tap_count_r[rank_cnt_r][dqs_count_r] <= #TCQ wl_tap_count_r;
wl_corse_cnt[rank_cnt_r][dqs_count_r] <= #TCQ corse_cnt[dqs_count_r];
end else if ((SIM_CAL_OPTION == "FAST_CAL") & (wl_state_r == WL_DQS_CHECK)) begin
for (p = 0; p < RANKS; p = p +1) begin: dqs_tap_rank_cnt
for(q = 0; q < DQS_WIDTH; q = q +1) begin: dqs_tap_dqs_cnt
wl_dqs_tap_count_r[p][q] <= #TCQ wl_tap_count_r;
wl_corse_cnt[p][q] <= #TCQ corse_cnt[0];
end
end
end
end
// Convert coarse delay to fine taps in case of an unequal number of coarse
// taps between ranks. A difference of 1 coarse tap count is assumed
// between ranks. A common fine and coarse tap value must be used for both ranks
// because Phaser_Out has only one rank register.
// Coarse tap1 = period(ps)*93/360 = 34 fine taps
// Other coarse taps = period(ps)*103/360 = 38 fine taps
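// Worked example (illustrative only, derived from the selection logic
// below): if rank 0 settles at 1 coarse tap and rank 1 at 2 coarse taps,
// the common setting becomes final_coarse_tap = 1 with add_largest = 38
// fine taps (coarse tap 2 converted); if rank 0 settles at 0 and rank 1
// at 1, final_coarse_tap = 0 with add_largest = 34 (coarse tap 1 converted).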
generate
genvar cnt;
if (RANKS == 2) begin // Dual rank
for(cnt = 0; cnt < DQS_WIDTH; cnt = cnt +1) begin: coarse_dqs_cnt
always @(posedge clk) begin
if (rst) begin
//coarse_tap_inc[3*cnt+:3] <= #TCQ 'b0;
add_smallest[cnt] <= #TCQ 'd0;
add_largest[cnt] <= #TCQ 'd0;
final_coarse_tap[cnt] <= #TCQ 'd0;
end else if (wr_level_done_r1 & ~wr_level_done_r2) begin
if (~oclkdelay_calib_done) begin
for(y = 0 ; y < DQS_WIDTH; y = y+1) begin
final_coarse_tap[y] <= #TCQ wl_corse_cnt[0][y];
add_smallest[y] <= #TCQ 'd0;
add_largest[y] <= #TCQ 'd0;
end
end else
if (wl_corse_cnt[0][cnt] == wl_corse_cnt[1][cnt]) begin
// Both ranks use the same number of coarse delay taps.
// No conversion of coarse tap to fine taps required.
//coarse_tap_inc[3*cnt+:3] <= #TCQ wl_corse_cnt[1][3*cnt+:3];
final_coarse_tap[cnt] <= #TCQ wl_corse_cnt[1][cnt];
add_smallest[cnt] <= #TCQ 'd0;
add_largest[cnt] <= #TCQ 'd0;
end else if (wl_corse_cnt[0][cnt] < wl_corse_cnt[1][cnt]) begin
// Rank 0 uses fewer coarse delay taps than rank1.
// Conversion of coarse taps to fine taps is required for rank1.
// The final coarse count will be the smaller value.
//coarse_tap_inc[3*cnt+:3] <= #TCQ wl_corse_cnt[1][3*cnt+:3] - 1;
final_coarse_tap[cnt] <= #TCQ wl_corse_cnt[1][cnt] - 1;
if (|wl_corse_cnt[0][cnt])
// Coarse tap 2 or higher being converted to fine taps
// This will be added to 'largest' value in final_val
// computation
add_largest[cnt] <= #TCQ 'd38;
else
// Coarse tap 1 being converted to fine taps
// This will be added to 'largest' value in final_val
// computation
add_largest[cnt] <= #TCQ 'd34;
end else if (wl_corse_cnt[0][cnt] > wl_corse_cnt[1][cnt]) begin
// This may be an unlikely scenario in a real system.
// Rank 0 uses more coarse delay taps than rank1.
// Conversion of coarse taps to fine taps is required.
//coarse_tap_inc[3*cnt+:3] <= #TCQ 'd0;
final_coarse_tap[cnt] <= #TCQ wl_corse_cnt[1][cnt];
if (|wl_corse_cnt[1][cnt])
// Coarse tap 2 or higher being converted to fine taps
// This will be added to 'smallest' value in final_val
// computation
add_smallest[cnt] <= #TCQ 'd38;
else
// Coarse tap 1 being converted to fine taps
// This will be added to 'smallest' value in
// final_val computation
add_smallest[cnt] <= #TCQ 'd34;
end
end
end
end
end else begin
// Single rank
always @(posedge clk) begin
//coarse_tap_inc <= #TCQ 'd0;
for(w = 0; w < DQS_WIDTH; w = w + 1) begin
final_coarse_tap[w] <= #TCQ wl_corse_cnt[0][w];
add_smallest[w] <= #TCQ 'd0;
add_largest[w] <= #TCQ 'd0;
end
end
end
endgenerate
// Determine delay value for DQS in multirank system
// Assuming the delay value is smallest for the rank 0 DQS
// and largest for the last rank (RANKS-1) DQS
// Set to smallest + ((largest-smallest)/2)
always @(posedge clk) begin
if (rst) begin
for(x = 0; x < DQS_WIDTH; x = x +1) begin
smallest[x] <= #TCQ 'b0;
largest[x] <= #TCQ 'b0;
end
end else if ((wl_state_r == WL_DQS_CNT) & wrlvl_byte_redo) begin
smallest[dqs_count_r] <= #TCQ wl_dqs_tap_count_r[0][dqs_count_r];
largest[dqs_count_r] <= #TCQ wl_dqs_tap_count_r[0][dqs_count_r];
end else if ((wl_state_r == WL_DQS_CNT) |
(wl_state_r == WL_2RANK_TAP_DEC)) begin
smallest[dqs_count_r] <= #TCQ wl_dqs_tap_count_r[0][dqs_count_r];
largest[dqs_count_r] <= #TCQ wl_dqs_tap_count_r[RANKS-1][dqs_count_r];
end else if (((SIM_CAL_OPTION == "FAST_CAL") |
(~oclkdelay_calib_done & ~wrlvl_byte_redo)) &
wr_level_done_r1 & ~wr_level_done_r2) begin
for(i = 0; i < DQS_WIDTH; i = i +1) begin: smallest_dqs
smallest[i] <= #TCQ wl_dqs_tap_count_r[0][i];
largest[i] <= #TCQ wl_dqs_tap_count_r[0][i];
end
end
end
// final_val to be used for all DQSs in all ranks
genvar wr_i;
generate
for (wr_i = 0; wr_i < DQS_WIDTH; wr_i = wr_i +1) begin: gen_final_tap
always @(posedge clk) begin
if (rst)
final_val[wr_i] <= #TCQ 'b0;
else if (wr_level_done_r2 && ~wr_level_done_r3) begin
if (~oclkdelay_calib_done)
final_val[wr_i] <= #TCQ (smallest[wr_i] + add_smallest[wr_i]);
else if ((smallest[wr_i] + add_smallest[wr_i]) <
(largest[wr_i] + add_largest[wr_i]))
final_val[wr_i] <= #TCQ ((smallest[wr_i] + add_smallest[wr_i]) +
(((largest[wr_i] + add_largest[wr_i]) -
(smallest[wr_i] + add_smallest[wr_i]))/2));
else if ((smallest[wr_i] + add_smallest[wr_i]) >
(largest[wr_i] + add_largest[wr_i]))
final_val[wr_i] <= #TCQ ((largest[wr_i] + add_largest[wr_i]) +
(((smallest[wr_i] + add_smallest[wr_i]) -
(largest[wr_i] + add_largest[wr_i]))/2));
else if ((smallest[wr_i] + add_smallest[wr_i]) ==
(largest[wr_i] + add_largest[wr_i]))
final_val[wr_i] <= #TCQ (largest[wr_i] + add_largest[wr_i]);
end
end
end
endgenerate
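// Worked example (illustrative only): with smallest = 10, add_smallest = 0,
// largest = 20 and add_largest = 38, the adjusted values are 10 and 58, so
// final_val = 10 + (58 - 10)/2 = 34 fine taps, i.e. the midpoint between
// the adjusted rank 0 and rank 1 DQS delays.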
// // fine tap inc/dec value for all DQSs in all ranks
// genvar dqs_i;
// generate
// for (dqs_i = 0; dqs_i < DQS_WIDTH; dqs_i = dqs_i +1) begin: gen_fine_tap
// always @(posedge clk) begin
// if (rst)
// fine_tap_inc[6*dqs_i+:6] <= #TCQ 'd0;
// //fine_tap_dec[6*dqs_i+:6] <= #TCQ 'd0;
// else if (wr_level_done_r3 && ~wr_level_done_r4) begin
// fine_tap_inc[6*dqs_i+:6] <= #TCQ final_val[6*dqs_i+:6];
// //fine_tap_dec[6*dqs_i+:6] <= #TCQ 'd0;
// end
// end
// endgenerate
// Inc/Dec Phaser_Out stage 2 fine delay line
always @(posedge clk) begin
if (rst) begin
// Fine delay line used only during write leveling
dqs_po_stg2_f_incdec <= #TCQ 1'b0;
dqs_po_en_stg2_f <= #TCQ 1'b0;
// Dec Phaser_Out fine delay (1)before write leveling,
// (2)if no 0 to 1 transition detected with 63 fine delay taps, or
// (3)dual rank case where fine taps for the first rank need to be 0
end else if (po_cnt_dec || (wl_state_r == WL_INIT_FINE_DEC) ||
(wl_state_r == WL_FINE_DEC)) begin
dqs_po_stg2_f_incdec <= #TCQ 1'b0;
dqs_po_en_stg2_f <= #TCQ 1'b1;
// Inc Phaser_Out fine delay during write leveling
end else if ((wl_state_r == WL_INIT_FINE_INC) ||
(wl_state_r == WL_FINE_INC)) begin
dqs_po_stg2_f_incdec <= #TCQ 1'b1;
dqs_po_en_stg2_f <= #TCQ 1'b1;
end else begin
dqs_po_stg2_f_incdec <= #TCQ 1'b0;
dqs_po_en_stg2_f <= #TCQ 1'b0;
end
end
// Inc Phaser_Out stage 2 Coarse delay line
always @(posedge clk) begin
if (rst) begin
// Coarse delay line used during write leveling
// only if no 0->1 transition is detected within the 64
// fine delay line taps
dqs_wl_po_stg2_c_incdec <= #TCQ 1'b0;
dqs_wl_po_en_stg2_c <= #TCQ 1'b0;
end else if (wl_state_r == WL_CORSE_INC) begin
// Inc Phaser_Out coarse delay during write leveling
dqs_wl_po_stg2_c_incdec <= #TCQ 1'b1;
dqs_wl_po_en_stg2_c <= #TCQ 1'b1;
end else begin
dqs_wl_po_stg2_c_incdec <= #TCQ 1'b0;
dqs_wl_po_en_stg2_c <= #TCQ 1'b0;
end
end
// Only the rise data is stored for checking. The data coming back during
// write leveling will be a static value, so checking the rise data is
// enough.
genvar rd_i;
generate
for(rd_i = 0; rd_i < DQS_WIDTH; rd_i = rd_i +1)begin: gen_rd
always @(posedge clk)
rd_data_rise_wl_r[rd_i] <=
#TCQ |rd_data_rise0[(rd_i*DRAM_WIDTH)+DRAM_WIDTH-1:rd_i*DRAM_WIDTH];
end
endgenerate
// storing the previous data for checking later.
always @(posedge clk)begin
if ((wl_state_r == WL_INIT) || //(wl_state_r == WL_INIT_FINE_INC_WAIT) ||
//(wl_state_r == WL_INIT_FINE_INC_WAIT1) ||
((wl_state_r1 == WL_INIT_FINE_INC_WAIT) & (wl_state_r == WL_INIT_FINE_INC)) ||
(wl_state_r == WL_FINE_DEC) || (wl_state_r == WL_FINE_DEC_WAIT1) || (wl_state_r == WL_FINE_DEC_WAIT) ||
(wl_state_r == WL_CORSE_INC) || (wl_state_r == WL_CORSE_INC_WAIT) || (wl_state_r == WL_CORSE_INC_WAIT_TMP) ||
(wl_state_r == WL_CORSE_INC_WAIT1) || (wl_state_r == WL_CORSE_INC_WAIT2) ||
((wl_state_r == WL_EDGE_CHECK) & (wl_edge_detect_valid_r)))
rd_data_previous_r <= #TCQ rd_data_rise_wl_r;
end
// Stable count requirement increased because of the fine tap resolution;
// the logic below requires 14 consecutive matching samples.
always @(posedge clk)begin
if (rst | (wl_state_r == WL_DQS_CNT) |
(wl_state_r == WL_2RANK_TAP_DEC) |
(wl_state_r == WL_FINE_DEC) |
(rd_data_previous_r[dqs_count_r] != rd_data_rise_wl_r[dqs_count_r]) |
(wl_state_r1 == WL_INIT_FINE_DEC))
stable_cnt <= #TCQ 'd0;
else if ((wl_tap_count_r > 6'd0) &
(((wl_state_r == WL_EDGE_CHECK) & (wl_edge_detect_valid_r)) |
((wl_state_r1 == WL_INIT_FINE_INC_WAIT) & (wl_state_r == WL_INIT_FINE_INC)))) begin
if ((rd_data_previous_r[dqs_count_r] == rd_data_rise_wl_r[dqs_count_r])
& (stable_cnt < 'd14))
stable_cnt <= #TCQ stable_cnt + 1;
end
end
// Signal to ensure that flag_ck_negedge does not incorrectly assert
// when DQS is very close to CK rising edge
//always @(posedge clk) begin
// if (rst | (wl_state_r == WL_DQS_CNT) |
// (wl_state_r == WL_DQS_CHECK) | wr_level_done_r)
// past_negedge <= #TCQ 1'b0;
// else if (~flag_ck_negedge && ~rd_data_previous_r[dqs_count_r] &&
// (stable_cnt == 'd0) && ((wl_state_r == WL_CORSE_INC_WAIT1) |
// (wl_state_r == WL_CORSE_INC_WAIT2)))
// past_negedge <= #TCQ 1'b1;
//end
// Flag to indicate negedge of CK detected and ignore 0->1 transitions
// in this region
always @(posedge clk)begin
if (rst | (wl_state_r == WL_DQS_CNT) |
(wl_state_r == WL_DQS_CHECK) | wr_level_done_r |
(wl_state_r1 == WL_INIT_FINE_DEC))
flag_ck_negedge <= #TCQ 1'd0;
else if ((rd_data_previous_r[dqs_count_r] && ((stable_cnt > 'd0) |
(wl_state_r == WL_FINE_DEC) | (wl_state_r == WL_FINE_DEC_WAIT) | (wl_state_r == WL_FINE_DEC_WAIT1))) |
(wl_state_r == WL_CORSE_INC))
flag_ck_negedge <= #TCQ 1'd1;
else if (~rd_data_previous_r[dqs_count_r] && (stable_cnt == 'd14))
//&& flag_ck_negedge)
flag_ck_negedge <= #TCQ 1'd0;
end
// Flag to inhibit rd_data_edge_detect_r before stable DQ
always @(posedge clk) begin
if (rst)
flag_init <= #TCQ 1'b1;
else if ((wl_state_r == WL_WAIT) && ((wl_state_r1 == WL_INIT_FINE_INC_WAIT) ||
(wl_state_r1 == WL_INIT_FINE_DEC_WAIT)))
flag_init <= #TCQ 1'b0;
end
//checking for transition from 0 to 1
always @(posedge clk)begin
if (rst | flag_ck_negedge | flag_init | (wl_tap_count_r < 'd1) |
inhibit_edge_detect_r)
rd_data_edge_detect_r <= #TCQ {DQS_WIDTH{1'b0}};
else if (rd_data_edge_detect_r[dqs_count_r] == 1'b1) begin
if ((wl_state_r == WL_FINE_DEC) || (wl_state_r == WL_FINE_DEC_WAIT) || (wl_state_r == WL_FINE_DEC_WAIT1) ||
(wl_state_r == WL_CORSE_INC) || (wl_state_r == WL_CORSE_INC_WAIT) || (wl_state_r == WL_CORSE_INC_WAIT_TMP) ||
(wl_state_r == WL_CORSE_INC_WAIT1) || (wl_state_r == WL_CORSE_INC_WAIT2))
rd_data_edge_detect_r <= #TCQ {DQS_WIDTH{1'b0}};
else
rd_data_edge_detect_r <= #TCQ rd_data_edge_detect_r;
end else if (rd_data_previous_r[dqs_count_r] && (stable_cnt < 'd14))
rd_data_edge_detect_r <= #TCQ {DQS_WIDTH{1'b0}};
else
rd_data_edge_detect_r <= #TCQ (~rd_data_previous_r & rd_data_rise_wl_r);
end
// Registering the write leveling start signal
always@(posedge clk) begin
wr_level_start_r <= #TCQ wr_level_start;
end
// Assign dqs_count_r to dqs_count_w to perform a shift operation
// instead of a multiply operation
assign dqs_count_w = {2'b00, dqs_count_r};
assign oclk_count_w = {2'b00, oclkdelay_calib_cnt};
always @(posedge clk) begin
if (rst)
incdec_wait_cnt <= #TCQ 'd0;
else if ((wl_state_r == WL_FINE_DEC_WAIT1) ||
(wl_state_r == WL_INIT_FINE_DEC_WAIT1) ||
(wl_state_r == WL_CORSE_INC_WAIT_TMP))
incdec_wait_cnt <= #TCQ incdec_wait_cnt + 1;
else
incdec_wait_cnt <= #TCQ 'd0;
end
// state machine to initiate the write leveling sequence
// The state machine operates on one byte at a time.
// It will increment the delays to the DQS OSERDES
// and sample the DQ from the memory. When it detects
// a transition from 0 to 1, the write leveling is considered
// done.
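// Nominal per-byte flow (a sketch inferred from the case statement below):
// WL_IDLE -> WL_INIT -> WL_INIT_FINE_INC/_WAIT (establish a stable sample),
// then loop WL_WAIT -> WL_EDGE_CHECK -> WL_FINE_INC incrementing fine taps
// until a 0->1 edge is seen or the fine tap limit is reached; if no edge is
// found, WL_FINE_DEC rolls the fine taps back and WL_CORSE_INC bumps the
// coarse delay; on an edge, WL_DQS_CNT / WL_DQS_CHECK move to the next byte
// or finish the rank.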
always @(posedge clk) begin
if(rst)begin
wrlvl_err <= #TCQ 1'b0;
wr_level_done_r <= #TCQ 1'b0;
wrlvl_rank_done_r <= #TCQ 1'b0;
dqs_count_r <= #TCQ {DQS_CNT_WIDTH+1{1'b0}};
dq_cnt_inc <= #TCQ 1'b1;
rank_cnt_r <= #TCQ 2'b00;
wl_state_r <= #TCQ WL_IDLE;
wl_state_r1 <= #TCQ WL_IDLE;
inhibit_edge_detect_r <= #TCQ 1'b1;
wl_edge_detect_valid_r <= #TCQ 1'b0;
wl_tap_count_r <= #TCQ 6'd0;
fine_dec_cnt <= #TCQ 6'd0;
for (r = 0; r < DQS_WIDTH; r = r + 1) begin
fine_inc[r] <= #TCQ 6'b0;
corse_dec[r] <= #TCQ 3'b0;
corse_inc[r] <= #TCQ 3'b0;
corse_cnt[r] <= #TCQ 3'b0;
end
dual_rnk_dec <= #TCQ 1'b0;
fast_cal_fine_cnt <= #TCQ FAST_CAL_FINE;
fast_cal_coarse_cnt <= #TCQ FAST_CAL_COARSE;
final_corse_dec <= #TCQ 1'b0;
//zero_tran_r <= #TCQ 1'b0;
wrlvl_redo_corse_inc <= #TCQ 'd0;
end else begin
wl_state_r1 <= #TCQ wl_state_r;
case (wl_state_r)
WL_IDLE: begin
wrlvl_rank_done_r <= #TCQ 1'd0;
inhibit_edge_detect_r <= #TCQ 1'b1;
if (wrlvl_byte_redo && ~wrlvl_byte_redo_r) begin
wr_level_done_r <= #TCQ 1'b0;
dqs_count_r <= #TCQ wrcal_cnt;
corse_cnt[wrcal_cnt] <= #TCQ final_coarse_tap[wrcal_cnt];
wl_tap_count_r <= #TCQ smallest[wrcal_cnt];
if (early1_data &&
(((final_coarse_tap[wrcal_cnt] < 'd6) && (CLK_PERIOD/nCK_PER_CLK <= 2500)) ||
((final_coarse_tap[wrcal_cnt] < 'd3) && (CLK_PERIOD/nCK_PER_CLK > 2500))))
wrlvl_redo_corse_inc <= #TCQ REDO_COARSE;
else if (early2_data && (final_coarse_tap[wrcal_cnt] < 'd2))
wrlvl_redo_corse_inc <= #TCQ 3'd6;
else begin
wl_state_r <= #TCQ WL_IDLE;
wrlvl_err <= #TCQ 1'b1;
end
end else if (wrlvl_final && ~wrlvl_final_r) begin
wr_level_done_r <= #TCQ 1'b0;
dqs_count_r <= #TCQ 'd0;
end
// verilint STARC-2.2.3.3 off
if(!wr_level_done_r & wr_level_start_r & wl_sm_start) begin
if (SIM_CAL_OPTION == "FAST_CAL")
wl_state_r <= #TCQ WL_FINE_INC;
else
wl_state_r <= #TCQ WL_INIT;
end
end
// verilint STARC-2.2.3.3 on
WL_INIT: begin
wl_edge_detect_valid_r <= #TCQ 1'b0;
inhibit_edge_detect_r <= #TCQ 1'b1;
wrlvl_rank_done_r <= #TCQ 1'd0;
//zero_tran_r <= #TCQ 1'b0;
if (wrlvl_final)
corse_cnt[dqs_count_w ] <= #TCQ final_coarse_tap[dqs_count_w ];
if (wrlvl_byte_redo) begin
if (|wl_tap_count_r) begin
wl_state_r <= #TCQ WL_FINE_DEC;
fine_dec_cnt <= #TCQ wl_tap_count_r;
end else if ((corse_cnt[dqs_count_w] + wrlvl_redo_corse_inc) <= 'd7)
wl_state_r <= #TCQ WL_CORSE_INC;
else begin
wl_state_r <= #TCQ WL_IDLE;
wrlvl_err <= #TCQ 1'b1;
end
end else if(wl_sm_start)
wl_state_r <= #TCQ WL_INIT_FINE_INC;
end
// Initially, the Phaser_Out fine delay taps are incremented
// until stable_cnt=14. A stable_cnt of 14 indicates
// that rd_data_rise_wl_r=rd_data_previous_r for 14 fine
// tap increments. This is done to inhibit false 0->1
// edge detection when DQS is initially aligned to the
// negedge of CK
WL_INIT_FINE_INC: begin
wl_state_r <= #TCQ WL_INIT_FINE_INC_WAIT1;
wl_tap_count_r <= #TCQ wl_tap_count_r + 1'b1;
final_corse_dec <= #TCQ 1'b0;
end
WL_INIT_FINE_INC_WAIT1: begin
if (wl_sm_start)
wl_state_r <= #TCQ WL_INIT_FINE_INC_WAIT;
end
// Case 1: if the stable value of rd_data_previous_r is 0,
// proceed directly to 0->1 edge detection.
// Case 2: if the stable value of rd_data_previous_r is 1,
// decrement the fine taps back to 0 and then proceed to 0->1
// edge detection. The decrement is needed to make sure a
// valid 0->1 transition was not left undetected.
WL_INIT_FINE_INC_WAIT: begin
if (wl_sm_start) begin
if (stable_cnt < 'd14)
wl_state_r <= #TCQ WL_INIT_FINE_INC;
else if (~rd_data_previous_r[dqs_count_r]) begin
wl_state_r <= #TCQ WL_WAIT;
inhibit_edge_detect_r <= #TCQ 1'b0;
end else begin
wl_state_r <= #TCQ WL_INIT_FINE_DEC;
fine_dec_cnt <= #TCQ wl_tap_count_r;
end
end
end
// Case 2 (see above): the stable value of rd_data_previous_r is 1, so
// decrement the fine taps back to 0 before proceeding to 0->1
// edge detection, to make sure a valid 0->1 transition was not
// left undetected.
WL_INIT_FINE_DEC: begin
wl_tap_count_r <= #TCQ 'd0;
wl_state_r <= #TCQ WL_INIT_FINE_DEC_WAIT1;
if (fine_dec_cnt > 6'd0)
fine_dec_cnt <= #TCQ fine_dec_cnt - 1;
else
fine_dec_cnt <= #TCQ fine_dec_cnt;
end
WL_INIT_FINE_DEC_WAIT1: begin
if (incdec_wait_cnt == 'd8)
wl_state_r <= #TCQ WL_INIT_FINE_DEC_WAIT;
end
WL_INIT_FINE_DEC_WAIT: begin
if (fine_dec_cnt > 6'd0) begin
wl_state_r <= #TCQ WL_INIT_FINE_DEC;
inhibit_edge_detect_r <= #TCQ 1'b1;
end else begin
wl_state_r <= #TCQ WL_WAIT;
inhibit_edge_detect_r <= #TCQ 1'b0;
end
end
// Inc DQS Phaser_Out Stage2 Fine Delay line
WL_FINE_INC: begin
wl_edge_detect_valid_r <= #TCQ 1'b0;
if (SIM_CAL_OPTION == "FAST_CAL") begin
wl_state_r <= #TCQ WL_FINE_INC_WAIT;
if (fast_cal_fine_cnt > 'd0)
fast_cal_fine_cnt <= #TCQ fast_cal_fine_cnt - 1;
else
fast_cal_fine_cnt <= #TCQ fast_cal_fine_cnt;
end else if (wr_level_done_r5) begin
wl_tap_count_r <= #TCQ 'd0;
wl_state_r <= #TCQ WL_FINE_INC_WAIT;
if (|fine_inc[dqs_count_w])
fine_inc[dqs_count_w] <= #TCQ fine_inc[dqs_count_w] - 1;
end else begin
wl_state_r <= #TCQ WL_WAIT;
wl_tap_count_r <= #TCQ wl_tap_count_r + 1'b1;
end
end
WL_FINE_INC_WAIT: begin
if (SIM_CAL_OPTION == "FAST_CAL") begin
if (fast_cal_fine_cnt > 'd0)
wl_state_r <= #TCQ WL_FINE_INC;
else if (fast_cal_coarse_cnt > 'd0)
wl_state_r <= #TCQ WL_CORSE_INC;
else
wl_state_r <= #TCQ WL_DQS_CNT;
end else if (|fine_inc[dqs_count_w])
wl_state_r <= #TCQ WL_FINE_INC;
else if (dqs_count_r == (DQS_WIDTH-1))
wl_state_r <= #TCQ WL_IDLE;
else begin
wl_state_r <= #TCQ WL_2RANK_FINAL_TAP;
dqs_count_r <= #TCQ dqs_count_r + 1;
end
end
WL_FINE_DEC: begin
wl_edge_detect_valid_r <= #TCQ 1'b0;
wl_tap_count_r <= #TCQ 'd0;
wl_state_r <= #TCQ WL_FINE_DEC_WAIT1;
if (fine_dec_cnt > 6'd0)
fine_dec_cnt <= #TCQ fine_dec_cnt - 1;
else
fine_dec_cnt <= #TCQ fine_dec_cnt;
end
WL_FINE_DEC_WAIT1: begin
if (incdec_wait_cnt == 'd8)
wl_state_r <= #TCQ WL_FINE_DEC_WAIT;
end
WL_FINE_DEC_WAIT: begin
if (fine_dec_cnt > 6'd0)
wl_state_r <= #TCQ WL_FINE_DEC;
//else if (zero_tran_r)
// wl_state_r <= #TCQ WL_DQS_CNT;
else if (dual_rnk_dec) begin
if (|corse_dec[dqs_count_r])
wl_state_r <= #TCQ WL_CORSE_DEC;
else
wl_state_r <= #TCQ WL_2RANK_DQS_CNT;
end else if (wrlvl_byte_redo) begin
if ((corse_cnt[dqs_count_w] + wrlvl_redo_corse_inc) <= 'd7)
wl_state_r <= #TCQ WL_CORSE_INC;
else begin
wl_state_r <= #TCQ WL_IDLE;
wrlvl_err <= #TCQ 1'b1;
end
end else
wl_state_r <= #TCQ WL_CORSE_INC;
end
WL_CORSE_DEC: begin
wl_state_r <= #TCQ WL_CORSE_DEC_WAIT;
dual_rnk_dec <= #TCQ 1'b0;
if (|corse_dec[dqs_count_r])
corse_dec[dqs_count_r] <= #TCQ corse_dec[dqs_count_r] - 1;
else
corse_dec[dqs_count_r] <= #TCQ corse_dec[dqs_count_r];
end
WL_CORSE_DEC_WAIT: begin
if (wl_sm_start) begin
//if (|corse_dec[dqs_count_r])
// wl_state_r <= #TCQ WL_CORSE_DEC;
if (|corse_dec[dqs_count_r])
wl_state_r <= #TCQ WL_CORSE_DEC_WAIT1;
else
wl_state_r <= #TCQ WL_2RANK_DQS_CNT;
end
end
WL_CORSE_DEC_WAIT1: begin
if (wl_sm_start)
wl_state_r <= #TCQ WL_CORSE_DEC;
end
WL_CORSE_INC: begin
wl_state_r <= #TCQ WL_CORSE_INC_WAIT_TMP;
if (SIM_CAL_OPTION == "FAST_CAL") begin
if (fast_cal_coarse_cnt > 'd0)
fast_cal_coarse_cnt <= #TCQ fast_cal_coarse_cnt - 1;
else
fast_cal_coarse_cnt <= #TCQ fast_cal_coarse_cnt;
end else if (wrlvl_byte_redo) begin
corse_cnt[dqs_count_w] <= #TCQ corse_cnt[dqs_count_w] + 1;
if (|wrlvl_redo_corse_inc)
wrlvl_redo_corse_inc <= #TCQ wrlvl_redo_corse_inc - 1;
end else if (~wr_level_done_r5)
corse_cnt[dqs_count_r] <= #TCQ corse_cnt[dqs_count_r] + 1;
else if (|corse_inc[dqs_count_w])
corse_inc[dqs_count_w] <= #TCQ corse_inc[dqs_count_w] - 1;
end
WL_CORSE_INC_WAIT_TMP: begin
if (incdec_wait_cnt == 'd8)
wl_state_r <= #TCQ WL_CORSE_INC_WAIT;
end
WL_CORSE_INC_WAIT: begin
if (SIM_CAL_OPTION == "FAST_CAL") begin
if (fast_cal_coarse_cnt > 'd0)
wl_state_r <= #TCQ WL_CORSE_INC;
else
wl_state_r <= #TCQ WL_DQS_CNT;
end else if (wrlvl_byte_redo) begin
if (|wrlvl_redo_corse_inc)
wl_state_r <= #TCQ WL_CORSE_INC;
else begin
wl_state_r <= #TCQ WL_INIT_FINE_INC;
inhibit_edge_detect_r <= #TCQ 1'b1;
end
end else if (~wr_level_done_r5 && wl_sm_start)
wl_state_r <= #TCQ WL_CORSE_INC_WAIT1;
else if (wr_level_done_r5) begin
if (|corse_inc[dqs_count_r])
wl_state_r <= #TCQ WL_CORSE_INC;
else if (|fine_inc[dqs_count_w])
wl_state_r <= #TCQ WL_FINE_INC;
else if (dqs_count_r == (DQS_WIDTH-1))
wl_state_r <= #TCQ WL_IDLE;
else begin
wl_state_r <= #TCQ WL_2RANK_FINAL_TAP;
dqs_count_r <= #TCQ dqs_count_r + 1;
end
end
end
WL_CORSE_INC_WAIT1: begin
if (wl_sm_start)
wl_state_r <= #TCQ WL_CORSE_INC_WAIT2;
end
WL_CORSE_INC_WAIT2: begin
if (wl_sm_start)
wl_state_r <= #TCQ WL_WAIT;
end
WL_WAIT: begin
if (wl_sm_start)
wl_state_r <= #TCQ WL_EDGE_CHECK;
end
WL_EDGE_CHECK: begin // Look for the edge
if (wl_edge_detect_valid_r == 1'b0) begin
wl_state_r <= #TCQ WL_WAIT;
wl_edge_detect_valid_r <= #TCQ 1'b1;
end
// 0->1 transition detected with DQS
else if(rd_data_edge_detect_r[dqs_count_r] &&
wl_edge_detect_valid_r)
begin
wl_tap_count_r <= #TCQ wl_tap_count_r;
if ((SIM_CAL_OPTION == "FAST_CAL") || (RANKS < 2) ||
~oclkdelay_calib_done)
wl_state_r <= #TCQ WL_DQS_CNT;
else
wl_state_r <= #TCQ WL_2RANK_TAP_DEC;
end
// For the initial write leveling pass check only up to 56 taps, reserving
// the remaining taps for OCLK calibration.
else if((~wrlvl_tap_done_r) && (wl_tap_count_r > 6'd55)) begin
if (corse_cnt[dqs_count_r] < COARSE_TAPS) begin
wl_state_r <= #TCQ WL_FINE_DEC;
fine_dec_cnt <= #TCQ wl_tap_count_r;
end else begin
wrlvl_err <= #TCQ 1'b1;
wl_state_r <= #TCQ WL_IDLE;
end
end else begin
if (wl_tap_count_r < 6'd56) // allows write leveling to be reused for complex OCAL
wl_state_r <= #TCQ WL_FINE_INC;
else if (corse_cnt[dqs_count_r] < COARSE_TAPS) begin
wl_state_r <= #TCQ WL_FINE_DEC;
fine_dec_cnt <= #TCQ wl_tap_count_r;
end else begin
wrlvl_err <= #TCQ 1'b1;
wl_state_r <= #TCQ WL_IDLE;
end
end
end
WL_2RANK_TAP_DEC: begin
wl_state_r <= #TCQ WL_FINE_DEC;
fine_dec_cnt <= #TCQ wl_tap_count_r;
for (m = 0; m < DQS_WIDTH; m = m + 1)
corse_dec[m] <= #TCQ corse_cnt[m];
wl_edge_detect_valid_r <= #TCQ 1'b0;
dual_rnk_dec <= #TCQ 1'b1;
end
WL_DQS_CNT: begin
if ((SIM_CAL_OPTION == "FAST_CAL") ||
(dqs_count_r == (DQS_WIDTH-1)) ||
wrlvl_byte_redo) begin
dqs_count_r <= #TCQ dqs_count_r;
dq_cnt_inc <= #TCQ 1'b0;
end else begin
dqs_count_r <= #TCQ dqs_count_r + 1'b1;
dq_cnt_inc <= #TCQ 1'b1;
end
wl_state_r <= #TCQ WL_DQS_CHECK;
wl_edge_detect_valid_r <= #TCQ 1'b0;
end
WL_2RANK_DQS_CNT: begin
if ((SIM_CAL_OPTION == "FAST_CAL") ||
(dqs_count_r == (DQS_WIDTH-1))) begin
dqs_count_r <= #TCQ dqs_count_r;
dq_cnt_inc <= #TCQ 1'b0;
end else begin
dqs_count_r <= #TCQ dqs_count_r + 1'b1;
dq_cnt_inc <= #TCQ 1'b1;
end
wl_state_r <= #TCQ WL_DQS_CHECK;
wl_edge_detect_valid_r <= #TCQ 1'b0;
dual_rnk_dec <= #TCQ 1'b0;
end
WL_DQS_CHECK: begin // check if all DQS have been calibrated
wl_tap_count_r <= #TCQ 'd0;
if (dq_cnt_inc == 1'b0)begin
wrlvl_rank_done_r <= #TCQ 1'd1;
for (t = 0; t < DQS_WIDTH; t = t + 1)
corse_cnt[t] <= #TCQ 3'b0;
if ((SIM_CAL_OPTION == "FAST_CAL") || (RANKS < 2) || ~oclkdelay_calib_done) begin
wl_state_r <= #TCQ WL_IDLE;
if (wrlvl_byte_redo)
dqs_count_r <= #TCQ dqs_count_r;
else
dqs_count_r <= #TCQ 'd0;
end else if (rank_cnt_r == RANKS-1) begin
dqs_count_r <= #TCQ dqs_count_r;
if (RANKS > 1)
wl_state_r <= #TCQ WL_2RANK_FINAL_TAP;
else
wl_state_r <= #TCQ WL_IDLE;
end else begin
wl_state_r <= #TCQ WL_INIT;
dqs_count_r <= #TCQ 'd0;
end
if ((SIM_CAL_OPTION == "FAST_CAL") ||
(rank_cnt_r == RANKS-1)) begin
wr_level_done_r <= #TCQ 1'd1;
rank_cnt_r <= #TCQ 2'b00;
end else begin
wr_level_done_r <= #TCQ 1'd0;
rank_cnt_r <= #TCQ rank_cnt_r + 1'b1;
end
end else
wl_state_r <= #TCQ WL_INIT;
end
WL_2RANK_FINAL_TAP: begin
if (wr_level_done_r4 && ~wr_level_done_r5) begin
for(u = 0; u < DQS_WIDTH; u = u + 1) begin
corse_inc[u] <= #TCQ final_coarse_tap[u];
fine_inc[u] <= #TCQ final_val[u];
end
dqs_count_r <= #TCQ 'd0;
end else if (wr_level_done_r5) begin
if (|corse_inc[dqs_count_r])
wl_state_r <= #TCQ WL_CORSE_INC;
else if (|fine_inc[dqs_count_w])
wl_state_r <= #TCQ WL_FINE_INC;
end
end
endcase
end
end // always @ (posedge clk)
endmodule
|
module mig_7series_v2_3_ddr_phy_ocd_po_cntlr #
(parameter DQS_CNT_WIDTH = 3,
parameter DQS_WIDTH = 8,
parameter nCK_PER_CLK = 4,
parameter TCQ = 100)
(/*AUTOARG*/
// Outputs
scan_done, ocal_num_samples_done_r, oclkdelay_center_calib_start,
oclkdelay_center_calib_done, oclk_center_write_resume, ocd2stg2_inc,
ocd2stg2_dec, ocd2stg3_inc, ocd2stg3_dec, stg3, simp_stg3_final,
cmplx_stg3_final, simp_stg3_final_sel, ninety_offsets,
scanning_right, ocd_ktap_left, ocd_ktap_right, ocd_edge_detect_rdy,
taps_set, use_noise_window, ocal_scan_win_not_found,
// Inputs
clk, rst, reset_scan, oclkdelay_init_val, lim2ocal_stg3_right_lim,
lim2ocal_stg3_left_lim, complex_oclkdelay_calib_start,
po_counter_read_val, oclkdelay_calib_cnt, mmcm_edge_detect_done,
mmcm_lbclk_edge_aligned, poc_backup, phy_rddata_en_3, zero2fuzz,
fuzz2zero, oneeighty2fuzz, fuzz2oneeighty, z2f, f2z, o2f, f2o,
scan_right, samp_done, wl_po_fine_cnt_sel, po_rdy
);
input clk;
input rst;
input reset_scan;
reg scan_done_r;
output scan_done;
assign scan_done = scan_done_r;
output [5:0] simp_stg3_final_sel;
reg cmplx_samples_done_ns, cmplx_samples_done_r;
always @(posedge clk) cmplx_samples_done_r <= #TCQ cmplx_samples_done_ns;
output ocal_num_samples_done_r;
assign ocal_num_samples_done_r = cmplx_samples_done_r;
// Write Level signals during OCLKDELAY calibration
input [5:0] oclkdelay_init_val;
input [5:0] lim2ocal_stg3_right_lim;
input [5:0] lim2ocal_stg3_left_lim;
input complex_oclkdelay_calib_start;
reg oclkdelay_center_calib_start_ns, oclkdelay_center_calib_start_r;
always @(posedge clk) oclkdelay_center_calib_start_r <= #TCQ oclkdelay_center_calib_start_ns;
output oclkdelay_center_calib_start;
assign oclkdelay_center_calib_start = oclkdelay_center_calib_start_r;
reg oclkdelay_center_calib_done_ns, oclkdelay_center_calib_done_r;
always @(posedge clk) oclkdelay_center_calib_done_r <= #TCQ oclkdelay_center_calib_done_ns;
output oclkdelay_center_calib_done;
assign oclkdelay_center_calib_done = oclkdelay_center_calib_done_r;
reg oclk_center_write_resume_ns, oclk_center_write_resume_r;
always @(posedge clk) oclk_center_write_resume_r <= #TCQ oclk_center_write_resume_ns;
output oclk_center_write_resume;
assign oclk_center_write_resume = oclk_center_write_resume_r;
reg ocd2stg2_inc_r, ocd2stg2_dec_r, ocd2stg3_inc_r, ocd2stg3_dec_r;
output ocd2stg2_inc, ocd2stg2_dec, ocd2stg3_inc, ocd2stg3_dec;
assign ocd2stg2_inc = ocd2stg2_inc_r;
assign ocd2stg2_dec = ocd2stg2_dec_r;
assign ocd2stg3_inc = ocd2stg3_inc_r;
assign ocd2stg3_dec = ocd2stg3_dec_r;
// Remember, two stage 2 steps for every stg 3 step. And we need a sign bit.
reg [8:0] stg2_ns, stg2_r;
always @(posedge clk) stg2_r <= #TCQ stg2_ns;
reg [5:0] stg3_ns, stg3_r;
always @(posedge clk) stg3_r <= #TCQ stg3_ns;
output [5:0] stg3;
assign stg3 = stg3_r;
input [5:0] wl_po_fine_cnt_sel;
input [8:0] po_counter_read_val;
reg [5:0] po_counter_read_val_r;
always @(posedge clk) po_counter_read_val_r <= #TCQ po_counter_read_val[5:0];
reg [DQS_WIDTH*6-1:0] simp_stg3_final_ns, simp_stg3_final_r, cmplx_stg3_final_ns, cmplx_stg3_final_r;
always @(posedge clk) simp_stg3_final_r <= #TCQ simp_stg3_final_ns;
always @(posedge clk) cmplx_stg3_final_r <= #TCQ cmplx_stg3_final_ns;
output [DQS_WIDTH*6-1:0] simp_stg3_final, cmplx_stg3_final;
assign simp_stg3_final = simp_stg3_final_r;
assign cmplx_stg3_final = cmplx_stg3_final_r;
input [DQS_CNT_WIDTH:0] oclkdelay_calib_cnt;
wire [DQS_WIDTH*6-1:0] simp_stg3_final_shft = simp_stg3_final_r >> oclkdelay_calib_cnt * 6;
assign simp_stg3_final_sel = simp_stg3_final_shft[5:0];
wire [5:0] stg3_init = complex_oclkdelay_calib_start ? simp_stg3_final_sel : oclkdelay_init_val;
wire signed [8:0] stg2_steps = stg3_r > stg3_init
? -9'sd2 * $signed({3'b0, (stg3_r - stg3_init)})
: 9'sd2 * $signed({3'b0, (stg3_init - stg3_r)});
wire signed [8:0] stg2_target_ns = $signed({3'b0, wl_po_fine_cnt_sel}) + stg2_steps;
reg signed [8:0] stg2_target_r;
always @ (posedge clk) stg2_target_r <= #TCQ stg2_target_ns;
reg [5:0] stg2_final_ns, stg2_final_r;
always @(posedge clk) stg2_final_r <= #TCQ stg2_final_ns;
always @(*) stg2_final_ns = stg2_target_r[8] == 1'b1
? 6'd0
: stg2_target_r > 9'd63
? 6'd63
: stg2_target_r[5:0];
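// Worked example (illustrative only): if stg3_r sits 3 taps above stg3_init,
// stg2_steps = -6 (two stage-2 taps per stage-3 tap), so stg2_target becomes
// wl_po_fine_cnt_sel - 6; stg2_final_ns then clamps the signed result to the
// legal Phaser_Out fine-tap range of 0..63.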
wire final_stg2_inc = stg2_final_r > po_counter_read_val_r;
wire final_stg2_dec = stg2_final_r < po_counter_read_val_r;
wire left_lim = stg3_r == lim2ocal_stg3_left_lim;
wire right_lim = stg3_r == lim2ocal_stg3_right_lim;
reg [1:0] ninety_offsets_ns, ninety_offsets_r;
always @(posedge clk) ninety_offsets_r <= #TCQ ninety_offsets_ns;
output [1:0] ninety_offsets;
assign ninety_offsets = ninety_offsets_r;
reg scanning_right_ns, scanning_right_r;
always @(posedge clk) scanning_right_r <= #TCQ scanning_right_ns;
output scanning_right;
assign scanning_right = scanning_right_r;
reg ocd_ktap_left_ns, ocd_ktap_left_r, ocd_ktap_right_ns, ocd_ktap_right_r;
always @(posedge clk) ocd_ktap_left_r <= #TCQ ocd_ktap_left_ns;
always @(posedge clk) ocd_ktap_right_r <= #TCQ ocd_ktap_right_ns;
output ocd_ktap_left, ocd_ktap_right;
assign ocd_ktap_left = ocd_ktap_left_r;
assign ocd_ktap_right = ocd_ktap_right_r;
reg ocd_edge_detect_rdy_ns, ocd_edge_detect_rdy_r;
always @(posedge clk) ocd_edge_detect_rdy_r <= #TCQ ocd_edge_detect_rdy_ns;
output ocd_edge_detect_rdy;
assign ocd_edge_detect_rdy = ocd_edge_detect_rdy_r;
input mmcm_edge_detect_done;
input mmcm_lbclk_edge_aligned;
input poc_backup;
reg poc_backup_ns, poc_backup_r;
always @(posedge clk) poc_backup_r <= #TCQ poc_backup_ns;
reg taps_set_r;
output taps_set;
assign taps_set = taps_set_r;
input phy_rddata_en_3;
input [5:0] zero2fuzz, fuzz2zero, oneeighty2fuzz, fuzz2oneeighty;
input z2f, f2z, o2f, f2o;
wire zero = f2z && z2f;
wire noise = z2f && f2o;
wire oneeighty = f2o && o2f;
reg win_not_found;
reg [1:0] ninety_offsets_final;
reg [5:0] left, right, current_edge;
always @(*) begin
left = lim2ocal_stg3_left_lim;
right = lim2ocal_stg3_right_lim;
ninety_offsets_final = 2'd0;
win_not_found = 1'b0;
if (zero) begin
left = fuzz2zero;
right = zero2fuzz;
end
else if (noise) begin
left = zero2fuzz;
right = fuzz2oneeighty;
ninety_offsets_final = 2'd1;
end
else if (oneeighty) begin
left = fuzz2oneeighty;
right = oneeighty2fuzz;
ninety_offsets_final = 2'd2;
end
else if (z2f) begin
right = zero2fuzz;
end
else if (f2o) begin
left = fuzz2oneeighty;
ninety_offsets_final = 2'd2;
end
else if (f2z) begin
left = fuzz2zero;
end
else win_not_found = 1'b1;
current_edge = ocd_ktap_left_r ? left : right;
end // always @ begin
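// Summary (inferred from the priority encoding above): a complete zero
// window is preferred, then the noise window (ninety_offsets_final = 1),
// then the oneeighty window (ninety_offsets_final = 2); when only one fuzz
// boundary was found, the scan limit registers bound the open side, and
// win_not_found flags the case where no usable window exists.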
output use_noise_window;
assign use_noise_window = ninety_offsets == 2'd1;
reg ocal_scan_win_not_found_ns, ocal_scan_win_not_found_r;
always @(posedge clk) ocal_scan_win_not_found_r <= #TCQ ocal_scan_win_not_found_ns;
output ocal_scan_win_not_found;
assign ocal_scan_win_not_found = ocal_scan_win_not_found_r;
wire inc_po_ns = current_edge > stg3_r;
wire dec_po_ns = current_edge < stg3_r;
reg inc_po_r, dec_po_r;
always @(posedge clk) inc_po_r <= #TCQ inc_po_ns;
always @(posedge clk) dec_po_r <= #TCQ dec_po_ns;
input scan_right;
wire left_stop = left_lim || scan_right;
wire right_stop = right_lim || o2f;
reg [4:0] resume_wait_ns, resume_wait_r;
always @(posedge clk) resume_wait_r <= #TCQ resume_wait_ns;
wire resume_wait = |resume_wait_r;
reg po_done_ns, po_done_r;
always @(posedge clk) po_done_r <= #TCQ po_done_ns;
input samp_done;
input po_rdy;
reg up_ns, up_r;
always @(posedge clk) up_r <= #TCQ up_ns;
reg [1:0] two_ns, two_r;
always @(posedge clk) two_r <= #TCQ two_ns;
/* wire stg2_zero = ~|stg2_r;
wire [8:0] stg2_2_zero = stg2_r[8] ? 9'd0
: stg2_r > 9'd63
? 9'd63
: stg2_r; */
reg [3:0] sm_ns, sm_r;
always @(posedge clk) sm_r <= #TCQ sm_ns;
(* dont_touch = "true" *) reg phy_rddata_en_3_second_ns, phy_rddata_en_3_second_r;
always @(posedge clk) phy_rddata_en_3_second_r <= #TCQ phy_rddata_en_3_second_ns;
always @(*) phy_rddata_en_3_second_ns = ~reset_scan && (phy_rddata_en_3
? ~phy_rddata_en_3_second_r
: phy_rddata_en_3_second_r);
(* dont_touch = "true" *) wire use_samp_done = nCK_PER_CLK == 2 ? phy_rddata_en_3 && phy_rddata_en_3_second_r : phy_rddata_en_3;
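// Note (inferred from the toggle above): in 2:1 mode (nCK_PER_CLK == 2)
// phy_rddata_en_3_second_r toggles on every phy_rddata_en_3 pulse, so
// use_samp_done qualifies only every other read-data-enable beat; in 4:1
// mode every phy_rddata_en_3 pulse is used directly.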
reg po_center_wait;
reg po_slew;
reg po_finish_scan;
always @(*) begin
// Default next state assignments.
cmplx_samples_done_ns = cmplx_samples_done_r;
cmplx_stg3_final_ns = cmplx_stg3_final_r;
scanning_right_ns = scanning_right_r;
ninety_offsets_ns = ninety_offsets_r;
ocal_scan_win_not_found_ns = ocal_scan_win_not_found_r;
ocd_edge_detect_rdy_ns = ocd_edge_detect_rdy_r;
ocd_ktap_left_ns = ocd_ktap_left_r;
ocd_ktap_right_ns = ocd_ktap_right_r;
ocd2stg2_inc_r = 1'b0;
ocd2stg2_dec_r = 1'b0;
ocd2stg3_inc_r = 1'b0;
ocd2stg3_dec_r = 1'b0;
oclkdelay_center_calib_start_ns = oclkdelay_center_calib_start_r;
oclkdelay_center_calib_done_ns = 1'b0;
oclk_center_write_resume_ns = oclk_center_write_resume_r;
po_center_wait = 1'b0;
po_done_ns = po_done_r;
po_finish_scan = 1'b0;
po_slew = 1'b0;
poc_backup_ns = poc_backup_r;
scan_done_r = 1'b0;
simp_stg3_final_ns = simp_stg3_final_r;
sm_ns = sm_r;
taps_set_r = 1'b0;
up_ns = up_r;
stg2_ns = stg2_r;
stg3_ns = stg3_r;
two_ns = two_r;
resume_wait_ns = resume_wait_r;
if (rst == 1'b1) begin
// RESET next states
cmplx_samples_done_ns = 1'b0;
ocal_scan_win_not_found_ns = 1'b0;
ocd_ktap_left_ns = 1'b0;
ocd_ktap_right_ns = 1'b0;
ocd_edge_detect_rdy_ns = 1'b0;
oclk_center_write_resume_ns = 1'b0;
oclkdelay_center_calib_start_ns = 1'b0;
po_done_ns = 1'b1;
resume_wait_ns = 5'd0;
sm_ns = /*AK("READY")*/4'd0;
end else
// State based actions and next states.
case (sm_r)
/*AL("READY")*/4'd0:begin
poc_backup_ns = 1'b0;
stg2_ns = {3'b0, wl_po_fine_cnt_sel};
stg3_ns = stg3_init;
scanning_right_ns = 1'b0;
if (complex_oclkdelay_calib_start) cmplx_samples_done_ns = 1'b1;
if (!reset_scan && ~resume_wait) begin
cmplx_samples_done_ns = 1'b0;
ocal_scan_win_not_found_ns = 1'b0;
taps_set_r = 1'b1;
sm_ns = /*AK("SAMPLING")*/4'd1;
end
end
/*AL("SAMPLING")*/4'd1:begin
if (samp_done && use_samp_done) begin
if (complex_oclkdelay_calib_start) cmplx_samples_done_ns = 1'b1;
scanning_right_ns = scanning_right_r || left_stop;
if (right_stop && scanning_right_r) begin
oclkdelay_center_calib_start_ns = 1'b1;
ocd_ktap_left_ns = 1'b1;
ocal_scan_win_not_found_ns = win_not_found;
sm_ns = /*AK("SLEW_PO")*/4'd3;
end else begin
if (scanning_right_ns) ocd2stg3_inc_r = 1'b1;
else ocd2stg3_dec_r = 1'b1;
sm_ns = /*AK("PO_WAIT")*/4'd2;
end
end
end
/*AL("PO_WAIT")*/4'd2:begin
if (po_done_r && ~resume_wait) begin
taps_set_r = 1'b1;
sm_ns = /*AK("SAMPLING")*/4'd1;
cmplx_samples_done_ns = 1'b0;
end
end
/*AL("SLEW_PO")*/4'd3:begin
po_slew = 1'b1;
ninety_offsets_ns = |ninety_offsets_final ? 2'b01 : 2'b00;
if (~resume_wait) begin
if (po_done_r) begin
if (inc_po_r) ocd2stg3_inc_r = 1'b1;
else if (dec_po_r) ocd2stg3_dec_r = 1'b1;
else if (~resume_wait) begin
cmplx_samples_done_ns = 1'b0;
sm_ns = /*AK("ALIGN_EDGES")*/4'd4;
oclk_center_write_resume_ns = 1'b1;
end
end // if (po_done)
end
end // case: 3'd3
/*AL("ALIGN_EDGES")*/4'd4:
if (~resume_wait) begin
if (mmcm_edge_detect_done) begin
ocd_edge_detect_rdy_ns = 1'b0;
if (ocd_ktap_left_r) begin
ocd_ktap_left_ns = 1'b0;
ocd_ktap_right_ns = 1'b1;
oclk_center_write_resume_ns = 1'b0;
sm_ns = /*AK("SLEW_PO")*/4'd3;
end else if (ocd_ktap_right_r) begin
ocd_ktap_right_ns = 1'b0;
sm_ns = /*AK("WAIT_ONE")*/4'd5;
end else if (~mmcm_lbclk_edge_aligned) begin
sm_ns = /*AK("DQS_STOP_WAIT")*/4'd6;
oclk_center_write_resume_ns = 1'b0;
end else begin
if (ninety_offsets_r != ninety_offsets_final && ocd_edge_detect_rdy_r) begin
ninety_offsets_ns = ninety_offsets_r + 2'b01;
sm_ns = /*AK("WAIT_ONE")*/4'd5;
end else begin
oclk_center_write_resume_ns = 1'b0;
poc_backup_ns = poc_backup;
// stg2_ns = stg2_2_zero;
sm_ns = /*AK("FINISH_SCAN")*/4'd8;
end
end // else: !if(~mmcm_lbclk_edge_aligned)
end else ocd_edge_detect_rdy_ns = 1'b1;
end // if (~resume_wait)
/*AL("WAIT_ONE")*/4'd5:
sm_ns = /*AK("ALIGN_EDGES")*/4'd4;
/*AL("DQS_STOP_WAIT")*/4'd6:
if (~resume_wait) begin
ocd2stg3_dec_r = 1'b1;
sm_ns = /*AK("CENTER_PO_WAIT")*/4'd7;
end
/*AL("CENTER_PO_WAIT")*/4'd7: begin
po_center_wait = 1'b1; // Kludge to get around limitation of the AUTOs symbols.
if (po_done_r) begin
sm_ns = /*AK("ALIGN_EDGES")*/4'd4;
oclk_center_write_resume_ns = 1'b1;
end
end
/*AL("FINISH_SCAN")*/4'd8: begin
po_finish_scan = 1'b1;
if (resume_wait_r == 5'd1) begin
if (~poc_backup_r) begin
oclkdelay_center_calib_done_ns = 1'b1;
oclkdelay_center_calib_start_ns = 1'b0;
end
end
if (~resume_wait) begin
if (po_rdy)
if (poc_backup_r) begin
ocd2stg3_inc_r = 1'b1;
poc_backup_ns = 1'b0;
end
else if (~final_stg2_inc && ~final_stg2_dec) begin
if (complex_oclkdelay_calib_start) cmplx_stg3_final_ns[oclkdelay_calib_cnt*6+:6] = stg3_r;
else simp_stg3_final_ns[oclkdelay_calib_cnt*6+:6] = stg3_r;
sm_ns = /*AK("READY")*/4'd0;
scan_done_r = 1'b1;
end else begin
ocd2stg2_inc_r = final_stg2_inc;
ocd2stg2_dec_r = final_stg2_dec;
end
end // if (~resume_wait)
end // case: 4'd8
endcase // case (sm_r)
if (ocd2stg3_inc_r) begin
stg3_ns = stg3_r + 6'h1;
up_ns = 1'b0;
end
if (ocd2stg3_dec_r) begin
stg3_ns = stg3_r - 6'h1;
up_ns = 1'b1;
end
if (ocd2stg3_inc_r || ocd2stg3_dec_r) begin
po_done_ns = 1'b0;
two_ns = 2'b00;
end
if (~po_done_r)
if (po_rdy)
if (two_r == 2'b10 || po_center_wait || po_slew || po_finish_scan) po_done_ns = 1'b1;
else begin
two_ns = two_r + 2'b1;
if (up_r) begin
stg2_ns = stg2_r + 9'b1;
if (stg2_r >= 9'd0 && stg2_r < 9'd63) ocd2stg2_inc_r = 1'b1;
end else begin
stg2_ns = stg2_r - 9'b1;
if (stg2_r > 9'd0 && stg2_r <= 9'd63) ocd2stg2_dec_r = 1'b1;
end
end // else: !if(two_r == 2'b10)
if (ocd_ktap_left_ns && ~ocd_ktap_left_r) resume_wait_ns = 5'b1;
else if (oclk_center_write_resume_ns ^ oclk_center_write_resume_r) resume_wait_ns = 5'd15;
else if (cmplx_samples_done_ns & ~cmplx_samples_done_r ||
complex_oclkdelay_calib_start & reset_scan ||
poc_backup_r & ocd2stg3_inc_r) resume_wait_ns = 5'd31;
else if (|resume_wait_r) resume_wait_ns = resume_wait_r - 5'd1;
end // always @ begin
endmodule
|
module axi_crossbar_v2_1_crossbar #
(
parameter C_FAMILY = "none",
parameter integer C_NUM_SLAVE_SLOTS = 1,
parameter integer C_NUM_MASTER_SLOTS = 1,
parameter integer C_NUM_ADDR_RANGES = 1,
parameter integer C_AXI_ID_WIDTH = 1,
parameter integer C_AXI_ADDR_WIDTH = 32,
parameter integer C_AXI_DATA_WIDTH = 32,
parameter integer C_AXI_PROTOCOL = 0,
parameter [C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64-1:0] C_M_AXI_BASE_ADDR = {C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64{1'b1}},
parameter [C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64-1:0] C_M_AXI_HIGH_ADDR = {C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64{1'b0}},
parameter [C_NUM_SLAVE_SLOTS*64-1:0] C_S_AXI_BASE_ID = {C_NUM_SLAVE_SLOTS*64{1'b0}},
parameter [C_NUM_SLAVE_SLOTS*64-1:0] C_S_AXI_HIGH_ID = {C_NUM_SLAVE_SLOTS*64{1'b0}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_S_AXI_THREAD_ID_WIDTH = {C_NUM_SLAVE_SLOTS{32'h00000000}},
parameter integer C_AXI_SUPPORTS_USER_SIGNALS = 0,
parameter integer C_AXI_AWUSER_WIDTH = 1,
parameter integer C_AXI_ARUSER_WIDTH = 1,
parameter integer C_AXI_WUSER_WIDTH = 1,
parameter integer C_AXI_RUSER_WIDTH = 1,
parameter integer C_AXI_BUSER_WIDTH = 1,
parameter [C_NUM_SLAVE_SLOTS-1:0] C_S_AXI_SUPPORTS_WRITE = {C_NUM_SLAVE_SLOTS{1'b1}},
parameter [C_NUM_SLAVE_SLOTS-1:0] C_S_AXI_SUPPORTS_READ = {C_NUM_SLAVE_SLOTS{1'b1}},
parameter [C_NUM_MASTER_SLOTS-1:0] C_M_AXI_SUPPORTS_WRITE = {C_NUM_MASTER_SLOTS{1'b1}},
parameter [C_NUM_MASTER_SLOTS-1:0] C_M_AXI_SUPPORTS_READ = {C_NUM_MASTER_SLOTS{1'b1}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_WRITE_CONNECTIVITY = {C_NUM_MASTER_SLOTS*32{1'b1}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_READ_CONNECTIVITY = {C_NUM_MASTER_SLOTS*32{1'b1}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_S_AXI_SINGLE_THREAD = {C_NUM_SLAVE_SLOTS{32'h00000000}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_S_AXI_WRITE_ACCEPTANCE = {C_NUM_SLAVE_SLOTS{32'h00000001}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_S_AXI_READ_ACCEPTANCE = {C_NUM_SLAVE_SLOTS{32'h00000001}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_WRITE_ISSUING = {C_NUM_MASTER_SLOTS{32'h00000001}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_READ_ISSUING = {C_NUM_MASTER_SLOTS{32'h00000001}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_S_AXI_ARB_PRIORITY = {C_NUM_SLAVE_SLOTS{32'h00000000}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_SECURE = {C_NUM_MASTER_SLOTS{32'h00000000}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_ERR_MODE = {C_NUM_MASTER_SLOTS{32'h00000000}},
parameter integer C_RANGE_CHECK = 0,
parameter integer C_ADDR_DECODE = 0,
parameter [(C_NUM_MASTER_SLOTS+1)*32-1:0] C_W_ISSUE_WIDTH = {C_NUM_MASTER_SLOTS+1{32'h00000000}},
parameter [(C_NUM_MASTER_SLOTS+1)*32-1:0] C_R_ISSUE_WIDTH = {C_NUM_MASTER_SLOTS+1{32'h00000000}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_W_ACCEPT_WIDTH = {C_NUM_SLAVE_SLOTS{32'h00000000}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_R_ACCEPT_WIDTH = {C_NUM_SLAVE_SLOTS{32'h00000000}},
parameter integer C_DEBUG = 1
)
(
// Global Signals
input wire ACLK,
input wire ARESETN,
// Slave Interface Write Address Ports
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_AWID,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ADDR_WIDTH-1:0] S_AXI_AWADDR,
input wire [C_NUM_SLAVE_SLOTS*8-1:0] S_AXI_AWLEN,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_AWSIZE,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_AWBURST,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_AWLOCK,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_AWCACHE,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_AWPROT,
// input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_AWREGION,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_AWQOS,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_AWUSER_WIDTH-1:0] S_AXI_AWUSER,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_AWVALID,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_AWREADY,
// Slave Interface Write Data Ports
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_WID,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_DATA_WIDTH-1:0] S_AXI_WDATA,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_DATA_WIDTH/8-1:0] S_AXI_WSTRB,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_WLAST,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_WUSER_WIDTH-1:0] S_AXI_WUSER,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_WVALID,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_WREADY,
// Slave Interface Write Response Ports
output wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_BID,
output wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_BRESP,
output wire [C_NUM_SLAVE_SLOTS*C_AXI_BUSER_WIDTH-1:0] S_AXI_BUSER,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_BVALID,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_BREADY,
// Slave Interface Read Address Ports
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_ARID,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ADDR_WIDTH-1:0] S_AXI_ARADDR,
input wire [C_NUM_SLAVE_SLOTS*8-1:0] S_AXI_ARLEN,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_ARSIZE,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_ARBURST,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_ARLOCK,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_ARCACHE,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_ARPROT,
// input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_ARREGION,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_ARQOS,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ARUSER_WIDTH-1:0] S_AXI_ARUSER,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_ARVALID,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_ARREADY,
// Slave Interface Read Data Ports
output wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_RID,
output wire [C_NUM_SLAVE_SLOTS*C_AXI_DATA_WIDTH-1:0] S_AXI_RDATA,
output wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_RRESP,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_RLAST,
output wire [C_NUM_SLAVE_SLOTS*C_AXI_RUSER_WIDTH-1:0] S_AXI_RUSER,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_RVALID,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_RREADY,
// Master Interface Write Address Port
output wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_AWID,
output wire [C_NUM_MASTER_SLOTS*C_AXI_ADDR_WIDTH-1:0] M_AXI_AWADDR,
output wire [C_NUM_MASTER_SLOTS*8-1:0] M_AXI_AWLEN,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_AWSIZE,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_AWBURST,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_AWLOCK,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_AWCACHE,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_AWPROT,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_AWREGION,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_AWQOS,
output wire [C_NUM_MASTER_SLOTS*C_AXI_AWUSER_WIDTH-1:0] M_AXI_AWUSER,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_AWVALID,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_AWREADY,
// Master Interface Write Data Ports
output wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_WID,
output wire [C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH-1:0] M_AXI_WDATA,
output wire [C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH/8-1:0] M_AXI_WSTRB,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_WLAST,
output wire [C_NUM_MASTER_SLOTS*C_AXI_WUSER_WIDTH-1:0] M_AXI_WUSER,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_WVALID,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_WREADY,
// Master Interface Write Response Ports
input wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_BID,
input wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_BRESP,
input wire [C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH-1:0] M_AXI_BUSER,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_BVALID,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_BREADY,
// Master Interface Read Address Port
output wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_ARID,
output wire [C_NUM_MASTER_SLOTS*C_AXI_ADDR_WIDTH-1:0] M_AXI_ARADDR,
output wire [C_NUM_MASTER_SLOTS*8-1:0] M_AXI_ARLEN,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_ARSIZE,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_ARBURST,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_ARLOCK,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_ARCACHE,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_ARPROT,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_ARREGION,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_ARQOS,
output wire [C_NUM_MASTER_SLOTS*C_AXI_ARUSER_WIDTH-1:0] M_AXI_ARUSER,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_ARVALID,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_ARREADY,
// Master Interface Read Data Ports
input wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_RID,
input wire [C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH-1:0] M_AXI_RDATA,
input wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_RRESP,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_RLAST,
input wire [C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH-1:0] M_AXI_RUSER,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_RVALID,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_RREADY
);
localparam integer P_AXI4 = 0;
localparam integer P_AXI3 = 1;
localparam integer P_AXILITE = 2;
localparam integer P_WRITE = 0;
localparam integer P_READ = 1;
localparam integer P_NUM_MASTER_SLOTS_LOG = f_ceil_log2(C_NUM_MASTER_SLOTS);
localparam integer P_NUM_SLAVE_SLOTS_LOG = f_ceil_log2((C_NUM_SLAVE_SLOTS>1) ? C_NUM_SLAVE_SLOTS : 2);
localparam integer P_AXI_WID_WIDTH = (C_AXI_PROTOCOL == P_AXI3) ? C_AXI_ID_WIDTH : 1;
localparam integer P_ST_AWMESG_WIDTH = 2+4+4 + C_AXI_AWUSER_WIDTH;
localparam integer P_AA_AWMESG_WIDTH = C_AXI_ID_WIDTH + C_AXI_ADDR_WIDTH + 8+3+2+3+4 + P_ST_AWMESG_WIDTH;
localparam integer P_ST_ARMESG_WIDTH = 2+4+4 + C_AXI_ARUSER_WIDTH;
localparam integer P_AA_ARMESG_WIDTH = C_AXI_ID_WIDTH + C_AXI_ADDR_WIDTH + 8+3+2+3+4 + P_ST_ARMESG_WIDTH;
localparam integer P_ST_BMESG_WIDTH = 2 + C_AXI_BUSER_WIDTH;
localparam integer P_ST_RMESG_WIDTH = 2 + C_AXI_RUSER_WIDTH + C_AXI_DATA_WIDTH;
localparam integer P_WR_WMESG_WIDTH = C_AXI_DATA_WIDTH + C_AXI_DATA_WIDTH/8 + C_AXI_WUSER_WIDTH + P_AXI_WID_WIDTH;
localparam [31:0] P_BYPASS = 32'h00000000;
localparam [31:0] P_FWD_REV = 32'h00000001;
localparam [31:0] P_SIMPLE = 32'h00000007;
localparam [(C_NUM_MASTER_SLOTS+1)-1:0] P_M_AXI_SUPPORTS_READ = {1'b1, C_M_AXI_SUPPORTS_READ[0+:C_NUM_MASTER_SLOTS]};
localparam [(C_NUM_MASTER_SLOTS+1)-1:0] P_M_AXI_SUPPORTS_WRITE = {1'b1, C_M_AXI_SUPPORTS_WRITE[0+:C_NUM_MASTER_SLOTS]};
localparam [(C_NUM_MASTER_SLOTS+1)*32-1:0] P_M_AXI_WRITE_CONNECTIVITY = {{32{1'b1}}, C_M_AXI_WRITE_CONNECTIVITY[0+:C_NUM_MASTER_SLOTS*32]};
localparam [(C_NUM_MASTER_SLOTS+1)*32-1:0] P_M_AXI_READ_CONNECTIVITY = {{32{1'b1}}, C_M_AXI_READ_CONNECTIVITY[0+:C_NUM_MASTER_SLOTS*32]};
localparam [C_NUM_SLAVE_SLOTS*32-1:0] P_S_AXI_WRITE_CONNECTIVITY = f_si_write_connectivity(0);
localparam [C_NUM_SLAVE_SLOTS*32-1:0] P_S_AXI_READ_CONNECTIVITY = f_si_read_connectivity(0);
localparam [(C_NUM_MASTER_SLOTS+1)*32-1:0] P_M_AXI_READ_ISSUING = {32'h00000001, C_M_AXI_READ_ISSUING[0+:C_NUM_MASTER_SLOTS*32]};
localparam [(C_NUM_MASTER_SLOTS+1)*32-1:0] P_M_AXI_WRITE_ISSUING = {32'h00000001, C_M_AXI_WRITE_ISSUING[0+:C_NUM_MASTER_SLOTS*32]};
localparam P_DECERR = 2'b11;
//---------------------------------------------------------------------------
// Functions
//---------------------------------------------------------------------------
// Ceiling of log2(x)
function integer f_ceil_log2
(
input integer x
);
integer acc;
begin
acc=0;
while ((2**acc) < x)
acc = acc + 1;
f_ceil_log2 = acc;
end
endfunction
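// Example (illustrative only): f_ceil_log2(1) = 0, f_ceil_log2(2) = 1,
// f_ceil_log2(5) = 3, i.e. the minimum number of bits needed to encode
// x distinct slot indices.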
// Isolate thread bits of input S_ID and add to BASE_ID (RNG00) to form MI-side ID value
// only for end-point SI-slots
function [C_AXI_ID_WIDTH-1:0] f_extend_ID
(
input [C_AXI_ID_WIDTH-1:0] s_id,
input integer slot
);
begin
f_extend_ID = C_S_AXI_BASE_ID[slot*64+:C_AXI_ID_WIDTH] | (s_id & (C_S_AXI_BASE_ID[slot*64+:C_AXI_ID_WIDTH] ^ C_S_AXI_HIGH_ID[slot*64+:C_AXI_ID_WIDTH]));
end
endfunction
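// Example (illustrative only, with hypothetical parameter values): if
// C_S_AXI_BASE_ID = 'h10 and C_S_AXI_HIGH_ID = 'h13 for a slot, the XOR
// mask is 'h03, so f_extend_ID keeps only the two thread bits of s_id and
// ORs them onto the base: f_extend_ID('h2, slot) returns 'h12.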
// Write connectivity array transposed
function [C_NUM_SLAVE_SLOTS*32-1:0] f_si_write_connectivity
(
input integer null_arg
);
integer si_slot;
integer mi_slot;
reg [C_NUM_SLAVE_SLOTS*32-1:0] result;
begin
result = {C_NUM_SLAVE_SLOTS*32{1'b1}};
for (si_slot=0; si_slot<C_NUM_SLAVE_SLOTS; si_slot=si_slot+1) begin
for (mi_slot=0; mi_slot<C_NUM_MASTER_SLOTS; mi_slot=mi_slot+1) begin
result[si_slot*32+mi_slot] = C_M_AXI_WRITE_CONNECTIVITY[mi_slot*32+si_slot];
end
end
f_si_write_connectivity = result;
end
endfunction
// Read connectivity array transposed
function [C_NUM_SLAVE_SLOTS*32-1:0] f_si_read_connectivity
(
input integer null_arg
);
integer si_slot;
integer mi_slot;
reg [C_NUM_SLAVE_SLOTS*32-1:0] result;
begin
result = {C_NUM_SLAVE_SLOTS*32{1'b1}};
for (si_slot=0; si_slot<C_NUM_SLAVE_SLOTS; si_slot=si_slot+1) begin
for (mi_slot=0; mi_slot<C_NUM_MASTER_SLOTS; mi_slot=mi_slot+1) begin
result[si_slot*32+mi_slot] = C_M_AXI_READ_CONNECTIVITY[mi_slot*32+si_slot];
end
end
f_si_read_connectivity = result;
end
endfunction
genvar gen_si_slot;
genvar gen_mi_slot;
wire [C_NUM_SLAVE_SLOTS*P_ST_AWMESG_WIDTH-1:0] si_st_awmesg ;
wire [C_NUM_SLAVE_SLOTS*P_ST_AWMESG_WIDTH-1:0] st_tmp_awmesg ;
wire [C_NUM_SLAVE_SLOTS*P_AA_AWMESG_WIDTH-1:0] tmp_aa_awmesg ;
wire [P_AA_AWMESG_WIDTH-1:0] aa_mi_awmesg ;
wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] st_aa_awid ;
wire [C_NUM_SLAVE_SLOTS*C_AXI_ADDR_WIDTH-1:0] st_aa_awaddr ;
wire [C_NUM_SLAVE_SLOTS*8-1:0] st_aa_awlen ;
wire [C_NUM_SLAVE_SLOTS*3-1:0] st_aa_awsize ;
wire [C_NUM_SLAVE_SLOTS*2-1:0] st_aa_awlock ;
wire [C_NUM_SLAVE_SLOTS*3-1:0] st_aa_awprot ;
wire [C_NUM_SLAVE_SLOTS*4-1:0] st_aa_awregion ;
wire [C_NUM_SLAVE_SLOTS*8-1:0] st_aa_awerror ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] st_aa_awtarget_hot ;
wire [C_NUM_SLAVE_SLOTS*(P_NUM_MASTER_SLOTS_LOG+1)-1:0] st_aa_awtarget_enc ;
wire [P_NUM_SLAVE_SLOTS_LOG*1-1:0] aa_wm_awgrant_enc ;
wire [(C_NUM_MASTER_SLOTS+1)-1:0] aa_mi_awtarget_hot ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] st_aa_awvalid_qual ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] st_ss_awvalid ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] st_ss_awready ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] ss_wr_awvalid ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] ss_wr_awready ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] ss_aa_awvalid ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] ss_aa_awready ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] sa_wm_awvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] sa_wm_awready ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_awvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_awready ;
wire aa_sa_awvalid ;
wire aa_sa_awready ;
wire aa_mi_arready ;
wire mi_awvalid_en ;
wire sa_wm_awvalid_en ;
wire sa_wm_awready_mux ;
wire [C_NUM_SLAVE_SLOTS*P_ST_ARMESG_WIDTH-1:0] si_st_armesg ;
wire [C_NUM_SLAVE_SLOTS*P_ST_ARMESG_WIDTH-1:0] st_tmp_armesg ;
wire [C_NUM_SLAVE_SLOTS*P_AA_ARMESG_WIDTH-1:0] tmp_aa_armesg ;
wire [P_AA_ARMESG_WIDTH-1:0] aa_mi_armesg ;
wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] st_aa_arid ;
wire [C_NUM_SLAVE_SLOTS*C_AXI_ADDR_WIDTH-1:0] st_aa_araddr ;
wire [C_NUM_SLAVE_SLOTS*8-1:0] st_aa_arlen ;
wire [C_NUM_SLAVE_SLOTS*3-1:0] st_aa_arsize ;
wire [C_NUM_SLAVE_SLOTS*2-1:0] st_aa_arlock ;
wire [C_NUM_SLAVE_SLOTS*3-1:0] st_aa_arprot ;
wire [C_NUM_SLAVE_SLOTS*4-1:0] st_aa_arregion ;
wire [C_NUM_SLAVE_SLOTS*8-1:0] st_aa_arerror ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] st_aa_artarget_hot ;
wire [C_NUM_SLAVE_SLOTS*(P_NUM_MASTER_SLOTS_LOG+1)-1:0] st_aa_artarget_enc ;
wire [(C_NUM_MASTER_SLOTS+1)-1:0] aa_mi_artarget_hot ;
wire [P_NUM_SLAVE_SLOTS_LOG*1-1:0] aa_mi_argrant_enc ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] st_aa_arvalid_qual ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] st_aa_arvalid ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] st_aa_arready ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_arvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_arready ;
wire aa_mi_arvalid ;
wire mi_awready_mux ;
wire [C_NUM_SLAVE_SLOTS*P_ST_BMESG_WIDTH-1:0] st_si_bmesg ;
wire [(C_NUM_MASTER_SLOTS+1)*P_ST_BMESG_WIDTH-1:0] st_mr_bmesg ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_ID_WIDTH-1:0] st_mr_bid ;
wire [(C_NUM_MASTER_SLOTS+1)*2-1:0] st_mr_bresp ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_BUSER_WIDTH-1:0] st_mr_buser ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] st_mr_bvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] st_mr_bready ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] st_tmp_bready ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] st_tmp_bid_target ;
wire [(C_NUM_MASTER_SLOTS+1)*C_NUM_SLAVE_SLOTS-1:0] tmp_mr_bid_target ;
wire [(C_NUM_MASTER_SLOTS+1)*P_NUM_SLAVE_SLOTS_LOG-1:0] debug_bid_target_i ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] bid_match ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_ID_WIDTH-1:0] mi_bid ;
wire [(C_NUM_MASTER_SLOTS+1)*2-1:0] mi_bresp ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_BUSER_WIDTH-1:0] mi_buser ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_bvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_bready ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] bready_carry ;
wire [C_NUM_SLAVE_SLOTS*P_ST_RMESG_WIDTH-1:0] st_si_rmesg ;
wire [(C_NUM_MASTER_SLOTS+1)*P_ST_RMESG_WIDTH-1:0] st_mr_rmesg ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_ID_WIDTH-1:0] st_mr_rid ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_DATA_WIDTH-1:0] st_mr_rdata ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_RUSER_WIDTH-1:0] st_mr_ruser ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] st_mr_rlast ;
wire [(C_NUM_MASTER_SLOTS+1)*2-1:0] st_mr_rresp ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] st_mr_rvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] st_mr_rready ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] st_tmp_rready ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] st_tmp_rid_target ;
wire [(C_NUM_MASTER_SLOTS+1)*C_NUM_SLAVE_SLOTS-1:0] tmp_mr_rid_target ;
wire [(C_NUM_MASTER_SLOTS+1)*P_NUM_SLAVE_SLOTS_LOG-1:0] debug_rid_target_i ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] rid_match ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_ID_WIDTH-1:0] mi_rid ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_DATA_WIDTH-1:0] mi_rdata ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_RUSER_WIDTH-1:0] mi_ruser ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_rlast ;
wire [(C_NUM_MASTER_SLOTS+1)*2-1:0] mi_rresp ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_rvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_rready ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] rready_carry ;
wire [C_NUM_SLAVE_SLOTS*P_WR_WMESG_WIDTH-1:0] si_wr_wmesg ;
wire [C_NUM_SLAVE_SLOTS*P_WR_WMESG_WIDTH-1:0] wr_wm_wmesg ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] wr_wm_wlast ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] wr_tmp_wvalid ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] wr_tmp_wready ;
wire [(C_NUM_MASTER_SLOTS+1)*C_NUM_SLAVE_SLOTS-1:0] tmp_wm_wvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*C_NUM_SLAVE_SLOTS-1:0] tmp_wm_wready ;
wire [(C_NUM_MASTER_SLOTS+1)*P_WR_WMESG_WIDTH-1:0] wm_mr_wmesg ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_DATA_WIDTH-1:0] wm_mr_wdata ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_DATA_WIDTH/8-1:0] wm_mr_wstrb ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_ID_WIDTH-1:0] wm_mr_wid ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_WUSER_WIDTH-1:0] wm_mr_wuser ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] wm_mr_wlast ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] wm_mr_wvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] wm_mr_wready ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_DATA_WIDTH-1:0] mi_wdata ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_DATA_WIDTH/8-1:0] mi_wstrb ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_WUSER_WIDTH-1:0] mi_wuser ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_ID_WIDTH-1:0] mi_wid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_wlast ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_wvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_wready ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] w_cmd_push ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] w_cmd_pop ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] r_cmd_push ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] r_cmd_pop ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_awmaxissuing ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_armaxissuing ;
reg [(C_NUM_MASTER_SLOTS+1)*8-1:0] w_issuing_cnt ;
reg [(C_NUM_MASTER_SLOTS+1)*8-1:0] r_issuing_cnt ;
reg [8-1:0] debug_aw_trans_seq_i ;
reg [8-1:0] debug_ar_trans_seq_i ;
wire [(C_NUM_MASTER_SLOTS+1)*8-1:0] debug_w_trans_seq_i ;
reg [(C_NUM_MASTER_SLOTS+1)*8-1:0] debug_w_beat_cnt_i ;
reg aresetn_d = 1'b0; // Reset delay register
always @(posedge ACLK) begin
if (~ARESETN) begin
aresetn_d <= 1'b0;
end else begin
aresetn_d <= ARESETN;
end
end
wire reset;
assign reset = ~aresetn_d;
generate
for (gen_si_slot=0; gen_si_slot<C_NUM_SLAVE_SLOTS; gen_si_slot=gen_si_slot+1) begin : gen_slave_slots
if (C_S_AXI_SUPPORTS_READ[gen_si_slot]) begin : gen_si_read
axi_crossbar_v2_1_si_transactor # // "ST": SI Transactor (read channel)
(
.C_FAMILY (C_FAMILY),
.C_SI (gen_si_slot),
.C_DIR (P_READ),
.C_NUM_ADDR_RANGES (C_NUM_ADDR_RANGES),
.C_NUM_M (C_NUM_MASTER_SLOTS),
.C_NUM_M_LOG (P_NUM_MASTER_SLOTS_LOG),
.C_ACCEPTANCE (C_S_AXI_READ_ACCEPTANCE[gen_si_slot*32+:32]),
.C_ACCEPTANCE_LOG (C_R_ACCEPT_WIDTH[gen_si_slot*32+:32]),
.C_ID_WIDTH (C_AXI_ID_WIDTH),
.C_THREAD_ID_WIDTH (C_S_AXI_THREAD_ID_WIDTH[gen_si_slot*32+:32]),
.C_ADDR_WIDTH (C_AXI_ADDR_WIDTH),
.C_AMESG_WIDTH (P_ST_ARMESG_WIDTH),
.C_RMESG_WIDTH (P_ST_RMESG_WIDTH),
.C_BASE_ID (C_S_AXI_BASE_ID[gen_si_slot*64+:C_AXI_ID_WIDTH]),
.C_HIGH_ID (C_S_AXI_HIGH_ID[gen_si_slot*64+:C_AXI_ID_WIDTH]),
.C_SINGLE_THREAD (C_S_AXI_SINGLE_THREAD[gen_si_slot*32+:32]),
.C_BASE_ADDR (C_M_AXI_BASE_ADDR),
.C_HIGH_ADDR (C_M_AXI_HIGH_ADDR),
.C_TARGET_QUAL (P_S_AXI_READ_CONNECTIVITY[gen_si_slot*32+:C_NUM_MASTER_SLOTS]),
.C_M_AXI_SECURE (C_M_AXI_SECURE),
.C_RANGE_CHECK (C_RANGE_CHECK),
.C_ADDR_DECODE (C_ADDR_DECODE),
.C_ERR_MODE (C_M_AXI_ERR_MODE),
.C_DEBUG (C_DEBUG)
)
si_transactor_ar
(
.ACLK (ACLK),
.ARESET (reset),
.S_AID (f_extend_ID(S_AXI_ARID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH], gen_si_slot)),
.S_AADDR (S_AXI_ARADDR[gen_si_slot*C_AXI_ADDR_WIDTH+:C_AXI_ADDR_WIDTH]),
.S_ALEN (S_AXI_ARLEN[gen_si_slot*8+:8]),
.S_ASIZE (S_AXI_ARSIZE[gen_si_slot*3+:3]),
.S_ABURST (S_AXI_ARBURST[gen_si_slot*2+:2]),
.S_ALOCK (S_AXI_ARLOCK[gen_si_slot*2+:2]),
.S_APROT (S_AXI_ARPROT[gen_si_slot*3+:3]),
// .S_AREGION (S_AXI_ARREGION[gen_si_slot*4+:4]),
.S_AMESG (si_st_armesg[gen_si_slot*P_ST_ARMESG_WIDTH+:P_ST_ARMESG_WIDTH]),
.S_AVALID (S_AXI_ARVALID[gen_si_slot]),
.S_AREADY (S_AXI_ARREADY[gen_si_slot]),
.M_AID (st_aa_arid[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.M_AADDR (st_aa_araddr[gen_si_slot*C_AXI_ADDR_WIDTH+:C_AXI_ADDR_WIDTH]),
.M_ALEN (st_aa_arlen[gen_si_slot*8+:8]),
.M_ASIZE (st_aa_arsize[gen_si_slot*3+:3]),
.M_ALOCK (st_aa_arlock[gen_si_slot*2+:2]),
.M_APROT (st_aa_arprot[gen_si_slot*3+:3]),
.M_AREGION (st_aa_arregion[gen_si_slot*4+:4]),
.M_AMESG (st_tmp_armesg[gen_si_slot*P_ST_ARMESG_WIDTH+:P_ST_ARMESG_WIDTH]),
.M_ATARGET_HOT (st_aa_artarget_hot[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.M_ATARGET_ENC (st_aa_artarget_enc[gen_si_slot*(P_NUM_MASTER_SLOTS_LOG+1)+:(P_NUM_MASTER_SLOTS_LOG+1)]),
.M_AERROR (st_aa_arerror[gen_si_slot*8+:8]),
.M_AVALID_QUAL (st_aa_arvalid_qual[gen_si_slot]),
.M_AVALID (st_aa_arvalid[gen_si_slot]),
.M_AREADY (st_aa_arready[gen_si_slot]),
.S_RID (S_AXI_RID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.S_RMESG (st_si_rmesg[gen_si_slot*P_ST_RMESG_WIDTH+:P_ST_RMESG_WIDTH]),
.S_RLAST (S_AXI_RLAST[gen_si_slot]),
.S_RVALID (S_AXI_RVALID[gen_si_slot]),
.S_RREADY (S_AXI_RREADY[gen_si_slot]),
.M_RID (st_mr_rid),
.M_RLAST (st_mr_rlast),
.M_RMESG (st_mr_rmesg),
.M_RVALID (st_mr_rvalid),
.M_RREADY (st_tmp_rready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.M_RTARGET (st_tmp_rid_target[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.DEBUG_A_TRANS_SEQ (C_DEBUG ? debug_ar_trans_seq_i : 8'h0)
);
assign si_st_armesg[gen_si_slot*P_ST_ARMESG_WIDTH+:P_ST_ARMESG_WIDTH] = {
S_AXI_ARUSER[gen_si_slot*C_AXI_ARUSER_WIDTH+:C_AXI_ARUSER_WIDTH],
S_AXI_ARQOS[gen_si_slot*4+:4],
S_AXI_ARCACHE[gen_si_slot*4+:4],
S_AXI_ARBURST[gen_si_slot*2+:2]
};
assign tmp_aa_armesg[gen_si_slot*P_AA_ARMESG_WIDTH+:P_AA_ARMESG_WIDTH] = {
st_tmp_armesg[gen_si_slot*P_ST_ARMESG_WIDTH+:P_ST_ARMESG_WIDTH],
st_aa_arregion[gen_si_slot*4+:4],
st_aa_arprot[gen_si_slot*3+:3],
st_aa_arlock[gen_si_slot*2+:2],
st_aa_arsize[gen_si_slot*3+:3],
st_aa_arlen[gen_si_slot*8+:8],
st_aa_araddr[gen_si_slot*C_AXI_ADDR_WIDTH+:C_AXI_ADDR_WIDTH],
st_aa_arid[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]
};
assign S_AXI_RRESP[gen_si_slot*2+:2] = st_si_rmesg[gen_si_slot*P_ST_RMESG_WIDTH+:2];
assign S_AXI_RUSER[gen_si_slot*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH] = st_si_rmesg[gen_si_slot*P_ST_RMESG_WIDTH+2 +: C_AXI_RUSER_WIDTH];
assign S_AXI_RDATA[gen_si_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] = st_si_rmesg[gen_si_slot*P_ST_RMESG_WIDTH+2+C_AXI_RUSER_WIDTH +: C_AXI_DATA_WIDTH];
end else begin : gen_no_si_read
assign S_AXI_ARREADY[gen_si_slot] = 1'b0;
assign st_aa_arvalid[gen_si_slot] = 1'b0;
assign st_aa_arvalid_qual[gen_si_slot] = 1'b1;
assign tmp_aa_armesg[gen_si_slot*P_AA_ARMESG_WIDTH+:P_AA_ARMESG_WIDTH] = 0;
assign S_AXI_RID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] = 0;
assign S_AXI_RRESP[gen_si_slot*2+:2] = 0;
assign S_AXI_RUSER[gen_si_slot*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH] = 0;
assign S_AXI_RDATA[gen_si_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] = 0;
assign S_AXI_RVALID[gen_si_slot] = 1'b0;
assign S_AXI_RLAST[gen_si_slot] = 1'b0;
assign st_tmp_rready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)] = 0;
assign st_aa_artarget_hot[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)] = 0;
end // gen_si_read
if (C_S_AXI_SUPPORTS_WRITE[gen_si_slot]) begin : gen_si_write
axi_crossbar_v2_1_si_transactor # // "ST": SI Transactor (write channel)
(
.C_FAMILY (C_FAMILY),
.C_SI (gen_si_slot),
.C_DIR (P_WRITE),
.C_NUM_ADDR_RANGES (C_NUM_ADDR_RANGES),
.C_NUM_M (C_NUM_MASTER_SLOTS),
.C_NUM_M_LOG (P_NUM_MASTER_SLOTS_LOG),
.C_ACCEPTANCE (C_S_AXI_WRITE_ACCEPTANCE[gen_si_slot*32+:32]),
.C_ACCEPTANCE_LOG (C_W_ACCEPT_WIDTH[gen_si_slot*32+:32]),
.C_ID_WIDTH (C_AXI_ID_WIDTH),
.C_THREAD_ID_WIDTH (C_S_AXI_THREAD_ID_WIDTH[gen_si_slot*32+:32]),
.C_ADDR_WIDTH (C_AXI_ADDR_WIDTH),
.C_AMESG_WIDTH (P_ST_AWMESG_WIDTH),
.C_RMESG_WIDTH (P_ST_BMESG_WIDTH),
.C_BASE_ID (C_S_AXI_BASE_ID[gen_si_slot*64+:C_AXI_ID_WIDTH]),
.C_HIGH_ID (C_S_AXI_HIGH_ID[gen_si_slot*64+:C_AXI_ID_WIDTH]),
.C_SINGLE_THREAD (C_S_AXI_SINGLE_THREAD[gen_si_slot*32+:32]),
.C_BASE_ADDR (C_M_AXI_BASE_ADDR),
.C_HIGH_ADDR (C_M_AXI_HIGH_ADDR),
.C_TARGET_QUAL (P_S_AXI_WRITE_CONNECTIVITY[gen_si_slot*32+:C_NUM_MASTER_SLOTS]),
.C_M_AXI_SECURE (C_M_AXI_SECURE),
.C_RANGE_CHECK (C_RANGE_CHECK),
.C_ADDR_DECODE (C_ADDR_DECODE),
.C_ERR_MODE (C_M_AXI_ERR_MODE),
.C_DEBUG (C_DEBUG)
)
si_transactor_aw
(
.ACLK (ACLK),
.ARESET (reset),
.S_AID (f_extend_ID(S_AXI_AWID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH], gen_si_slot)),
.S_AADDR (S_AXI_AWADDR[gen_si_slot*C_AXI_ADDR_WIDTH+:C_AXI_ADDR_WIDTH]),
.S_ALEN (S_AXI_AWLEN[gen_si_slot*8+:8]),
.S_ASIZE (S_AXI_AWSIZE[gen_si_slot*3+:3]),
.S_ABURST (S_AXI_AWBURST[gen_si_slot*2+:2]),
.S_ALOCK (S_AXI_AWLOCK[gen_si_slot*2+:2]),
.S_APROT (S_AXI_AWPROT[gen_si_slot*3+:3]),
// .S_AREGION (S_AXI_AWREGION[gen_si_slot*4+:4]),
.S_AMESG (si_st_awmesg[gen_si_slot*P_ST_AWMESG_WIDTH+:P_ST_AWMESG_WIDTH]),
.S_AVALID (S_AXI_AWVALID[gen_si_slot]),
.S_AREADY (S_AXI_AWREADY[gen_si_slot]),
.M_AID (st_aa_awid[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.M_AADDR (st_aa_awaddr[gen_si_slot*C_AXI_ADDR_WIDTH+:C_AXI_ADDR_WIDTH]),
.M_ALEN (st_aa_awlen[gen_si_slot*8+:8]),
.M_ASIZE (st_aa_awsize[gen_si_slot*3+:3]),
.M_ALOCK (st_aa_awlock[gen_si_slot*2+:2]),
.M_APROT (st_aa_awprot[gen_si_slot*3+:3]),
.M_AREGION (st_aa_awregion[gen_si_slot*4+:4]),
.M_AMESG (st_tmp_awmesg[gen_si_slot*P_ST_AWMESG_WIDTH+:P_ST_AWMESG_WIDTH]),
.M_ATARGET_HOT (st_aa_awtarget_hot[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.M_ATARGET_ENC (st_aa_awtarget_enc[gen_si_slot*(P_NUM_MASTER_SLOTS_LOG+1)+:(P_NUM_MASTER_SLOTS_LOG+1)]),
.M_AERROR (st_aa_awerror[gen_si_slot*8+:8]),
.M_AVALID_QUAL (st_aa_awvalid_qual[gen_si_slot]),
.M_AVALID (st_ss_awvalid[gen_si_slot]),
.M_AREADY (st_ss_awready[gen_si_slot]),
.S_RID (S_AXI_BID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.S_RMESG (st_si_bmesg[gen_si_slot*P_ST_BMESG_WIDTH+:P_ST_BMESG_WIDTH]),
.S_RLAST (),
.S_RVALID (S_AXI_BVALID[gen_si_slot]),
.S_RREADY (S_AXI_BREADY[gen_si_slot]),
.M_RID (st_mr_bid),
.M_RLAST ({(C_NUM_MASTER_SLOTS+1){1'b1}}),
.M_RMESG (st_mr_bmesg),
.M_RVALID (st_mr_bvalid),
.M_RREADY (st_tmp_bready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.M_RTARGET (st_tmp_bid_target[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.DEBUG_A_TRANS_SEQ (C_DEBUG ? debug_aw_trans_seq_i : 8'h0)
);
// Note: Concatenation of mesg signals is from MSB to LSB; assignments that chop mesg signals appear in opposite order.
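// For example, the B-channel mesg bundle built further below is packed {BUSER, BRESP}: BRESP occupies
// bits [1:0] and BUSER bits [2 +: C_AXI_BUSER_WIDTH], which is why the S_AXI_BRESP / S_AXI_BUSER chop
// assignments index upward from bit 0, opposite to the MSB-to-LSB order of the concatenation.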
assign si_st_awmesg[gen_si_slot*P_ST_AWMESG_WIDTH+:P_ST_AWMESG_WIDTH] = {
S_AXI_AWUSER[gen_si_slot*C_AXI_AWUSER_WIDTH+:C_AXI_AWUSER_WIDTH],
S_AXI_AWQOS[gen_si_slot*4+:4],
S_AXI_AWCACHE[gen_si_slot*4+:4],
S_AXI_AWBURST[gen_si_slot*2+:2]
};
assign tmp_aa_awmesg[gen_si_slot*P_AA_AWMESG_WIDTH+:P_AA_AWMESG_WIDTH] = {
st_tmp_awmesg[gen_si_slot*P_ST_AWMESG_WIDTH+:P_ST_AWMESG_WIDTH],
st_aa_awregion[gen_si_slot*4+:4],
st_aa_awprot[gen_si_slot*3+:3],
st_aa_awlock[gen_si_slot*2+:2],
st_aa_awsize[gen_si_slot*3+:3],
st_aa_awlen[gen_si_slot*8+:8],
st_aa_awaddr[gen_si_slot*C_AXI_ADDR_WIDTH+:C_AXI_ADDR_WIDTH],
st_aa_awid[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]
};
assign S_AXI_BRESP[gen_si_slot*2+:2] = st_si_bmesg[gen_si_slot*P_ST_BMESG_WIDTH+:2];
assign S_AXI_BUSER[gen_si_slot*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH] = st_si_bmesg[gen_si_slot*P_ST_BMESG_WIDTH+2 +: C_AXI_BUSER_WIDTH];
// AW SI-transactor transfer completes upon completion of both W-router address acceptance (command push) and AW arbitration
axi_crossbar_v2_1_splitter # // "SS": Splitter from SI-Transactor (write channel)
(
.C_NUM_M (2)
)
splitter_aw_si
(
.ACLK (ACLK),
.ARESET (reset),
.S_VALID (st_ss_awvalid[gen_si_slot]),
.S_READY (st_ss_awready[gen_si_slot]),
.M_VALID ({ss_wr_awvalid[gen_si_slot], ss_aa_awvalid[gen_si_slot]}),
.M_READY ({ss_wr_awready[gen_si_slot], ss_aa_awready[gen_si_slot]})
);
axi_crossbar_v2_1_wdata_router # // "WR": Write data Router
(
.C_FAMILY (C_FAMILY),
.C_NUM_MASTER_SLOTS (C_NUM_MASTER_SLOTS+1),
.C_SELECT_WIDTH (P_NUM_MASTER_SLOTS_LOG+1),
.C_WMESG_WIDTH (P_WR_WMESG_WIDTH),
.C_FIFO_DEPTH_LOG (C_W_ACCEPT_WIDTH[gen_si_slot*32+:6])
)
wdata_router_w
(
.ACLK (ACLK),
.ARESET (reset),
// Write transfer input from the current SI-slot
.S_WMESG (si_wr_wmesg[gen_si_slot*P_WR_WMESG_WIDTH+:P_WR_WMESG_WIDTH]),
.S_WLAST (S_AXI_WLAST[gen_si_slot]),
.S_WVALID (S_AXI_WVALID[gen_si_slot]),
.S_WREADY (S_AXI_WREADY[gen_si_slot]),
// Vector of write transfer outputs to each MI-slot's W-mux
.M_WMESG (wr_wm_wmesg[gen_si_slot*(P_WR_WMESG_WIDTH)+:P_WR_WMESG_WIDTH]),
.M_WLAST (wr_wm_wlast[gen_si_slot]),
.M_WVALID (wr_tmp_wvalid[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.M_WREADY (wr_tmp_wready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
// AW command push from local SI-slot
.S_ASELECT (st_aa_awtarget_enc[gen_si_slot*(P_NUM_MASTER_SLOTS_LOG+1)+:(P_NUM_MASTER_SLOTS_LOG+1)]), // Target MI-slot
.S_AVALID (ss_wr_awvalid[gen_si_slot]),
.S_AREADY (ss_wr_awready[gen_si_slot])
);
assign si_wr_wmesg[gen_si_slot*P_WR_WMESG_WIDTH+:P_WR_WMESG_WIDTH] = {
((C_AXI_PROTOCOL == P_AXI3) ? f_extend_ID(S_AXI_WID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH], gen_si_slot) : 1'b0),
S_AXI_WUSER[gen_si_slot*C_AXI_WUSER_WIDTH+:C_AXI_WUSER_WIDTH],
S_AXI_WSTRB[gen_si_slot*C_AXI_DATA_WIDTH/8+:C_AXI_DATA_WIDTH/8],
S_AXI_WDATA[gen_si_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH]
};
end else begin : gen_no_si_write
assign S_AXI_AWREADY[gen_si_slot] = 1'b0;
assign ss_aa_awvalid[gen_si_slot] = 1'b0;
assign st_aa_awvalid_qual[gen_si_slot] = 1'b1;
assign tmp_aa_awmesg[gen_si_slot*P_AA_AWMESG_WIDTH+:P_AA_AWMESG_WIDTH] = 0;
assign S_AXI_BID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] = 0;
assign S_AXI_BRESP[gen_si_slot*2+:2] = 0;
assign S_AXI_BUSER[gen_si_slot*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH] = 0;
assign S_AXI_BVALID[gen_si_slot] = 1'b0;
assign st_tmp_bready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)] = 0;
assign S_AXI_WREADY[gen_si_slot] = 1'b0;
assign wr_wm_wmesg[gen_si_slot*(P_WR_WMESG_WIDTH)+:P_WR_WMESG_WIDTH] = 0;
assign wr_wm_wlast[gen_si_slot] = 1'b0;
assign wr_tmp_wvalid[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)] = 0;
assign st_aa_awtarget_hot[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)] = 0;
end // gen_si_write
end // gen_slave_slots
for (gen_mi_slot=0; gen_mi_slot<C_NUM_MASTER_SLOTS+1; gen_mi_slot=gen_mi_slot+1) begin : gen_master_slots
if (P_M_AXI_SUPPORTS_READ[gen_mi_slot]) begin : gen_mi_read
if (C_NUM_SLAVE_SLOTS>1) begin : gen_rid_decoder
axi_crossbar_v2_1_addr_decoder #
(
.C_FAMILY (C_FAMILY),
.C_NUM_TARGETS (C_NUM_SLAVE_SLOTS),
.C_NUM_TARGETS_LOG (P_NUM_SLAVE_SLOTS_LOG),
.C_NUM_RANGES (1),
.C_ADDR_WIDTH (C_AXI_ID_WIDTH),
.C_TARGET_ENC (C_DEBUG),
.C_TARGET_HOT (1),
.C_REGION_ENC (0),
.C_BASE_ADDR (C_S_AXI_BASE_ID),
.C_HIGH_ADDR (C_S_AXI_HIGH_ID),
.C_TARGET_QUAL (P_M_AXI_READ_CONNECTIVITY[gen_mi_slot*32+:C_NUM_SLAVE_SLOTS]),
.C_RESOLUTION (0)
)
rid_decoder_inst
(
.ADDR (st_mr_rid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.TARGET_HOT (tmp_mr_rid_target[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS]),
.TARGET_ENC (debug_rid_target_i[gen_mi_slot*P_NUM_SLAVE_SLOTS_LOG+:P_NUM_SLAVE_SLOTS_LOG]),
.MATCH (rid_match[gen_mi_slot]),
.REGION ()
);
end else begin : gen_no_rid_decoder
assign tmp_mr_rid_target[gen_mi_slot] = 1'b1; // All response transfers route to solo SI-slot.
assign rid_match[gen_mi_slot] = 1'b1;
end
assign st_mr_rmesg[gen_mi_slot*P_ST_RMESG_WIDTH+:P_ST_RMESG_WIDTH] = {
st_mr_rdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH],
st_mr_ruser[gen_mi_slot*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH],
st_mr_rresp[gen_mi_slot*2+:2]
};
end else begin : gen_no_mi_read
assign tmp_mr_rid_target[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS] = 0;
assign rid_match[gen_mi_slot] = 1'b0;
assign st_mr_rmesg[gen_mi_slot*P_ST_RMESG_WIDTH+:P_ST_RMESG_WIDTH] = 0;
end // gen_mi_read
if (P_M_AXI_SUPPORTS_WRITE[gen_mi_slot]) begin : gen_mi_write
if (C_NUM_SLAVE_SLOTS>1) begin : gen_bid_decoder
axi_crossbar_v2_1_addr_decoder #
(
.C_FAMILY (C_FAMILY),
.C_NUM_TARGETS (C_NUM_SLAVE_SLOTS),
.C_NUM_TARGETS_LOG (P_NUM_SLAVE_SLOTS_LOG),
.C_NUM_RANGES (1),
.C_ADDR_WIDTH (C_AXI_ID_WIDTH),
.C_TARGET_ENC (C_DEBUG),
.C_TARGET_HOT (1),
.C_REGION_ENC (0),
.C_BASE_ADDR (C_S_AXI_BASE_ID),
.C_HIGH_ADDR (C_S_AXI_HIGH_ID),
.C_TARGET_QUAL (P_M_AXI_WRITE_CONNECTIVITY[gen_mi_slot*32+:C_NUM_SLAVE_SLOTS]),
.C_RESOLUTION (0)
)
bid_decoder_inst
(
.ADDR (st_mr_bid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.TARGET_HOT (tmp_mr_bid_target[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS]),
.TARGET_ENC (debug_bid_target_i[gen_mi_slot*P_NUM_SLAVE_SLOTS_LOG+:P_NUM_SLAVE_SLOTS_LOG]),
.MATCH (bid_match[gen_mi_slot]),
.REGION ()
);
end else begin : gen_no_bid_decoder
assign tmp_mr_bid_target[gen_mi_slot] = 1'b1; // All response transfers route to solo SI-slot.
assign bid_match[gen_mi_slot] = 1'b1;
end
axi_crossbar_v2_1_wdata_mux # // "WM": Write data Mux, per MI-slot (incl error-handler)
(
.C_FAMILY (C_FAMILY),
.C_NUM_SLAVE_SLOTS (C_NUM_SLAVE_SLOTS),
.C_SELECT_WIDTH (P_NUM_SLAVE_SLOTS_LOG),
.C_WMESG_WIDTH (P_WR_WMESG_WIDTH),
.C_FIFO_DEPTH_LOG (C_W_ISSUE_WIDTH[gen_mi_slot*32+:6])
)
wdata_mux_w
(
.ACLK (ACLK),
.ARESET (reset),
// Vector of write transfer inputs from each SI-slot's W-router
.S_WMESG (wr_wm_wmesg),
.S_WLAST (wr_wm_wlast),
.S_WVALID (tmp_wm_wvalid[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS]),
.S_WREADY (tmp_wm_wready[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS]),
// Write transfer output to the current MI-slot
.M_WMESG (wm_mr_wmesg[gen_mi_slot*P_WR_WMESG_WIDTH+:P_WR_WMESG_WIDTH]),
.M_WLAST (wm_mr_wlast[gen_mi_slot]),
.M_WVALID (wm_mr_wvalid[gen_mi_slot]),
.M_WREADY (wm_mr_wready[gen_mi_slot]),
// AW command push from AW arbiter output
.S_ASELECT (aa_wm_awgrant_enc), // SI-slot selected by arbiter
.S_AVALID (sa_wm_awvalid[gen_mi_slot]),
.S_AREADY (sa_wm_awready[gen_mi_slot])
);
if (C_DEBUG) begin : gen_debug_w
// DEBUG WRITE BEAT COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_w_beat_cnt_i[gen_mi_slot*8+:8] <= 0;
end else begin
if (mi_wvalid[gen_mi_slot] & mi_wready[gen_mi_slot]) begin
if (mi_wlast[gen_mi_slot]) begin
debug_w_beat_cnt_i[gen_mi_slot*8+:8] <= 0;
end else begin
debug_w_beat_cnt_i[gen_mi_slot*8+:8] <= debug_w_beat_cnt_i[gen_mi_slot*8+:8] + 1;
end
end
end
end // clocked process
// DEBUG W-CHANNEL TRANSACTION SEQUENCE QUEUE
axi_data_fifo_v2_1_axic_srl_fifo #
(
.C_FAMILY (C_FAMILY),
.C_FIFO_WIDTH (8),
.C_FIFO_DEPTH_LOG (C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]),
.C_USE_FULL (0)
)
debug_w_seq_fifo
(
.ACLK (ACLK),
.ARESET (reset),
.S_MESG (debug_aw_trans_seq_i),
.S_VALID (sa_wm_awvalid[gen_mi_slot]),
.S_READY (),
.M_MESG (debug_w_trans_seq_i[gen_mi_slot*8+:8]),
.M_VALID (),
.M_READY (mi_wvalid[gen_mi_slot] & mi_wready[gen_mi_slot] & mi_wlast[gen_mi_slot])
);
end // gen_debug_w
assign wm_mr_wdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] = wm_mr_wmesg[gen_mi_slot*P_WR_WMESG_WIDTH +: C_AXI_DATA_WIDTH];
assign wm_mr_wstrb[gen_mi_slot*C_AXI_DATA_WIDTH/8+:C_AXI_DATA_WIDTH/8] = wm_mr_wmesg[gen_mi_slot*P_WR_WMESG_WIDTH+C_AXI_DATA_WIDTH +: C_AXI_DATA_WIDTH/8];
assign wm_mr_wuser[gen_mi_slot*C_AXI_WUSER_WIDTH+:C_AXI_WUSER_WIDTH] = wm_mr_wmesg[gen_mi_slot*P_WR_WMESG_WIDTH+C_AXI_DATA_WIDTH+C_AXI_DATA_WIDTH/8 +: C_AXI_WUSER_WIDTH];
assign wm_mr_wid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] = wm_mr_wmesg[gen_mi_slot*P_WR_WMESG_WIDTH+C_AXI_DATA_WIDTH+(C_AXI_DATA_WIDTH/8)+C_AXI_WUSER_WIDTH +: P_AXI_WID_WIDTH];
assign st_mr_bmesg[gen_mi_slot*P_ST_BMESG_WIDTH+:P_ST_BMESG_WIDTH] = {
st_mr_buser[gen_mi_slot*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH],
st_mr_bresp[gen_mi_slot*2+:2]
};
end else begin : gen_no_mi_write
assign tmp_mr_bid_target[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS] = 0;
assign bid_match[gen_mi_slot] = 1'b0;
assign wm_mr_wvalid[gen_mi_slot] = 0;
assign wm_mr_wlast[gen_mi_slot] = 0;
assign wm_mr_wdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] = 0;
assign wm_mr_wstrb[gen_mi_slot*C_AXI_DATA_WIDTH/8+:C_AXI_DATA_WIDTH/8] = 0;
assign wm_mr_wuser[gen_mi_slot*C_AXI_WUSER_WIDTH+:C_AXI_WUSER_WIDTH] = 0;
assign wm_mr_wid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] = 0;
assign st_mr_bmesg[gen_mi_slot*P_ST_BMESG_WIDTH+:P_ST_BMESG_WIDTH] = 0;
assign tmp_wm_wready[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS] = 0;
assign sa_wm_awready[gen_mi_slot] = 0;
end // gen_mi_write
for (gen_si_slot=0; gen_si_slot<C_NUM_SLAVE_SLOTS; gen_si_slot=gen_si_slot+1) begin : gen_trans_si
// Transpose handshakes from W-router (SxM) to W-mux (MxS).
assign tmp_wm_wvalid[gen_mi_slot*C_NUM_SLAVE_SLOTS+gen_si_slot] = wr_tmp_wvalid[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot];
assign wr_tmp_wready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] = tmp_wm_wready[gen_mi_slot*C_NUM_SLAVE_SLOTS+gen_si_slot];
// Transpose response enables from ID decoders (MxS) to si_transactors (SxM).
assign st_tmp_bid_target[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] = tmp_mr_bid_target[gen_mi_slot*C_NUM_SLAVE_SLOTS+gen_si_slot];
assign st_tmp_rid_target[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] = tmp_mr_rid_target[gen_mi_slot*C_NUM_SLAVE_SLOTS+gen_si_slot];
end // gen_trans_si
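// Accumulate per-MI-slot BREADY/RREADY as an OR carry-chain across SI-slots: the SI-slot 0 row is
// seeded here, each higher row below ORs in that SI-slot's transactor ready, and the highest row is
// consumed as st_mr_bready/st_mr_rready after the generate loop.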
assign bready_carry[gen_mi_slot] = st_tmp_bready[gen_mi_slot];
assign rready_carry[gen_mi_slot] = st_tmp_rready[gen_mi_slot];
for (gen_si_slot=1; gen_si_slot<C_NUM_SLAVE_SLOTS; gen_si_slot=gen_si_slot+1) begin : gen_resp_carry_si
assign bready_carry[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] = // Generate M_BREADY if ...
bready_carry[(gen_si_slot-1)*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] | // For any SI-slot (OR carry-chain across all SI-slots), ...
st_tmp_bready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot]; // The write SI transactor indicates BREADY for that MI-slot.
assign rready_carry[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] = // Generate M_RREADY if ...
rready_carry[(gen_si_slot-1)*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] | // For any SI-slot (OR carry-chain across all SI-slots), ...
st_tmp_rready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot]; // The read SI transactor indicates RREADY for that MI-slot.
end // gen_resp_carry_si
assign w_cmd_push[gen_mi_slot] = mi_awvalid[gen_mi_slot] && mi_awready[gen_mi_slot] && P_M_AXI_SUPPORTS_WRITE[gen_mi_slot];
assign r_cmd_push[gen_mi_slot] = mi_arvalid[gen_mi_slot] && mi_arready[gen_mi_slot] && P_M_AXI_SUPPORTS_READ[gen_mi_slot];
assign w_cmd_pop[gen_mi_slot] = st_mr_bvalid[gen_mi_slot] && st_mr_bready[gen_mi_slot] && P_M_AXI_SUPPORTS_WRITE[gen_mi_slot];
assign r_cmd_pop[gen_mi_slot] = st_mr_rvalid[gen_mi_slot] && st_mr_rready[gen_mi_slot] && st_mr_rlast[gen_mi_slot] && P_M_AXI_SUPPORTS_READ[gen_mi_slot];
// Disqualify arbitration of SI-slot if targeted MI-slot has reached its issuing limit.
assign mi_awmaxissuing[gen_mi_slot] = (w_issuing_cnt[gen_mi_slot*8 +: (C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] ==
P_M_AXI_WRITE_ISSUING[gen_mi_slot*32 +: (C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)]) & ~w_cmd_pop[gen_mi_slot];
assign mi_armaxissuing[gen_mi_slot] = (r_issuing_cnt[gen_mi_slot*8 +: (C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] ==
P_M_AXI_READ_ISSUING[gen_mi_slot*32 +: (C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)]) & ~r_cmd_pop[gen_mi_slot];
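// Outstanding-transaction counters, one per MI-slot: incremented when an AW/AR command is accepted at
// the MI side (w_cmd_push / r_cmd_push) and decremented when the corresponding response completes
// (BVALID accepted, or the last RVALID beat accepted). Only the low C_*_ISSUE_WIDTH+1 bits toggle.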
always @(posedge ACLK) begin
if (reset) begin
w_issuing_cnt[gen_mi_slot*8+:8] <= 0; // Some high-order bits remain constant 0
r_issuing_cnt[gen_mi_slot*8+:8] <= 0; // Some high-order bits remain constant 0
end else begin
if (w_cmd_push[gen_mi_slot] && ~w_cmd_pop[gen_mi_slot]) begin
w_issuing_cnt[gen_mi_slot*8+:(C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] <= w_issuing_cnt[gen_mi_slot*8+:(C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] + 1;
end else if (w_cmd_pop[gen_mi_slot] && ~w_cmd_push[gen_mi_slot] && (|w_issuing_cnt[gen_mi_slot*8+:(C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)])) begin
w_issuing_cnt[gen_mi_slot*8+:(C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] <= w_issuing_cnt[gen_mi_slot*8+:(C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] - 1;
end
if (r_cmd_push[gen_mi_slot] && ~r_cmd_pop[gen_mi_slot]) begin
r_issuing_cnt[gen_mi_slot*8+:(C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] <= r_issuing_cnt[gen_mi_slot*8+:(C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] + 1;
end else if (r_cmd_pop[gen_mi_slot] && ~r_cmd_push[gen_mi_slot] && (|r_issuing_cnt[gen_mi_slot*8+:(C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)])) begin
r_issuing_cnt[gen_mi_slot*8+:(C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] <= r_issuing_cnt[gen_mi_slot*8+:(C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] - 1;
end
end
end // Clocked process
// Reg-slice must break combinatorial path from M_BID and M_RID inputs to M_BREADY and M_RREADY outputs.
// (See m_rready_i and m_resp_en combinatorial assignments in si_transactor.)
// Reg-slice incurs +1 latency, but no bubble-cycles.
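// Only the B and R channels actually pass through this slice: the AW/AR inputs are tied off (address
// commands flow through the address arbiters instead) and W is configured as a pass-through
// (P_BYPASS), so no extra register stage is inserted on the write-data path.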
axi_register_slice_v2_1_axi_register_slice # // "MR": MI-side R/B-channel Reg-slice, per MI-slot (pass-through if only 1 SI-slot configured)
(
.C_FAMILY (C_FAMILY),
.C_AXI_PROTOCOL ((C_AXI_PROTOCOL == P_AXI3) ? P_AXI3 : P_AXI4),
.C_AXI_ID_WIDTH (C_AXI_ID_WIDTH),
.C_AXI_ADDR_WIDTH (1),
.C_AXI_DATA_WIDTH (C_AXI_DATA_WIDTH),
.C_AXI_SUPPORTS_USER_SIGNALS (C_AXI_SUPPORTS_USER_SIGNALS),
.C_AXI_AWUSER_WIDTH (1),
.C_AXI_ARUSER_WIDTH (1),
.C_AXI_WUSER_WIDTH (C_AXI_WUSER_WIDTH),
.C_AXI_RUSER_WIDTH (C_AXI_RUSER_WIDTH),
.C_AXI_BUSER_WIDTH (C_AXI_BUSER_WIDTH),
.C_REG_CONFIG_AW (P_BYPASS),
.C_REG_CONFIG_AR (P_BYPASS),
.C_REG_CONFIG_W (P_BYPASS),
.C_REG_CONFIG_R (P_M_AXI_SUPPORTS_READ[gen_mi_slot] ? P_FWD_REV : P_BYPASS),
.C_REG_CONFIG_B (P_M_AXI_SUPPORTS_WRITE[gen_mi_slot] ? P_SIMPLE : P_BYPASS)
)
reg_slice_mi
(
.aresetn (ARESETN),
.aclk (ACLK),
.s_axi_awid ({C_AXI_ID_WIDTH{1'b0}}),
.s_axi_awaddr ({1{1'b0}}),
.s_axi_awlen ({((C_AXI_PROTOCOL == P_AXI3) ? 4 : 8){1'b0}}),
.s_axi_awsize ({3{1'b0}}),
.s_axi_awburst ({2{1'b0}}),
.s_axi_awlock ({((C_AXI_PROTOCOL == P_AXI3) ? 2 : 1){1'b0}}),
.s_axi_awcache ({4{1'b0}}),
.s_axi_awprot ({3{1'b0}}),
.s_axi_awregion ({4{1'b0}}),
.s_axi_awqos ({4{1'b0}}),
.s_axi_awuser ({1{1'b0}}),
.s_axi_awvalid ({1{1'b0}}),
.s_axi_awready (),
.s_axi_wid (wm_mr_wid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.s_axi_wdata (wm_mr_wdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH]),
.s_axi_wstrb (wm_mr_wstrb[gen_mi_slot*C_AXI_DATA_WIDTH/8+:C_AXI_DATA_WIDTH/8]),
.s_axi_wlast (wm_mr_wlast[gen_mi_slot]),
.s_axi_wuser (wm_mr_wuser[gen_mi_slot*C_AXI_WUSER_WIDTH+:C_AXI_WUSER_WIDTH]),
.s_axi_wvalid (wm_mr_wvalid[gen_mi_slot]),
.s_axi_wready (wm_mr_wready[gen_mi_slot]),
.s_axi_bid (st_mr_bid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] ),
.s_axi_bresp (st_mr_bresp[gen_mi_slot*2+:2] ),
.s_axi_buser (st_mr_buser[gen_mi_slot*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH] ),
.s_axi_bvalid (st_mr_bvalid[gen_mi_slot*1+:1] ),
.s_axi_bready (st_mr_bready[gen_mi_slot*1+:1] ),
.s_axi_arid ({C_AXI_ID_WIDTH{1'b0}}),
.s_axi_araddr ({1{1'b0}}),
.s_axi_arlen ({((C_AXI_PROTOCOL == P_AXI3) ? 4 : 8){1'b0}}),
.s_axi_arsize ({3{1'b0}}),
.s_axi_arburst ({2{1'b0}}),
.s_axi_arlock ({((C_AXI_PROTOCOL == P_AXI3) ? 2 : 1){1'b0}}),
.s_axi_arcache ({4{1'b0}}),
.s_axi_arprot ({3{1'b0}}),
.s_axi_arregion ({4{1'b0}}),
.s_axi_arqos ({4{1'b0}}),
.s_axi_aruser ({1{1'b0}}),
.s_axi_arvalid ({1{1'b0}}),
.s_axi_arready (),
.s_axi_rid (st_mr_rid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] ),
.s_axi_rdata (st_mr_rdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] ),
.s_axi_rresp (st_mr_rresp[gen_mi_slot*2+:2] ),
.s_axi_rlast (st_mr_rlast[gen_mi_slot*1+:1] ),
.s_axi_ruser (st_mr_ruser[gen_mi_slot*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH] ),
.s_axi_rvalid (st_mr_rvalid[gen_mi_slot*1+:1] ),
.s_axi_rready (st_mr_rready[gen_mi_slot*1+:1] ),
.m_axi_awid (),
.m_axi_awaddr (),
.m_axi_awlen (),
.m_axi_awsize (),
.m_axi_awburst (),
.m_axi_awlock (),
.m_axi_awcache (),
.m_axi_awprot (),
.m_axi_awregion (),
.m_axi_awqos (),
.m_axi_awuser (),
.m_axi_awvalid (),
.m_axi_awready ({1{1'b0}}),
.m_axi_wid (mi_wid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.m_axi_wdata (mi_wdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH]),
.m_axi_wstrb (mi_wstrb[gen_mi_slot*C_AXI_DATA_WIDTH/8+:C_AXI_DATA_WIDTH/8]),
.m_axi_wlast (mi_wlast[gen_mi_slot]),
.m_axi_wuser (mi_wuser[gen_mi_slot*C_AXI_WUSER_WIDTH+:C_AXI_WUSER_WIDTH]),
.m_axi_wvalid (mi_wvalid[gen_mi_slot]),
.m_axi_wready (mi_wready[gen_mi_slot]),
.m_axi_bid (mi_bid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] ),
.m_axi_bresp (mi_bresp[gen_mi_slot*2+:2] ),
.m_axi_buser (mi_buser[gen_mi_slot*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH] ),
.m_axi_bvalid (mi_bvalid[gen_mi_slot*1+:1] ),
.m_axi_bready (mi_bready[gen_mi_slot*1+:1] ),
.m_axi_arid (),
.m_axi_araddr (),
.m_axi_arlen (),
.m_axi_arsize (),
.m_axi_arburst (),
.m_axi_arlock (),
.m_axi_arcache (),
.m_axi_arprot (),
.m_axi_arregion (),
.m_axi_arqos (),
.m_axi_aruser (),
.m_axi_arvalid (),
.m_axi_arready ({1{1'b0}}),
.m_axi_rid (mi_rid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] ),
.m_axi_rdata (mi_rdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] ),
.m_axi_rresp (mi_rresp[gen_mi_slot*2+:2] ),
.m_axi_rlast (mi_rlast[gen_mi_slot*1+:1] ),
.m_axi_ruser (mi_ruser[gen_mi_slot*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH] ),
.m_axi_rvalid (mi_rvalid[gen_mi_slot*1+:1] ),
.m_axi_rready (mi_rready[gen_mi_slot*1+:1] )
);
end // gen_master_slots (Next gen_mi_slot)
// Highest row of *ready_carry contains accumulated OR across all SI-slots, for each MI-slot.
assign st_mr_bready = bready_carry[(C_NUM_SLAVE_SLOTS-1)*(C_NUM_MASTER_SLOTS+1) +: C_NUM_MASTER_SLOTS+1];
assign st_mr_rready = rready_carry[(C_NUM_SLAVE_SLOTS-1)*(C_NUM_MASTER_SLOTS+1) +: C_NUM_MASTER_SLOTS+1];
// Assign MI-side B, R and W channel ports (exclude error handler signals).
assign mi_bid[0+:C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH] = M_AXI_BID;
assign mi_bvalid[0+:C_NUM_MASTER_SLOTS] = M_AXI_BVALID;
assign mi_bresp[0+:C_NUM_MASTER_SLOTS*2] = M_AXI_BRESP;
assign mi_buser[0+:C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH] = M_AXI_BUSER;
assign M_AXI_BREADY = mi_bready[0+:C_NUM_MASTER_SLOTS];
assign mi_rid[0+:C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH] = M_AXI_RID;
assign mi_rlast[0+:C_NUM_MASTER_SLOTS] = M_AXI_RLAST;
assign mi_rvalid[0+:C_NUM_MASTER_SLOTS] = M_AXI_RVALID;
assign mi_rresp[0+:C_NUM_MASTER_SLOTS*2] = M_AXI_RRESP;
assign mi_ruser[0+:C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH] = M_AXI_RUSER;
assign mi_rdata[0+:C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH] = M_AXI_RDATA;
assign M_AXI_RREADY = mi_rready[0+:C_NUM_MASTER_SLOTS];
assign M_AXI_WLAST = mi_wlast[0+:C_NUM_MASTER_SLOTS];
assign M_AXI_WVALID = mi_wvalid[0+:C_NUM_MASTER_SLOTS];
assign M_AXI_WUSER = mi_wuser[0+:C_NUM_MASTER_SLOTS*C_AXI_WUSER_WIDTH];
assign M_AXI_WID = (C_AXI_PROTOCOL == P_AXI3) ? mi_wid[0+:C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH] : 0;
assign M_AXI_WDATA = mi_wdata[0+:C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH];
assign M_AXI_WSTRB = mi_wstrb[0+:C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH/8];
assign mi_wready[0+:C_NUM_MASTER_SLOTS] = M_AXI_WREADY;
axi_crossbar_v2_1_addr_arbiter # // "AA": Addr Arbiter (AW channel)
(
.C_FAMILY (C_FAMILY),
.C_NUM_M (C_NUM_MASTER_SLOTS+1),
.C_NUM_S (C_NUM_SLAVE_SLOTS),
.C_NUM_S_LOG (P_NUM_SLAVE_SLOTS_LOG),
.C_MESG_WIDTH (P_AA_AWMESG_WIDTH),
.C_ARB_PRIORITY (C_S_AXI_ARB_PRIORITY)
)
addr_arbiter_aw
(
.ACLK (ACLK),
.ARESET (reset),
// Vector of SI-side AW command request inputs
.S_MESG (tmp_aa_awmesg),
.S_TARGET_HOT (st_aa_awtarget_hot),
.S_VALID (ss_aa_awvalid),
.S_VALID_QUAL (st_aa_awvalid_qual),
.S_READY (ss_aa_awready),
// Granted AW command output
.M_MESG (aa_mi_awmesg),
.M_TARGET_HOT (aa_mi_awtarget_hot), // MI-slot targeted by granted command
.M_GRANT_ENC (aa_wm_awgrant_enc), // SI-slot index of granted command
.M_VALID (aa_sa_awvalid),
.M_READY (aa_sa_awready),
.ISSUING_LIMIT (mi_awmaxissuing)
);
// Broadcast AW transfer payload to all MI-slots
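// The slice offsets below follow the P_AA_AWMESG_WIDTH layout built in tmp_aa_awmesg, MSB to LSB:
// {AWUSER, AWQOS, AWCACHE, AWBURST, AWREGION, AWPROT, AWLOCK, AWSIZE, AWLEN, AWADDR, AWID},
// chopped LSB-first starting at AWID. The AR broadcast further below uses the same layout.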
assign M_AXI_AWID = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[0+:C_AXI_ID_WIDTH]}};
assign M_AXI_AWADDR = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+:C_AXI_ADDR_WIDTH]}};
assign M_AXI_AWLEN = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH +:8]}};
assign M_AXI_AWSIZE = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8 +:3]}};
assign M_AXI_AWLOCK = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3 +:2]}};
assign M_AXI_AWPROT = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2 +:3]}};
assign M_AXI_AWREGION = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3 +:4]}};
assign M_AXI_AWBURST = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4 +:2]}};
assign M_AXI_AWCACHE = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4+2 +:4]}};
assign M_AXI_AWQOS = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4+2+4 +:4]}};
assign M_AXI_AWUSER = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4+2+4+4 +:C_AXI_AWUSER_WIDTH]}};
axi_crossbar_v2_1_addr_arbiter # // "AA": Addr Arbiter (AR channel)
(
.C_FAMILY (C_FAMILY),
.C_NUM_M (C_NUM_MASTER_SLOTS+1),
.C_NUM_S (C_NUM_SLAVE_SLOTS),
.C_NUM_S_LOG (P_NUM_SLAVE_SLOTS_LOG),
.C_MESG_WIDTH (P_AA_ARMESG_WIDTH),
.C_ARB_PRIORITY (C_S_AXI_ARB_PRIORITY)
)
addr_arbiter_ar
(
.ACLK (ACLK),
.ARESET (reset),
// Vector of SI-side AR command request inputs
.S_MESG (tmp_aa_armesg),
.S_TARGET_HOT (st_aa_artarget_hot),
.S_VALID_QUAL (st_aa_arvalid_qual),
.S_VALID (st_aa_arvalid),
.S_READY (st_aa_arready),
// Granted AR command output
.M_MESG (aa_mi_armesg),
.M_TARGET_HOT (aa_mi_artarget_hot), // MI-slot targeted by granted command
.M_GRANT_ENC (aa_mi_argrant_enc), // SI-slot index of granted command
.M_VALID (aa_mi_arvalid),
.M_READY (aa_mi_arready),
.ISSUING_LIMIT (mi_armaxissuing)
);
if (C_DEBUG) begin : gen_debug_trans_seq
// DEBUG WRITE TRANSACTION SEQUENCE COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_aw_trans_seq_i <= 1;
end else begin
if (aa_sa_awvalid && aa_sa_awready) begin
debug_aw_trans_seq_i <= debug_aw_trans_seq_i + 1;
end
end
end
// DEBUG READ TRANSACTION SEQUENCE COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_ar_trans_seq_i <= 1;
end else begin
if (aa_mi_arvalid && aa_mi_arready) begin
debug_ar_trans_seq_i <= debug_ar_trans_seq_i + 1;
end
end
end
end // gen_debug_trans_seq
// Broadcast AR transfer payload to all MI-slots
assign M_AXI_ARID = {C_NUM_MASTER_SLOTS{aa_mi_armesg[0+:C_AXI_ID_WIDTH]}};
assign M_AXI_ARADDR = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+:C_AXI_ADDR_WIDTH]}};
assign M_AXI_ARLEN = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH +:8]}};
assign M_AXI_ARSIZE = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8 +:3]}};
assign M_AXI_ARLOCK = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3 +:2]}};
assign M_AXI_ARPROT = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2 +:3]}};
assign M_AXI_ARREGION = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3 +:4]}};
assign M_AXI_ARBURST = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4 +:2]}};
assign M_AXI_ARCACHE = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4+2 +:4]}};
assign M_AXI_ARQOS = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4+2+4 +:4]}};
assign M_AXI_ARUSER = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4+2+4+4 +:C_AXI_ARUSER_WIDTH]}};
// AW arbiter command transfer completes upon completion of both M-side AW-channel transfer and W-mux address acceptance (command push).
axi_crossbar_v2_1_splitter # // "SA": Splitter for Write Addr Arbiter
(
.C_NUM_M (2)
)
splitter_aw_mi
(
.ACLK (ACLK),
.ARESET (reset),
.S_VALID (aa_sa_awvalid),
.S_READY (aa_sa_awready),
.M_VALID ({mi_awvalid_en, sa_wm_awvalid_en}),
.M_READY ({mi_awready_mux, sa_wm_awready_mux})
);
assign mi_awvalid = aa_mi_awtarget_hot & {C_NUM_MASTER_SLOTS+1{mi_awvalid_en}};
assign mi_awready_mux = |(aa_mi_awtarget_hot & mi_awready);
assign M_AXI_AWVALID = mi_awvalid[0+:C_NUM_MASTER_SLOTS]; // MI-slot # C_NUM_MASTER_SLOTS is the error handler
assign mi_awready[0+:C_NUM_MASTER_SLOTS] = M_AXI_AWREADY;
assign sa_wm_awvalid = aa_mi_awtarget_hot & {C_NUM_MASTER_SLOTS+1{sa_wm_awvalid_en}};
assign sa_wm_awready_mux = |(aa_mi_awtarget_hot & sa_wm_awready);
assign mi_arvalid = aa_mi_artarget_hot & {C_NUM_MASTER_SLOTS+1{aa_mi_arvalid}};
assign aa_mi_arready = |(aa_mi_artarget_hot & mi_arready);
assign M_AXI_ARVALID = mi_arvalid[0+:C_NUM_MASTER_SLOTS]; // MI-slot # C_NUM_MASTER_SLOTS is the error handler
assign mi_arready[0+:C_NUM_MASTER_SLOTS] = M_AXI_ARREADY;
// MI-slot # C_NUM_MASTER_SLOTS is the error handler
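// Transactions steered to this extra target slot (the high bit of the *target_hot vectors) are
// completed by the decerr_slave below with a DECERR response (P_DECERR = 2'b11) when C_RANGE_CHECK is
// set; otherwise the slot's handshakes are tied off and it never responds.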
if (C_RANGE_CHECK) begin : gen_decerr_slave
axi_crossbar_v2_1_decerr_slave #
(
.C_AXI_ID_WIDTH (C_AXI_ID_WIDTH),
.C_AXI_DATA_WIDTH (C_AXI_DATA_WIDTH),
.C_AXI_RUSER_WIDTH (C_AXI_RUSER_WIDTH),
.C_AXI_BUSER_WIDTH (C_AXI_BUSER_WIDTH),
.C_AXI_PROTOCOL (C_AXI_PROTOCOL),
.C_RESP (P_DECERR)
)
decerr_slave_inst
(
.S_AXI_ACLK (ACLK),
.S_AXI_ARESET (reset),
.S_AXI_AWID (aa_mi_awmesg[0+:C_AXI_ID_WIDTH]),
.S_AXI_AWVALID (mi_awvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_AWREADY (mi_awready[C_NUM_MASTER_SLOTS]),
.S_AXI_WLAST (mi_wlast[C_NUM_MASTER_SLOTS]),
.S_AXI_WVALID (mi_wvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_WREADY (mi_wready[C_NUM_MASTER_SLOTS]),
.S_AXI_BID (mi_bid[C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.S_AXI_BRESP (mi_bresp[C_NUM_MASTER_SLOTS*2+:2]),
.S_AXI_BUSER (mi_buser[C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH]),
.S_AXI_BVALID (mi_bvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_BREADY (mi_bready[C_NUM_MASTER_SLOTS]),
.S_AXI_ARID (aa_mi_armesg[0+:C_AXI_ID_WIDTH]),
.S_AXI_ARLEN (aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH +:8]),
.S_AXI_ARVALID (mi_arvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_ARREADY (mi_arready[C_NUM_MASTER_SLOTS]),
.S_AXI_RID (mi_rid[C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.S_AXI_RDATA (mi_rdata[C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH]),
.S_AXI_RRESP (mi_rresp[C_NUM_MASTER_SLOTS*2+:2]),
.S_AXI_RUSER (mi_ruser[C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH]),
.S_AXI_RLAST (mi_rlast[C_NUM_MASTER_SLOTS]),
.S_AXI_RVALID (mi_rvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_RREADY (mi_rready[C_NUM_MASTER_SLOTS])
);
end else begin : gen_no_decerr_slave
assign mi_awready[C_NUM_MASTER_SLOTS] = 1'b0;
assign mi_wready[C_NUM_MASTER_SLOTS] = 1'b0;
assign mi_arready[C_NUM_MASTER_SLOTS] = 1'b0;
assign mi_bid[C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] = 0;
assign mi_bresp[C_NUM_MASTER_SLOTS*2+:2] = 0;
assign mi_buser[C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH] = 0;
assign mi_bvalid[C_NUM_MASTER_SLOTS] = 1'b0;
assign mi_rid[C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] = 0;
assign mi_rdata[C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] = 0;
assign mi_rresp[C_NUM_MASTER_SLOTS*2+:2] = 0;
assign mi_ruser[C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH] = 0;
assign mi_rlast[C_NUM_MASTER_SLOTS] = 1'b0;
assign mi_rvalid[C_NUM_MASTER_SLOTS] = 1'b0;
end // gen_decerr_slave
endgenerate
endmodule
|
module axi_crossbar_v2_1_crossbar #
(
parameter C_FAMILY = "none",
parameter integer C_NUM_SLAVE_SLOTS = 1,
parameter integer C_NUM_MASTER_SLOTS = 1,
parameter integer C_NUM_ADDR_RANGES = 1,
parameter integer C_AXI_ID_WIDTH = 1,
parameter integer C_AXI_ADDR_WIDTH = 32,
parameter integer C_AXI_DATA_WIDTH = 32,
parameter integer C_AXI_PROTOCOL = 0,
parameter [C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64-1:0] C_M_AXI_BASE_ADDR = {C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64{1'b1}},
parameter [C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64-1:0] C_M_AXI_HIGH_ADDR = {C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64{1'b0}},
parameter [C_NUM_SLAVE_SLOTS*64-1:0] C_S_AXI_BASE_ID = {C_NUM_SLAVE_SLOTS*64{1'b0}},
parameter [C_NUM_SLAVE_SLOTS*64-1:0] C_S_AXI_HIGH_ID = {C_NUM_SLAVE_SLOTS*64{1'b0}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_S_AXI_THREAD_ID_WIDTH = {C_NUM_SLAVE_SLOTS{32'h00000000}},
parameter integer C_AXI_SUPPORTS_USER_SIGNALS = 0,
parameter integer C_AXI_AWUSER_WIDTH = 1,
parameter integer C_AXI_ARUSER_WIDTH = 1,
parameter integer C_AXI_WUSER_WIDTH = 1,
parameter integer C_AXI_RUSER_WIDTH = 1,
parameter integer C_AXI_BUSER_WIDTH = 1,
parameter [C_NUM_SLAVE_SLOTS-1:0] C_S_AXI_SUPPORTS_WRITE = {C_NUM_SLAVE_SLOTS{1'b1}},
parameter [C_NUM_SLAVE_SLOTS-1:0] C_S_AXI_SUPPORTS_READ = {C_NUM_SLAVE_SLOTS{1'b1}},
parameter [C_NUM_MASTER_SLOTS-1:0] C_M_AXI_SUPPORTS_WRITE = {C_NUM_MASTER_SLOTS{1'b1}},
parameter [C_NUM_MASTER_SLOTS-1:0] C_M_AXI_SUPPORTS_READ = {C_NUM_MASTER_SLOTS{1'b1}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_WRITE_CONNECTIVITY = {C_NUM_MASTER_SLOTS*32{1'b1}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_READ_CONNECTIVITY = {C_NUM_MASTER_SLOTS*32{1'b1}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_S_AXI_SINGLE_THREAD = {C_NUM_SLAVE_SLOTS{32'h00000000}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_S_AXI_WRITE_ACCEPTANCE = {C_NUM_SLAVE_SLOTS{32'h00000001}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_S_AXI_READ_ACCEPTANCE = {C_NUM_SLAVE_SLOTS{32'h00000001}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_WRITE_ISSUING = {C_NUM_MASTER_SLOTS{32'h00000001}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_READ_ISSUING = {C_NUM_MASTER_SLOTS{32'h00000001}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_S_AXI_ARB_PRIORITY = {C_NUM_SLAVE_SLOTS{32'h00000000}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_SECURE = {C_NUM_MASTER_SLOTS{32'h00000000}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_ERR_MODE = {C_NUM_MASTER_SLOTS{32'h00000000}},
parameter integer C_RANGE_CHECK = 0,
parameter integer C_ADDR_DECODE = 0,
parameter [(C_NUM_MASTER_SLOTS+1)*32-1:0] C_W_ISSUE_WIDTH = {C_NUM_MASTER_SLOTS+1{32'h00000000}},
parameter [(C_NUM_MASTER_SLOTS+1)*32-1:0] C_R_ISSUE_WIDTH = {C_NUM_MASTER_SLOTS+1{32'h00000000}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_W_ACCEPT_WIDTH = {C_NUM_SLAVE_SLOTS{32'h00000000}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_R_ACCEPT_WIDTH = {C_NUM_SLAVE_SLOTS{32'h00000000}},
parameter integer C_DEBUG = 1
)
(
// Global Signals
input wire ACLK,
input wire ARESETN,
// Slave Interface Write Address Ports
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_AWID,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ADDR_WIDTH-1:0] S_AXI_AWADDR,
input wire [C_NUM_SLAVE_SLOTS*8-1:0] S_AXI_AWLEN,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_AWSIZE,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_AWBURST,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_AWLOCK,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_AWCACHE,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_AWPROT,
// input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_AWREGION,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_AWQOS,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_AWUSER_WIDTH-1:0] S_AXI_AWUSER,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_AWVALID,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_AWREADY,
// Slave Interface Write Data Ports
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_WID,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_DATA_WIDTH-1:0] S_AXI_WDATA,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_DATA_WIDTH/8-1:0] S_AXI_WSTRB,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_WLAST,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_WUSER_WIDTH-1:0] S_AXI_WUSER,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_WVALID,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_WREADY,
// Slave Interface Write Response Ports
output wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_BID,
output wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_BRESP,
output wire [C_NUM_SLAVE_SLOTS*C_AXI_BUSER_WIDTH-1:0] S_AXI_BUSER,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_BVALID,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_BREADY,
// Slave Interface Read Address Ports
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_ARID,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ADDR_WIDTH-1:0] S_AXI_ARADDR,
input wire [C_NUM_SLAVE_SLOTS*8-1:0] S_AXI_ARLEN,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_ARSIZE,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_ARBURST,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_ARLOCK,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_ARCACHE,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_ARPROT,
// input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_ARREGION,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_ARQOS,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ARUSER_WIDTH-1:0] S_AXI_ARUSER,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_ARVALID,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_ARREADY,
// Slave Interface Read Data Ports
output wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_RID,
output wire [C_NUM_SLAVE_SLOTS*C_AXI_DATA_WIDTH-1:0] S_AXI_RDATA,
output wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_RRESP,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_RLAST,
output wire [C_NUM_SLAVE_SLOTS*C_AXI_RUSER_WIDTH-1:0] S_AXI_RUSER,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_RVALID,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_RREADY,
// Master Interface Write Address Port
output wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_AWID,
output wire [C_NUM_MASTER_SLOTS*C_AXI_ADDR_WIDTH-1:0] M_AXI_AWADDR,
output wire [C_NUM_MASTER_SLOTS*8-1:0] M_AXI_AWLEN,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_AWSIZE,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_AWBURST,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_AWLOCK,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_AWCACHE,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_AWPROT,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_AWREGION,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_AWQOS,
output wire [C_NUM_MASTER_SLOTS*C_AXI_AWUSER_WIDTH-1:0] M_AXI_AWUSER,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_AWVALID,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_AWREADY,
// Master Interface Write Data Ports
output wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_WID,
output wire [C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH-1:0] M_AXI_WDATA,
output wire [C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH/8-1:0] M_AXI_WSTRB,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_WLAST,
output wire [C_NUM_MASTER_SLOTS*C_AXI_WUSER_WIDTH-1:0] M_AXI_WUSER,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_WVALID,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_WREADY,
// Master Interface Write Response Ports
input wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_BID,
input wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_BRESP,
input wire [C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH-1:0] M_AXI_BUSER,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_BVALID,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_BREADY,
// Master Interface Read Address Port
output wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_ARID,
output wire [C_NUM_MASTER_SLOTS*C_AXI_ADDR_WIDTH-1:0] M_AXI_ARADDR,
output wire [C_NUM_MASTER_SLOTS*8-1:0] M_AXI_ARLEN,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_ARSIZE,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_ARBURST,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_ARLOCK,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_ARCACHE,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_ARPROT,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_ARREGION,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_ARQOS,
output wire [C_NUM_MASTER_SLOTS*C_AXI_ARUSER_WIDTH-1:0] M_AXI_ARUSER,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_ARVALID,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_ARREADY,
// Master Interface Read Data Ports
input wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_RID,
input wire [C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH-1:0] M_AXI_RDATA,
input wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_RRESP,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_RLAST,
input wire [C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH-1:0] M_AXI_RUSER,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_RVALID,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_RREADY
);
localparam integer P_AXI4 = 0;
localparam integer P_AXI3 = 1;
localparam integer P_AXILITE = 2;
localparam integer P_WRITE = 0;
localparam integer P_READ = 1;
localparam integer P_NUM_MASTER_SLOTS_LOG = f_ceil_log2(C_NUM_MASTER_SLOTS);
localparam integer P_NUM_SLAVE_SLOTS_LOG = f_ceil_log2((C_NUM_SLAVE_SLOTS>1) ? C_NUM_SLAVE_SLOTS : 2);
localparam integer P_AXI_WID_WIDTH = (C_AXI_PROTOCOL == P_AXI3) ? C_AXI_ID_WIDTH : 1;
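// Message-bundle widths: the ST_* bundles carry the side-band fields {USER, QOS, CACHE, BURST};
// the AA_* bundles prepend {REGION(4), PROT(3), LOCK(2), SIZE(3), LEN(8), ADDR, ID} so the address
// arbiters can forward a complete address command as a single vector.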
localparam integer P_ST_AWMESG_WIDTH = 2+4+4 + C_AXI_AWUSER_WIDTH;
localparam integer P_AA_AWMESG_WIDTH = C_AXI_ID_WIDTH + C_AXI_ADDR_WIDTH + 8+3+2+3+4 + P_ST_AWMESG_WIDTH;
localparam integer P_ST_ARMESG_WIDTH = 2+4+4 + C_AXI_ARUSER_WIDTH;
localparam integer P_AA_ARMESG_WIDTH = C_AXI_ID_WIDTH + C_AXI_ADDR_WIDTH + 8+3+2+3+4 + P_ST_ARMESG_WIDTH;
localparam integer P_ST_BMESG_WIDTH = 2 + C_AXI_BUSER_WIDTH;
localparam integer P_ST_RMESG_WIDTH = 2 + C_AXI_RUSER_WIDTH + C_AXI_DATA_WIDTH;
localparam integer P_WR_WMESG_WIDTH = C_AXI_DATA_WIDTH + C_AXI_DATA_WIDTH/8 + C_AXI_WUSER_WIDTH + P_AXI_WID_WIDTH;
localparam [31:0] P_BYPASS = 32'h00000000;
localparam [31:0] P_FWD_REV = 32'h00000001;
localparam [31:0] P_SIMPLE = 32'h00000007;
localparam [(C_NUM_MASTER_SLOTS+1)-1:0] P_M_AXI_SUPPORTS_READ = {1'b1, C_M_AXI_SUPPORTS_READ[0+:C_NUM_MASTER_SLOTS]};
localparam [(C_NUM_MASTER_SLOTS+1)-1:0] P_M_AXI_SUPPORTS_WRITE = {1'b1, C_M_AXI_SUPPORTS_WRITE[0+:C_NUM_MASTER_SLOTS]};
localparam [(C_NUM_MASTER_SLOTS+1)*32-1:0] P_M_AXI_WRITE_CONNECTIVITY = {{32{1'b1}}, C_M_AXI_WRITE_CONNECTIVITY[0+:C_NUM_MASTER_SLOTS*32]};
localparam [(C_NUM_MASTER_SLOTS+1)*32-1:0] P_M_AXI_READ_CONNECTIVITY = {{32{1'b1}}, C_M_AXI_READ_CONNECTIVITY[0+:C_NUM_MASTER_SLOTS*32]};
localparam [C_NUM_SLAVE_SLOTS*32-1:0] P_S_AXI_WRITE_CONNECTIVITY = f_si_write_connectivity(0);
localparam [C_NUM_SLAVE_SLOTS*32-1:0] P_S_AXI_READ_CONNECTIVITY = f_si_read_connectivity(0);
localparam [(C_NUM_MASTER_SLOTS+1)*32-1:0] P_M_AXI_READ_ISSUING = {32'h00000001, C_M_AXI_READ_ISSUING[0+:C_NUM_MASTER_SLOTS*32]};
localparam [(C_NUM_MASTER_SLOTS+1)*32-1:0] P_M_AXI_WRITE_ISSUING = {32'h00000001, C_M_AXI_WRITE_ISSUING[0+:C_NUM_MASTER_SLOTS*32]};
localparam P_DECERR = 2'b11;
//---------------------------------------------------------------------------
// Functions
//---------------------------------------------------------------------------
// Ceiling of log2(x)
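// e.g. f_ceil_log2(1) = 0, f_ceil_log2(2) = 1, f_ceil_log2(5) = 3 (smallest n such that 2**n >= x).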
function integer f_ceil_log2
(
input integer x
);
integer acc;
begin
acc=0;
while ((2**acc) < x)
acc = acc + 1;
f_ceil_log2 = acc;
end
endfunction
// Isolate thread bits of input S_ID and add to BASE_ID (RNG00) to form MI-side ID value
// only for end-point SI-slots
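// Illustrative example: with C_S_AXI_BASE_ID = 'h10 and C_S_AXI_HIGH_ID = 'h13 for a slot
// (two thread-ID bits), f_extend_ID returns 'h10 | (s_id & 'h03).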
function [C_AXI_ID_WIDTH-1:0] f_extend_ID
(
input [C_AXI_ID_WIDTH-1:0] s_id,
input integer slot
);
begin
f_extend_ID = C_S_AXI_BASE_ID[slot*64+:C_AXI_ID_WIDTH] | (s_id & (C_S_AXI_BASE_ID[slot*64+:C_AXI_ID_WIDTH] ^ C_S_AXI_HIGH_ID[slot*64+:C_AXI_ID_WIDTH]));
end
endfunction
// Write connectivity array transposed
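// C_M_AXI_WRITE_CONNECTIVITY is packed per MI-slot (bit [mi*32+si]); this returns the same
// information repacked per SI-slot (bit [si*32+mi]), with all untouched bits left at 1'b1.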
function [C_NUM_SLAVE_SLOTS*32-1:0] f_si_write_connectivity
(
input integer null_arg
);
integer si_slot;
integer mi_slot;
reg [C_NUM_SLAVE_SLOTS*32-1:0] result;
begin
result = {C_NUM_SLAVE_SLOTS*32{1'b1}};
for (si_slot=0; si_slot<C_NUM_SLAVE_SLOTS; si_slot=si_slot+1) begin
for (mi_slot=0; mi_slot<C_NUM_MASTER_SLOTS; mi_slot=mi_slot+1) begin
result[si_slot*32+mi_slot] = C_M_AXI_WRITE_CONNECTIVITY[mi_slot*32+si_slot];
end
end
f_si_write_connectivity = result;
end
endfunction
// Read connectivity array transposed
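// Same transposition as f_si_write_connectivity, applied to C_M_AXI_READ_CONNECTIVITY.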
function [C_NUM_SLAVE_SLOTS*32-1:0] f_si_read_connectivity
(
input integer null_arg
);
integer si_slot;
integer mi_slot;
reg [C_NUM_SLAVE_SLOTS*32-1:0] result;
begin
result = {C_NUM_SLAVE_SLOTS*32{1'b1}};
for (si_slot=0; si_slot<C_NUM_SLAVE_SLOTS; si_slot=si_slot+1) begin
for (mi_slot=0; mi_slot<C_NUM_MASTER_SLOTS; mi_slot=mi_slot+1) begin
result[si_slot*32+mi_slot] = C_M_AXI_READ_CONNECTIVITY[mi_slot*32+si_slot];
end
end
f_si_read_connectivity = result;
end
endfunction
genvar gen_si_slot;
genvar gen_mi_slot;
wire [C_NUM_SLAVE_SLOTS*P_ST_AWMESG_WIDTH-1:0] si_st_awmesg ;
wire [C_NUM_SLAVE_SLOTS*P_ST_AWMESG_WIDTH-1:0] st_tmp_awmesg ;
wire [C_NUM_SLAVE_SLOTS*P_AA_AWMESG_WIDTH-1:0] tmp_aa_awmesg ;
wire [P_AA_AWMESG_WIDTH-1:0] aa_mi_awmesg ;
wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] st_aa_awid ;
wire [C_NUM_SLAVE_SLOTS*C_AXI_ADDR_WIDTH-1:0] st_aa_awaddr ;
wire [C_NUM_SLAVE_SLOTS*8-1:0] st_aa_awlen ;
wire [C_NUM_SLAVE_SLOTS*3-1:0] st_aa_awsize ;
wire [C_NUM_SLAVE_SLOTS*2-1:0] st_aa_awlock ;
wire [C_NUM_SLAVE_SLOTS*3-1:0] st_aa_awprot ;
wire [C_NUM_SLAVE_SLOTS*4-1:0] st_aa_awregion ;
wire [C_NUM_SLAVE_SLOTS*8-1:0] st_aa_awerror ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] st_aa_awtarget_hot ;
wire [C_NUM_SLAVE_SLOTS*(P_NUM_MASTER_SLOTS_LOG+1)-1:0] st_aa_awtarget_enc ;
wire [P_NUM_SLAVE_SLOTS_LOG*1-1:0] aa_wm_awgrant_enc ;
wire [(C_NUM_MASTER_SLOTS+1)-1:0] aa_mi_awtarget_hot ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] st_aa_awvalid_qual ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] st_ss_awvalid ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] st_ss_awready ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] ss_wr_awvalid ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] ss_wr_awready ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] ss_aa_awvalid ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] ss_aa_awready ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] sa_wm_awvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] sa_wm_awready ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_awvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_awready ;
wire aa_sa_awvalid ;
wire aa_sa_awready ;
wire aa_mi_arready ;
wire mi_awvalid_en ;
wire sa_wm_awvalid_en ;
wire sa_wm_awready_mux ;
wire [C_NUM_SLAVE_SLOTS*P_ST_ARMESG_WIDTH-1:0] si_st_armesg ;
wire [C_NUM_SLAVE_SLOTS*P_ST_ARMESG_WIDTH-1:0] st_tmp_armesg ;
wire [C_NUM_SLAVE_SLOTS*P_AA_ARMESG_WIDTH-1:0] tmp_aa_armesg ;
wire [P_AA_ARMESG_WIDTH-1:0] aa_mi_armesg ;
wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] st_aa_arid ;
wire [C_NUM_SLAVE_SLOTS*C_AXI_ADDR_WIDTH-1:0] st_aa_araddr ;
wire [C_NUM_SLAVE_SLOTS*8-1:0] st_aa_arlen ;
wire [C_NUM_SLAVE_SLOTS*3-1:0] st_aa_arsize ;
wire [C_NUM_SLAVE_SLOTS*2-1:0] st_aa_arlock ;
wire [C_NUM_SLAVE_SLOTS*3-1:0] st_aa_arprot ;
wire [C_NUM_SLAVE_SLOTS*4-1:0] st_aa_arregion ;
wire [C_NUM_SLAVE_SLOTS*8-1:0] st_aa_arerror ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] st_aa_artarget_hot ;
wire [C_NUM_SLAVE_SLOTS*(P_NUM_MASTER_SLOTS_LOG+1)-1:0] st_aa_artarget_enc ;
wire [(C_NUM_MASTER_SLOTS+1)-1:0] aa_mi_artarget_hot ;
wire [P_NUM_SLAVE_SLOTS_LOG*1-1:0] aa_mi_argrant_enc ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] st_aa_arvalid_qual ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] st_aa_arvalid ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] st_aa_arready ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_arvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_arready ;
wire aa_mi_arvalid ;
wire mi_awready_mux ;
wire [C_NUM_SLAVE_SLOTS*P_ST_BMESG_WIDTH-1:0] st_si_bmesg ;
wire [(C_NUM_MASTER_SLOTS+1)*P_ST_BMESG_WIDTH-1:0] st_mr_bmesg ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_ID_WIDTH-1:0] st_mr_bid ;
wire [(C_NUM_MASTER_SLOTS+1)*2-1:0] st_mr_bresp ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_BUSER_WIDTH-1:0] st_mr_buser ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] st_mr_bvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] st_mr_bready ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] st_tmp_bready ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] st_tmp_bid_target ;
wire [(C_NUM_MASTER_SLOTS+1)*C_NUM_SLAVE_SLOTS-1:0] tmp_mr_bid_target ;
wire [(C_NUM_MASTER_SLOTS+1)*P_NUM_SLAVE_SLOTS_LOG-1:0] debug_bid_target_i ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] bid_match ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_ID_WIDTH-1:0] mi_bid ;
wire [(C_NUM_MASTER_SLOTS+1)*2-1:0] mi_bresp ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_BUSER_WIDTH-1:0] mi_buser ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_bvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_bready ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] bready_carry ;
wire [C_NUM_SLAVE_SLOTS*P_ST_RMESG_WIDTH-1:0] st_si_rmesg ;
wire [(C_NUM_MASTER_SLOTS+1)*P_ST_RMESG_WIDTH-1:0] st_mr_rmesg ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_ID_WIDTH-1:0] st_mr_rid ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_DATA_WIDTH-1:0] st_mr_rdata ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_RUSER_WIDTH-1:0] st_mr_ruser ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] st_mr_rlast ;
wire [(C_NUM_MASTER_SLOTS+1)*2-1:0] st_mr_rresp ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] st_mr_rvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] st_mr_rready ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] st_tmp_rready ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] st_tmp_rid_target ;
wire [(C_NUM_MASTER_SLOTS+1)*C_NUM_SLAVE_SLOTS-1:0] tmp_mr_rid_target ;
wire [(C_NUM_MASTER_SLOTS+1)*P_NUM_SLAVE_SLOTS_LOG-1:0] debug_rid_target_i ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] rid_match ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_ID_WIDTH-1:0] mi_rid ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_DATA_WIDTH-1:0] mi_rdata ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_RUSER_WIDTH-1:0] mi_ruser ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_rlast ;
wire [(C_NUM_MASTER_SLOTS+1)*2-1:0] mi_rresp ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_rvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_rready ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] rready_carry ;
wire [C_NUM_SLAVE_SLOTS*P_WR_WMESG_WIDTH-1:0] si_wr_wmesg ;
wire [C_NUM_SLAVE_SLOTS*P_WR_WMESG_WIDTH-1:0] wr_wm_wmesg ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] wr_wm_wlast ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] wr_tmp_wvalid ;
wire [C_NUM_SLAVE_SLOTS*(C_NUM_MASTER_SLOTS+1)-1:0] wr_tmp_wready ;
wire [(C_NUM_MASTER_SLOTS+1)*C_NUM_SLAVE_SLOTS-1:0] tmp_wm_wvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*C_NUM_SLAVE_SLOTS-1:0] tmp_wm_wready ;
wire [(C_NUM_MASTER_SLOTS+1)*P_WR_WMESG_WIDTH-1:0] wm_mr_wmesg ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_DATA_WIDTH-1:0] wm_mr_wdata ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_DATA_WIDTH/8-1:0] wm_mr_wstrb ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_ID_WIDTH-1:0] wm_mr_wid ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_WUSER_WIDTH-1:0] wm_mr_wuser ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] wm_mr_wlast ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] wm_mr_wvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] wm_mr_wready ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_DATA_WIDTH-1:0] mi_wdata ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_DATA_WIDTH/8-1:0] mi_wstrb ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_WUSER_WIDTH-1:0] mi_wuser ;
wire [(C_NUM_MASTER_SLOTS+1)*C_AXI_ID_WIDTH-1:0] mi_wid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_wlast ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_wvalid ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_wready ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] w_cmd_push ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] w_cmd_pop ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] r_cmd_push ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] r_cmd_pop ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_awmaxissuing ;
wire [(C_NUM_MASTER_SLOTS+1)*1-1:0] mi_armaxissuing ;
reg [(C_NUM_MASTER_SLOTS+1)*8-1:0] w_issuing_cnt ;
reg [(C_NUM_MASTER_SLOTS+1)*8-1:0] r_issuing_cnt ;
reg [8-1:0] debug_aw_trans_seq_i ;
reg [8-1:0] debug_ar_trans_seq_i ;
wire [(C_NUM_MASTER_SLOTS+1)*8-1:0] debug_w_trans_seq_i ;
reg [(C_NUM_MASTER_SLOTS+1)*8-1:0] debug_w_beat_cnt_i ;
reg aresetn_d = 1'b0; // Reset delay register
always @(posedge ACLK) begin
if (~ARESETN) begin
aresetn_d <= 1'b0;
end else begin
aresetn_d <= ARESETN;
end
end
wire reset;
assign reset = ~aresetn_d;
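// Internal reset is active-high; it deasserts one ACLK cycle after ARESETN is released.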
generate
for (gen_si_slot=0; gen_si_slot<C_NUM_SLAVE_SLOTS; gen_si_slot=gen_si_slot+1) begin : gen_slave_slots
if (C_S_AXI_SUPPORTS_READ[gen_si_slot]) begin : gen_si_read
axi_crossbar_v2_1_si_transactor # // "ST": SI Transactor (read channel)
(
.C_FAMILY (C_FAMILY),
.C_SI (gen_si_slot),
.C_DIR (P_READ),
.C_NUM_ADDR_RANGES (C_NUM_ADDR_RANGES),
.C_NUM_M (C_NUM_MASTER_SLOTS),
.C_NUM_M_LOG (P_NUM_MASTER_SLOTS_LOG),
.C_ACCEPTANCE (C_S_AXI_READ_ACCEPTANCE[gen_si_slot*32+:32]),
.C_ACCEPTANCE_LOG (C_R_ACCEPT_WIDTH[gen_si_slot*32+:32]),
.C_ID_WIDTH (C_AXI_ID_WIDTH),
.C_THREAD_ID_WIDTH (C_S_AXI_THREAD_ID_WIDTH[gen_si_slot*32+:32]),
.C_ADDR_WIDTH (C_AXI_ADDR_WIDTH),
.C_AMESG_WIDTH (P_ST_ARMESG_WIDTH),
.C_RMESG_WIDTH (P_ST_RMESG_WIDTH),
.C_BASE_ID (C_S_AXI_BASE_ID[gen_si_slot*64+:C_AXI_ID_WIDTH]),
.C_HIGH_ID (C_S_AXI_HIGH_ID[gen_si_slot*64+:C_AXI_ID_WIDTH]),
.C_SINGLE_THREAD (C_S_AXI_SINGLE_THREAD[gen_si_slot*32+:32]),
.C_BASE_ADDR (C_M_AXI_BASE_ADDR),
.C_HIGH_ADDR (C_M_AXI_HIGH_ADDR),
.C_TARGET_QUAL (P_S_AXI_READ_CONNECTIVITY[gen_si_slot*32+:C_NUM_MASTER_SLOTS]),
.C_M_AXI_SECURE (C_M_AXI_SECURE),
.C_RANGE_CHECK (C_RANGE_CHECK),
.C_ADDR_DECODE (C_ADDR_DECODE),
.C_ERR_MODE (C_M_AXI_ERR_MODE),
.C_DEBUG (C_DEBUG)
)
si_transactor_ar
(
.ACLK (ACLK),
.ARESET (reset),
.S_AID (f_extend_ID(S_AXI_ARID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH], gen_si_slot)),
.S_AADDR (S_AXI_ARADDR[gen_si_slot*C_AXI_ADDR_WIDTH+:C_AXI_ADDR_WIDTH]),
.S_ALEN (S_AXI_ARLEN[gen_si_slot*8+:8]),
.S_ASIZE (S_AXI_ARSIZE[gen_si_slot*3+:3]),
.S_ABURST (S_AXI_ARBURST[gen_si_slot*2+:2]),
.S_ALOCK (S_AXI_ARLOCK[gen_si_slot*2+:2]),
.S_APROT (S_AXI_ARPROT[gen_si_slot*3+:3]),
// .S_AREGION (S_AXI_ARREGION[gen_si_slot*4+:4]),
.S_AMESG (si_st_armesg[gen_si_slot*P_ST_ARMESG_WIDTH+:P_ST_ARMESG_WIDTH]),
.S_AVALID (S_AXI_ARVALID[gen_si_slot]),
.S_AREADY (S_AXI_ARREADY[gen_si_slot]),
.M_AID (st_aa_arid[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.M_AADDR (st_aa_araddr[gen_si_slot*C_AXI_ADDR_WIDTH+:C_AXI_ADDR_WIDTH]),
.M_ALEN (st_aa_arlen[gen_si_slot*8+:8]),
.M_ASIZE (st_aa_arsize[gen_si_slot*3+:3]),
.M_ALOCK (st_aa_arlock[gen_si_slot*2+:2]),
.M_APROT (st_aa_arprot[gen_si_slot*3+:3]),
.M_AREGION (st_aa_arregion[gen_si_slot*4+:4]),
.M_AMESG (st_tmp_armesg[gen_si_slot*P_ST_ARMESG_WIDTH+:P_ST_ARMESG_WIDTH]),
.M_ATARGET_HOT (st_aa_artarget_hot[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.M_ATARGET_ENC (st_aa_artarget_enc[gen_si_slot*(P_NUM_MASTER_SLOTS_LOG+1)+:(P_NUM_MASTER_SLOTS_LOG+1)]),
.M_AERROR (st_aa_arerror[gen_si_slot*8+:8]),
.M_AVALID_QUAL (st_aa_arvalid_qual[gen_si_slot]),
.M_AVALID (st_aa_arvalid[gen_si_slot]),
.M_AREADY (st_aa_arready[gen_si_slot]),
.S_RID (S_AXI_RID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.S_RMESG (st_si_rmesg[gen_si_slot*P_ST_RMESG_WIDTH+:P_ST_RMESG_WIDTH]),
.S_RLAST (S_AXI_RLAST[gen_si_slot]),
.S_RVALID (S_AXI_RVALID[gen_si_slot]),
.S_RREADY (S_AXI_RREADY[gen_si_slot]),
.M_RID (st_mr_rid),
.M_RLAST (st_mr_rlast),
.M_RMESG (st_mr_rmesg),
.M_RVALID (st_mr_rvalid),
.M_RREADY (st_tmp_rready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.M_RTARGET (st_tmp_rid_target[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.DEBUG_A_TRANS_SEQ (C_DEBUG ? debug_ar_trans_seq_i : 8'h0)
);
assign si_st_armesg[gen_si_slot*P_ST_ARMESG_WIDTH+:P_ST_ARMESG_WIDTH] = {
S_AXI_ARUSER[gen_si_slot*C_AXI_ARUSER_WIDTH+:C_AXI_ARUSER_WIDTH],
S_AXI_ARQOS[gen_si_slot*4+:4],
S_AXI_ARCACHE[gen_si_slot*4+:4],
S_AXI_ARBURST[gen_si_slot*2+:2]
};
assign tmp_aa_armesg[gen_si_slot*P_AA_ARMESG_WIDTH+:P_AA_ARMESG_WIDTH] = {
st_tmp_armesg[gen_si_slot*P_ST_ARMESG_WIDTH+:P_ST_ARMESG_WIDTH],
st_aa_arregion[gen_si_slot*4+:4],
st_aa_arprot[gen_si_slot*3+:3],
st_aa_arlock[gen_si_slot*2+:2],
st_aa_arsize[gen_si_slot*3+:3],
st_aa_arlen[gen_si_slot*8+:8],
st_aa_araddr[gen_si_slot*C_AXI_ADDR_WIDTH+:C_AXI_ADDR_WIDTH],
st_aa_arid[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]
};
assign S_AXI_RRESP[gen_si_slot*2+:2] = st_si_rmesg[gen_si_slot*P_ST_RMESG_WIDTH+:2];
assign S_AXI_RUSER[gen_si_slot*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH] = st_si_rmesg[gen_si_slot*P_ST_RMESG_WIDTH+2 +: C_AXI_RUSER_WIDTH];
assign S_AXI_RDATA[gen_si_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] = st_si_rmesg[gen_si_slot*P_ST_RMESG_WIDTH+2+C_AXI_RUSER_WIDTH +: C_AXI_DATA_WIDTH];
end else begin : gen_no_si_read
assign S_AXI_ARREADY[gen_si_slot] = 1'b0;
assign st_aa_arvalid[gen_si_slot] = 1'b0;
assign st_aa_arvalid_qual[gen_si_slot] = 1'b1;
assign tmp_aa_armesg[gen_si_slot*P_AA_ARMESG_WIDTH+:P_AA_ARMESG_WIDTH] = 0;
assign S_AXI_RID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] = 0;
assign S_AXI_RRESP[gen_si_slot*2+:2] = 0;
assign S_AXI_RUSER[gen_si_slot*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH] = 0;
assign S_AXI_RDATA[gen_si_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] = 0;
assign S_AXI_RVALID[gen_si_slot] = 1'b0;
assign S_AXI_RLAST[gen_si_slot] = 1'b0;
assign st_tmp_rready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)] = 0;
assign st_aa_artarget_hot[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)] = 0;
end // gen_si_read
if (C_S_AXI_SUPPORTS_WRITE[gen_si_slot]) begin : gen_si_write
axi_crossbar_v2_1_si_transactor # // "ST": SI Transactor (write channel)
(
.C_FAMILY (C_FAMILY),
.C_SI (gen_si_slot),
.C_DIR (P_WRITE),
.C_NUM_ADDR_RANGES (C_NUM_ADDR_RANGES),
.C_NUM_M (C_NUM_MASTER_SLOTS),
.C_NUM_M_LOG (P_NUM_MASTER_SLOTS_LOG),
.C_ACCEPTANCE (C_S_AXI_WRITE_ACCEPTANCE[gen_si_slot*32+:32]),
.C_ACCEPTANCE_LOG (C_W_ACCEPT_WIDTH[gen_si_slot*32+:32]),
.C_ID_WIDTH (C_AXI_ID_WIDTH),
.C_THREAD_ID_WIDTH (C_S_AXI_THREAD_ID_WIDTH[gen_si_slot*32+:32]),
.C_ADDR_WIDTH (C_AXI_ADDR_WIDTH),
.C_AMESG_WIDTH (P_ST_AWMESG_WIDTH),
.C_RMESG_WIDTH (P_ST_BMESG_WIDTH),
.C_BASE_ID (C_S_AXI_BASE_ID[gen_si_slot*64+:C_AXI_ID_WIDTH]),
.C_HIGH_ID (C_S_AXI_HIGH_ID[gen_si_slot*64+:C_AXI_ID_WIDTH]),
.C_SINGLE_THREAD (C_S_AXI_SINGLE_THREAD[gen_si_slot*32+:32]),
.C_BASE_ADDR (C_M_AXI_BASE_ADDR),
.C_HIGH_ADDR (C_M_AXI_HIGH_ADDR),
.C_TARGET_QUAL (P_S_AXI_WRITE_CONNECTIVITY[gen_si_slot*32+:C_NUM_MASTER_SLOTS]),
.C_M_AXI_SECURE (C_M_AXI_SECURE),
.C_RANGE_CHECK (C_RANGE_CHECK),
.C_ADDR_DECODE (C_ADDR_DECODE),
.C_ERR_MODE (C_M_AXI_ERR_MODE),
.C_DEBUG (C_DEBUG)
)
si_transactor_aw
(
.ACLK (ACLK),
.ARESET (reset),
.S_AID (f_extend_ID(S_AXI_AWID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH], gen_si_slot)),
.S_AADDR (S_AXI_AWADDR[gen_si_slot*C_AXI_ADDR_WIDTH+:C_AXI_ADDR_WIDTH]),
.S_ALEN (S_AXI_AWLEN[gen_si_slot*8+:8]),
.S_ASIZE (S_AXI_AWSIZE[gen_si_slot*3+:3]),
.S_ABURST (S_AXI_AWBURST[gen_si_slot*2+:2]),
.S_ALOCK (S_AXI_AWLOCK[gen_si_slot*2+:2]),
.S_APROT (S_AXI_AWPROT[gen_si_slot*3+:3]),
// .S_AREGION (S_AXI_AWREGION[gen_si_slot*4+:4]),
.S_AMESG (si_st_awmesg[gen_si_slot*P_ST_AWMESG_WIDTH+:P_ST_AWMESG_WIDTH]),
.S_AVALID (S_AXI_AWVALID[gen_si_slot]),
.S_AREADY (S_AXI_AWREADY[gen_si_slot]),
.M_AID (st_aa_awid[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.M_AADDR (st_aa_awaddr[gen_si_slot*C_AXI_ADDR_WIDTH+:C_AXI_ADDR_WIDTH]),
.M_ALEN (st_aa_awlen[gen_si_slot*8+:8]),
.M_ASIZE (st_aa_awsize[gen_si_slot*3+:3]),
.M_ALOCK (st_aa_awlock[gen_si_slot*2+:2]),
.M_APROT (st_aa_awprot[gen_si_slot*3+:3]),
.M_AREGION (st_aa_awregion[gen_si_slot*4+:4]),
.M_AMESG (st_tmp_awmesg[gen_si_slot*P_ST_AWMESG_WIDTH+:P_ST_AWMESG_WIDTH]),
.M_ATARGET_HOT (st_aa_awtarget_hot[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.M_ATARGET_ENC (st_aa_awtarget_enc[gen_si_slot*(P_NUM_MASTER_SLOTS_LOG+1)+:(P_NUM_MASTER_SLOTS_LOG+1)]),
.M_AERROR (st_aa_awerror[gen_si_slot*8+:8]),
.M_AVALID_QUAL (st_aa_awvalid_qual[gen_si_slot]),
.M_AVALID (st_ss_awvalid[gen_si_slot]),
.M_AREADY (st_ss_awready[gen_si_slot]),
.S_RID (S_AXI_BID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.S_RMESG (st_si_bmesg[gen_si_slot*P_ST_BMESG_WIDTH+:P_ST_BMESG_WIDTH]),
.S_RLAST (),
.S_RVALID (S_AXI_BVALID[gen_si_slot]),
.S_RREADY (S_AXI_BREADY[gen_si_slot]),
.M_RID (st_mr_bid),
.M_RLAST ({(C_NUM_MASTER_SLOTS+1){1'b1}}),
.M_RMESG (st_mr_bmesg),
.M_RVALID (st_mr_bvalid),
.M_RREADY (st_tmp_bready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.M_RTARGET (st_tmp_bid_target[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.DEBUG_A_TRANS_SEQ (C_DEBUG ? debug_aw_trans_seq_i : 8'h0)
);
// Note: Concatenation of mesg signals is from MSB to LSB; assignments that chop mesg signals appear in opposite order.
assign si_st_awmesg[gen_si_slot*P_ST_AWMESG_WIDTH+:P_ST_AWMESG_WIDTH] = {
S_AXI_AWUSER[gen_si_slot*C_AXI_AWUSER_WIDTH+:C_AXI_AWUSER_WIDTH],
S_AXI_AWQOS[gen_si_slot*4+:4],
S_AXI_AWCACHE[gen_si_slot*4+:4],
S_AXI_AWBURST[gen_si_slot*2+:2]
};
assign tmp_aa_awmesg[gen_si_slot*P_AA_AWMESG_WIDTH+:P_AA_AWMESG_WIDTH] = {
st_tmp_awmesg[gen_si_slot*P_ST_AWMESG_WIDTH+:P_ST_AWMESG_WIDTH],
st_aa_awregion[gen_si_slot*4+:4],
st_aa_awprot[gen_si_slot*3+:3],
st_aa_awlock[gen_si_slot*2+:2],
st_aa_awsize[gen_si_slot*3+:3],
st_aa_awlen[gen_si_slot*8+:8],
st_aa_awaddr[gen_si_slot*C_AXI_ADDR_WIDTH+:C_AXI_ADDR_WIDTH],
st_aa_awid[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]
};
assign S_AXI_BRESP[gen_si_slot*2+:2] = st_si_bmesg[gen_si_slot*P_ST_BMESG_WIDTH+:2];
assign S_AXI_BUSER[gen_si_slot*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH] = st_si_bmesg[gen_si_slot*P_ST_BMESG_WIDTH+2 +: C_AXI_BUSER_WIDTH];
// AW SI-transactor transfer completes upon completion of both W-router address acceptance (command push) and AW arbitration
axi_crossbar_v2_1_splitter # // "SS": Splitter from SI-Transactor (write channel)
(
.C_NUM_M (2)
)
splitter_aw_si
(
.ACLK (ACLK),
.ARESET (reset),
.S_VALID (st_ss_awvalid[gen_si_slot]),
.S_READY (st_ss_awready[gen_si_slot]),
.M_VALID ({ss_wr_awvalid[gen_si_slot], ss_aa_awvalid[gen_si_slot]}),
.M_READY ({ss_wr_awready[gen_si_slot], ss_aa_awready[gen_si_slot]})
);
axi_crossbar_v2_1_wdata_router # // "WR": Write data Router
(
.C_FAMILY (C_FAMILY),
.C_NUM_MASTER_SLOTS (C_NUM_MASTER_SLOTS+1),
.C_SELECT_WIDTH (P_NUM_MASTER_SLOTS_LOG+1),
.C_WMESG_WIDTH (P_WR_WMESG_WIDTH),
.C_FIFO_DEPTH_LOG (C_W_ACCEPT_WIDTH[gen_si_slot*32+:6])
)
wdata_router_w
(
.ACLK (ACLK),
.ARESET (reset),
// Write transfer input from the current SI-slot
.S_WMESG (si_wr_wmesg[gen_si_slot*P_WR_WMESG_WIDTH+:P_WR_WMESG_WIDTH]),
.S_WLAST (S_AXI_WLAST[gen_si_slot]),
.S_WVALID (S_AXI_WVALID[gen_si_slot]),
.S_WREADY (S_AXI_WREADY[gen_si_slot]),
// Vector of write transfer outputs to each MI-slot's W-mux
.M_WMESG (wr_wm_wmesg[gen_si_slot*(P_WR_WMESG_WIDTH)+:P_WR_WMESG_WIDTH]),
.M_WLAST (wr_wm_wlast[gen_si_slot]),
.M_WVALID (wr_tmp_wvalid[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
.M_WREADY (wr_tmp_wready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)]),
// AW command push from local SI-slot
.S_ASELECT (st_aa_awtarget_enc[gen_si_slot*(P_NUM_MASTER_SLOTS_LOG+1)+:(P_NUM_MASTER_SLOTS_LOG+1)]), // Target MI-slot
.S_AVALID (ss_wr_awvalid[gen_si_slot]),
.S_AREADY (ss_wr_awready[gen_si_slot])
);
assign si_wr_wmesg[gen_si_slot*P_WR_WMESG_WIDTH+:P_WR_WMESG_WIDTH] = {
((C_AXI_PROTOCOL == P_AXI3) ? f_extend_ID(S_AXI_WID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH], gen_si_slot) : 1'b0),
S_AXI_WUSER[gen_si_slot*C_AXI_WUSER_WIDTH+:C_AXI_WUSER_WIDTH],
S_AXI_WSTRB[gen_si_slot*C_AXI_DATA_WIDTH/8+:C_AXI_DATA_WIDTH/8],
S_AXI_WDATA[gen_si_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH]
};
end else begin : gen_no_si_write
assign S_AXI_AWREADY[gen_si_slot] = 1'b0;
assign ss_aa_awvalid[gen_si_slot] = 1'b0;
assign st_aa_awvalid_qual[gen_si_slot] = 1'b1;
assign tmp_aa_awmesg[gen_si_slot*P_AA_AWMESG_WIDTH+:P_AA_AWMESG_WIDTH] = 0;
assign S_AXI_BID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] = 0;
assign S_AXI_BRESP[gen_si_slot*2+:2] = 0;
assign S_AXI_BUSER[gen_si_slot*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH] = 0;
assign S_AXI_BVALID[gen_si_slot] = 1'b0;
assign st_tmp_bready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)] = 0;
assign S_AXI_WREADY[gen_si_slot] = 1'b0;
assign wr_wm_wmesg[gen_si_slot*(P_WR_WMESG_WIDTH)+:P_WR_WMESG_WIDTH] = 0;
assign wr_wm_wlast[gen_si_slot] = 1'b0;
assign wr_tmp_wvalid[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)] = 0;
assign st_aa_awtarget_hot[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+:(C_NUM_MASTER_SLOTS+1)] = 0;
end // gen_si_write
end // gen_slave_slots
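// Per-MI-slot logic: B/R-channel ID decoders, write-data mux, issuing counters and MI-side register slice. The extra (last) slot is the error handler.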
for (gen_mi_slot=0; gen_mi_slot<C_NUM_MASTER_SLOTS+1; gen_mi_slot=gen_mi_slot+1) begin : gen_master_slots
if (P_M_AXI_SUPPORTS_READ[gen_mi_slot]) begin : gen_mi_read
if (C_NUM_SLAVE_SLOTS>1) begin : gen_rid_decoder
axi_crossbar_v2_1_addr_decoder #
(
.C_FAMILY (C_FAMILY),
.C_NUM_TARGETS (C_NUM_SLAVE_SLOTS),
.C_NUM_TARGETS_LOG (P_NUM_SLAVE_SLOTS_LOG),
.C_NUM_RANGES (1),
.C_ADDR_WIDTH (C_AXI_ID_WIDTH),
.C_TARGET_ENC (C_DEBUG),
.C_TARGET_HOT (1),
.C_REGION_ENC (0),
.C_BASE_ADDR (C_S_AXI_BASE_ID),
.C_HIGH_ADDR (C_S_AXI_HIGH_ID),
.C_TARGET_QUAL (P_M_AXI_READ_CONNECTIVITY[gen_mi_slot*32+:C_NUM_SLAVE_SLOTS]),
.C_RESOLUTION (0)
)
rid_decoder_inst
(
.ADDR (st_mr_rid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.TARGET_HOT (tmp_mr_rid_target[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS]),
.TARGET_ENC (debug_rid_target_i[gen_mi_slot*P_NUM_SLAVE_SLOTS_LOG+:P_NUM_SLAVE_SLOTS_LOG]),
.MATCH (rid_match[gen_mi_slot]),
.REGION ()
);
end else begin : gen_no_rid_decoder
assign tmp_mr_rid_target[gen_mi_slot] = 1'b1; // All response transfers route to solo SI-slot.
assign rid_match[gen_mi_slot] = 1'b1;
end
assign st_mr_rmesg[gen_mi_slot*P_ST_RMESG_WIDTH+:P_ST_RMESG_WIDTH] = {
st_mr_rdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH],
st_mr_ruser[gen_mi_slot*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH],
st_mr_rresp[gen_mi_slot*2+:2]
};
end else begin : gen_no_mi_read
assign tmp_mr_rid_target[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS] = 0;
assign rid_match[gen_mi_slot] = 1'b0;
assign st_mr_rmesg[gen_mi_slot*P_ST_RMESG_WIDTH+:P_ST_RMESG_WIDTH] = 0;
end // gen_mi_read
if (P_M_AXI_SUPPORTS_WRITE[gen_mi_slot]) begin : gen_mi_write
if (C_NUM_SLAVE_SLOTS>1) begin : gen_bid_decoder
axi_crossbar_v2_1_addr_decoder #
(
.C_FAMILY (C_FAMILY),
.C_NUM_TARGETS (C_NUM_SLAVE_SLOTS),
.C_NUM_TARGETS_LOG (P_NUM_SLAVE_SLOTS_LOG),
.C_NUM_RANGES (1),
.C_ADDR_WIDTH (C_AXI_ID_WIDTH),
.C_TARGET_ENC (C_DEBUG),
.C_TARGET_HOT (1),
.C_REGION_ENC (0),
.C_BASE_ADDR (C_S_AXI_BASE_ID),
.C_HIGH_ADDR (C_S_AXI_HIGH_ID),
.C_TARGET_QUAL (P_M_AXI_WRITE_CONNECTIVITY[gen_mi_slot*32+:C_NUM_SLAVE_SLOTS]),
.C_RESOLUTION (0)
)
bid_decoder_inst
(
.ADDR (st_mr_bid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.TARGET_HOT (tmp_mr_bid_target[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS]),
.TARGET_ENC (debug_bid_target_i[gen_mi_slot*P_NUM_SLAVE_SLOTS_LOG+:P_NUM_SLAVE_SLOTS_LOG]),
.MATCH (bid_match[gen_mi_slot]),
.REGION ()
);
end else begin : gen_no_bid_decoder
assign tmp_mr_bid_target[gen_mi_slot] = 1'b1; // All response transfers route to solo SI-slot.
assign bid_match[gen_mi_slot] = 1'b1;
end
axi_crossbar_v2_1_wdata_mux # // "WM": Write data Mux, per MI-slot (incl error-handler)
(
.C_FAMILY (C_FAMILY),
.C_NUM_SLAVE_SLOTS (C_NUM_SLAVE_SLOTS),
.C_SELECT_WIDTH (P_NUM_SLAVE_SLOTS_LOG),
.C_WMESG_WIDTH (P_WR_WMESG_WIDTH),
.C_FIFO_DEPTH_LOG (C_W_ISSUE_WIDTH[gen_mi_slot*32+:6])
)
wdata_mux_w
(
.ACLK (ACLK),
.ARESET (reset),
// Vector of write transfer inputs from each SI-slot's W-router
.S_WMESG (wr_wm_wmesg),
.S_WLAST (wr_wm_wlast),
.S_WVALID (tmp_wm_wvalid[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS]),
.S_WREADY (tmp_wm_wready[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS]),
// Write transfer output to the current MI-slot
.M_WMESG (wm_mr_wmesg[gen_mi_slot*P_WR_WMESG_WIDTH+:P_WR_WMESG_WIDTH]),
.M_WLAST (wm_mr_wlast[gen_mi_slot]),
.M_WVALID (wm_mr_wvalid[gen_mi_slot]),
.M_WREADY (wm_mr_wready[gen_mi_slot]),
// AW command push from AW arbiter output
.S_ASELECT (aa_wm_awgrant_enc), // SI-slot selected by arbiter
.S_AVALID (sa_wm_awvalid[gen_mi_slot]),
.S_AREADY (sa_wm_awready[gen_mi_slot])
);
if (C_DEBUG) begin : gen_debug_w
// DEBUG WRITE BEAT COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_w_beat_cnt_i[gen_mi_slot*8+:8] <= 0;
end else begin
if (mi_wvalid[gen_mi_slot] & mi_wready[gen_mi_slot]) begin
if (mi_wlast[gen_mi_slot]) begin
debug_w_beat_cnt_i[gen_mi_slot*8+:8] <= 0;
end else begin
debug_w_beat_cnt_i[gen_mi_slot*8+:8] <= debug_w_beat_cnt_i[gen_mi_slot*8+:8] + 1;
end
end
end
end // clocked process
// DEBUG W-CHANNEL TRANSACTION SEQUENCE QUEUE
axi_data_fifo_v2_1_axic_srl_fifo #
(
.C_FAMILY (C_FAMILY),
.C_FIFO_WIDTH (8),
.C_FIFO_DEPTH_LOG (C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]),
.C_USE_FULL (0)
)
debug_w_seq_fifo
(
.ACLK (ACLK),
.ARESET (reset),
.S_MESG (debug_aw_trans_seq_i),
.S_VALID (sa_wm_awvalid[gen_mi_slot]),
.S_READY (),
.M_MESG (debug_w_trans_seq_i[gen_mi_slot*8+:8]),
.M_VALID (),
.M_READY (mi_wvalid[gen_mi_slot] & mi_wready[gen_mi_slot] & mi_wlast[gen_mi_slot])
);
end // gen_debug_w
assign wm_mr_wdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] = wm_mr_wmesg[gen_mi_slot*P_WR_WMESG_WIDTH +: C_AXI_DATA_WIDTH];
assign wm_mr_wstrb[gen_mi_slot*C_AXI_DATA_WIDTH/8+:C_AXI_DATA_WIDTH/8] = wm_mr_wmesg[gen_mi_slot*P_WR_WMESG_WIDTH+C_AXI_DATA_WIDTH +: C_AXI_DATA_WIDTH/8];
assign wm_mr_wuser[gen_mi_slot*C_AXI_WUSER_WIDTH+:C_AXI_WUSER_WIDTH] = wm_mr_wmesg[gen_mi_slot*P_WR_WMESG_WIDTH+C_AXI_DATA_WIDTH+C_AXI_DATA_WIDTH/8 +: C_AXI_WUSER_WIDTH];
assign wm_mr_wid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] = wm_mr_wmesg[gen_mi_slot*P_WR_WMESG_WIDTH+C_AXI_DATA_WIDTH+(C_AXI_DATA_WIDTH/8)+C_AXI_WUSER_WIDTH +: P_AXI_WID_WIDTH];
assign st_mr_bmesg[gen_mi_slot*P_ST_BMESG_WIDTH+:P_ST_BMESG_WIDTH] = {
st_mr_buser[gen_mi_slot*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH],
st_mr_bresp[gen_mi_slot*2+:2]
};
end else begin : gen_no_mi_write
assign tmp_mr_bid_target[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS] = 0;
assign bid_match[gen_mi_slot] = 1'b0;
assign wm_mr_wvalid[gen_mi_slot] = 0;
assign wm_mr_wlast[gen_mi_slot] = 0;
assign wm_mr_wdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] = 0;
assign wm_mr_wstrb[gen_mi_slot*C_AXI_DATA_WIDTH/8+:C_AXI_DATA_WIDTH/8] = 0;
assign wm_mr_wuser[gen_mi_slot*C_AXI_WUSER_WIDTH+:C_AXI_WUSER_WIDTH] = 0;
assign wm_mr_wid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] = 0;
assign st_mr_bmesg[gen_mi_slot*P_ST_BMESG_WIDTH+:P_ST_BMESG_WIDTH] = 0;
assign tmp_wm_wready[gen_mi_slot*C_NUM_SLAVE_SLOTS+:C_NUM_SLAVE_SLOTS] = 0;
assign sa_wm_awready[gen_mi_slot] = 0;
end // gen_mi_write
for (gen_si_slot=0; gen_si_slot<C_NUM_SLAVE_SLOTS; gen_si_slot=gen_si_slot+1) begin : gen_trans_si
// Transpose handshakes from W-router (SxM) to W-mux (MxS).
assign tmp_wm_wvalid[gen_mi_slot*C_NUM_SLAVE_SLOTS+gen_si_slot] = wr_tmp_wvalid[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot];
assign wr_tmp_wready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] = tmp_wm_wready[gen_mi_slot*C_NUM_SLAVE_SLOTS+gen_si_slot];
// Transpose response enables from ID decoders (MxS) to si_transactors (SxM).
assign st_tmp_bid_target[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] = tmp_mr_bid_target[gen_mi_slot*C_NUM_SLAVE_SLOTS+gen_si_slot];
assign st_tmp_rid_target[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] = tmp_mr_rid_target[gen_mi_slot*C_NUM_SLAVE_SLOTS+gen_si_slot];
end // gen_trans_si
assign bready_carry[gen_mi_slot] = st_tmp_bready[gen_mi_slot];
assign rready_carry[gen_mi_slot] = st_tmp_rready[gen_mi_slot];
for (gen_si_slot=1; gen_si_slot<C_NUM_SLAVE_SLOTS; gen_si_slot=gen_si_slot+1) begin : gen_resp_carry_si
assign bready_carry[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] = // Generate M_BREADY if ...
bready_carry[(gen_si_slot-1)*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] | // For any SI-slot (OR carry-chain across all SI-slots), ...
st_tmp_bready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot]; // The write SI transactor indicates BREADY for that MI-slot.
assign rready_carry[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] = // Generate M_RREADY if ...
rready_carry[(gen_si_slot-1)*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot] | // For any SI-slot (OR carry-chain across all SI-slots), ...
st_tmp_rready[gen_si_slot*(C_NUM_MASTER_SLOTS+1)+gen_mi_slot]; // The read SI transactor indicates RREADY for that MI-slot.
end // gen_resp_carry_si
assign w_cmd_push[gen_mi_slot] = mi_awvalid[gen_mi_slot] && mi_awready[gen_mi_slot] && P_M_AXI_SUPPORTS_WRITE[gen_mi_slot];
assign r_cmd_push[gen_mi_slot] = mi_arvalid[gen_mi_slot] && mi_arready[gen_mi_slot] && P_M_AXI_SUPPORTS_READ[gen_mi_slot];
assign w_cmd_pop[gen_mi_slot] = st_mr_bvalid[gen_mi_slot] && st_mr_bready[gen_mi_slot] && P_M_AXI_SUPPORTS_WRITE[gen_mi_slot];
assign r_cmd_pop[gen_mi_slot] = st_mr_rvalid[gen_mi_slot] && st_mr_rready[gen_mi_slot] && st_mr_rlast[gen_mi_slot] && P_M_AXI_SUPPORTS_READ[gen_mi_slot];
// Disqualify arbitration of SI-slot if targeted MI-slot has reached its issuing limit.
assign mi_awmaxissuing[gen_mi_slot] = (w_issuing_cnt[gen_mi_slot*8 +: (C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] ==
P_M_AXI_WRITE_ISSUING[gen_mi_slot*32 +: (C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)]) & ~w_cmd_pop[gen_mi_slot];
assign mi_armaxissuing[gen_mi_slot] = (r_issuing_cnt[gen_mi_slot*8 +: (C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] ==
P_M_AXI_READ_ISSUING[gen_mi_slot*32 +: (C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)]) & ~r_cmd_pop[gen_mi_slot];
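// Issuing counters track outstanding transactions per MI-slot: incremented on MI-side command acceptance, decremented on write response (B) or last read beat (R with RLAST).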
always @(posedge ACLK) begin
if (reset) begin
w_issuing_cnt[gen_mi_slot*8+:8] <= 0; // Some high-order bits remain constant 0
r_issuing_cnt[gen_mi_slot*8+:8] <= 0; // Some high-order bits remain constant 0
end else begin
if (w_cmd_push[gen_mi_slot] && ~w_cmd_pop[gen_mi_slot]) begin
w_issuing_cnt[gen_mi_slot*8+:(C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] <= w_issuing_cnt[gen_mi_slot*8+:(C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] + 1;
end else if (w_cmd_pop[gen_mi_slot] && ~w_cmd_push[gen_mi_slot] && (|w_issuing_cnt[gen_mi_slot*8+:(C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)])) begin
w_issuing_cnt[gen_mi_slot*8+:(C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] <= w_issuing_cnt[gen_mi_slot*8+:(C_W_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] - 1;
end
if (r_cmd_push[gen_mi_slot] && ~r_cmd_pop[gen_mi_slot]) begin
r_issuing_cnt[gen_mi_slot*8+:(C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] <= r_issuing_cnt[gen_mi_slot*8+:(C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] + 1;
end else if (r_cmd_pop[gen_mi_slot] && ~r_cmd_push[gen_mi_slot] && (|r_issuing_cnt[gen_mi_slot*8+:(C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)])) begin
r_issuing_cnt[gen_mi_slot*8+:(C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] <= r_issuing_cnt[gen_mi_slot*8+:(C_R_ISSUE_WIDTH[gen_mi_slot*32+:6]+1)] - 1;
end
end
end // Clocked process
// Reg-slice must break combinatorial path from M_BID and M_RID inputs to M_BREADY and M_RREADY outputs.
// (See m_rready_i and m_resp_en combinatorial assignments in si_transactor.)
// Reg-slice incurs +1 latency, but no bubble-cycles.
axi_register_slice_v2_1_axi_register_slice # // "MR": MI-side R/B-channel Reg-slice, per MI-slot (pass-through if only 1 SI-slot configured)
(
.C_FAMILY (C_FAMILY),
.C_AXI_PROTOCOL ((C_AXI_PROTOCOL == P_AXI3) ? P_AXI3 : P_AXI4),
.C_AXI_ID_WIDTH (C_AXI_ID_WIDTH),
.C_AXI_ADDR_WIDTH (1),
.C_AXI_DATA_WIDTH (C_AXI_DATA_WIDTH),
.C_AXI_SUPPORTS_USER_SIGNALS (C_AXI_SUPPORTS_USER_SIGNALS),
.C_AXI_AWUSER_WIDTH (1),
.C_AXI_ARUSER_WIDTH (1),
.C_AXI_WUSER_WIDTH (C_AXI_WUSER_WIDTH),
.C_AXI_RUSER_WIDTH (C_AXI_RUSER_WIDTH),
.C_AXI_BUSER_WIDTH (C_AXI_BUSER_WIDTH),
.C_REG_CONFIG_AW (P_BYPASS),
.C_REG_CONFIG_AR (P_BYPASS),
.C_REG_CONFIG_W (P_BYPASS),
.C_REG_CONFIG_R (P_M_AXI_SUPPORTS_READ[gen_mi_slot] ? P_FWD_REV : P_BYPASS),
.C_REG_CONFIG_B (P_M_AXI_SUPPORTS_WRITE[gen_mi_slot] ? P_SIMPLE : P_BYPASS)
)
reg_slice_mi
(
.aresetn (ARESETN),
.aclk (ACLK),
.s_axi_awid ({C_AXI_ID_WIDTH{1'b0}}),
.s_axi_awaddr ({1{1'b0}}),
.s_axi_awlen ({((C_AXI_PROTOCOL == P_AXI3) ? 4 : 8){1'b0}}),
.s_axi_awsize ({3{1'b0}}),
.s_axi_awburst ({2{1'b0}}),
.s_axi_awlock ({((C_AXI_PROTOCOL == P_AXI3) ? 2 : 1){1'b0}}),
.s_axi_awcache ({4{1'b0}}),
.s_axi_awprot ({3{1'b0}}),
.s_axi_awregion ({4{1'b0}}),
.s_axi_awqos ({4{1'b0}}),
.s_axi_awuser ({1{1'b0}}),
.s_axi_awvalid ({1{1'b0}}),
.s_axi_awready (),
.s_axi_wid (wm_mr_wid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.s_axi_wdata (wm_mr_wdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH]),
.s_axi_wstrb (wm_mr_wstrb[gen_mi_slot*C_AXI_DATA_WIDTH/8+:C_AXI_DATA_WIDTH/8]),
.s_axi_wlast (wm_mr_wlast[gen_mi_slot]),
.s_axi_wuser (wm_mr_wuser[gen_mi_slot*C_AXI_WUSER_WIDTH+:C_AXI_WUSER_WIDTH]),
.s_axi_wvalid (wm_mr_wvalid[gen_mi_slot]),
.s_axi_wready (wm_mr_wready[gen_mi_slot]),
.s_axi_bid (st_mr_bid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] ),
.s_axi_bresp (st_mr_bresp[gen_mi_slot*2+:2] ),
.s_axi_buser (st_mr_buser[gen_mi_slot*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH] ),
.s_axi_bvalid (st_mr_bvalid[gen_mi_slot*1+:1] ),
.s_axi_bready (st_mr_bready[gen_mi_slot*1+:1] ),
.s_axi_arid ({C_AXI_ID_WIDTH{1'b0}}),
.s_axi_araddr ({1{1'b0}}),
.s_axi_arlen ({((C_AXI_PROTOCOL == P_AXI3) ? 4 : 8){1'b0}}),
.s_axi_arsize ({3{1'b0}}),
.s_axi_arburst ({2{1'b0}}),
.s_axi_arlock ({((C_AXI_PROTOCOL == P_AXI3) ? 2 : 1){1'b0}}),
.s_axi_arcache ({4{1'b0}}),
.s_axi_arprot ({3{1'b0}}),
.s_axi_arregion ({4{1'b0}}),
.s_axi_arqos ({4{1'b0}}),
.s_axi_aruser ({1{1'b0}}),
.s_axi_arvalid ({1{1'b0}}),
.s_axi_arready (),
.s_axi_rid (st_mr_rid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] ),
.s_axi_rdata (st_mr_rdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] ),
.s_axi_rresp (st_mr_rresp[gen_mi_slot*2+:2] ),
.s_axi_rlast (st_mr_rlast[gen_mi_slot*1+:1] ),
.s_axi_ruser (st_mr_ruser[gen_mi_slot*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH] ),
.s_axi_rvalid (st_mr_rvalid[gen_mi_slot*1+:1] ),
.s_axi_rready (st_mr_rready[gen_mi_slot*1+:1] ),
.m_axi_awid (),
.m_axi_awaddr (),
.m_axi_awlen (),
.m_axi_awsize (),
.m_axi_awburst (),
.m_axi_awlock (),
.m_axi_awcache (),
.m_axi_awprot (),
.m_axi_awregion (),
.m_axi_awqos (),
.m_axi_awuser (),
.m_axi_awvalid (),
.m_axi_awready ({1{1'b0}}),
.m_axi_wid (mi_wid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.m_axi_wdata (mi_wdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH]),
.m_axi_wstrb (mi_wstrb[gen_mi_slot*C_AXI_DATA_WIDTH/8+:C_AXI_DATA_WIDTH/8]),
.m_axi_wlast (mi_wlast[gen_mi_slot]),
.m_axi_wuser (mi_wuser[gen_mi_slot*C_AXI_WUSER_WIDTH+:C_AXI_WUSER_WIDTH]),
.m_axi_wvalid (mi_wvalid[gen_mi_slot]),
.m_axi_wready (mi_wready[gen_mi_slot]),
.m_axi_bid (mi_bid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] ),
.m_axi_bresp (mi_bresp[gen_mi_slot*2+:2] ),
.m_axi_buser (mi_buser[gen_mi_slot*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH] ),
.m_axi_bvalid (mi_bvalid[gen_mi_slot*1+:1] ),
.m_axi_bready (mi_bready[gen_mi_slot*1+:1] ),
.m_axi_arid (),
.m_axi_araddr (),
.m_axi_arlen (),
.m_axi_arsize (),
.m_axi_arburst (),
.m_axi_arlock (),
.m_axi_arcache (),
.m_axi_arprot (),
.m_axi_arregion (),
.m_axi_arqos (),
.m_axi_aruser (),
.m_axi_arvalid (),
.m_axi_arready ({1{1'b0}}),
.m_axi_rid (mi_rid[gen_mi_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] ),
.m_axi_rdata (mi_rdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] ),
.m_axi_rresp (mi_rresp[gen_mi_slot*2+:2] ),
.m_axi_rlast (mi_rlast[gen_mi_slot*1+:1] ),
.m_axi_ruser (mi_ruser[gen_mi_slot*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH] ),
.m_axi_rvalid (mi_rvalid[gen_mi_slot*1+:1] ),
.m_axi_rready (mi_rready[gen_mi_slot*1+:1] )
);
end // gen_master_slots (Next gen_mi_slot)
// Highest row of *ready_carry contains accumulated OR across all SI-slots, for each MI-slot.
assign st_mr_bready = bready_carry[(C_NUM_SLAVE_SLOTS-1)*(C_NUM_MASTER_SLOTS+1) +: C_NUM_MASTER_SLOTS+1];
assign st_mr_rready = rready_carry[(C_NUM_SLAVE_SLOTS-1)*(C_NUM_MASTER_SLOTS+1) +: C_NUM_MASTER_SLOTS+1];
// Assign MI-side B, R and W channel ports (exclude error handler signals).
assign mi_bid[0+:C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH] = M_AXI_BID;
assign mi_bvalid[0+:C_NUM_MASTER_SLOTS] = M_AXI_BVALID;
assign mi_bresp[0+:C_NUM_MASTER_SLOTS*2] = M_AXI_BRESP;
assign mi_buser[0+:C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH] = M_AXI_BUSER;
assign M_AXI_BREADY = mi_bready[0+:C_NUM_MASTER_SLOTS];
assign mi_rid[0+:C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH] = M_AXI_RID;
assign mi_rlast[0+:C_NUM_MASTER_SLOTS] = M_AXI_RLAST;
assign mi_rvalid[0+:C_NUM_MASTER_SLOTS] = M_AXI_RVALID;
assign mi_rresp[0+:C_NUM_MASTER_SLOTS*2] = M_AXI_RRESP;
assign mi_ruser[0+:C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH] = M_AXI_RUSER;
assign mi_rdata[0+:C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH] = M_AXI_RDATA;
assign M_AXI_RREADY = mi_rready[0+:C_NUM_MASTER_SLOTS];
assign M_AXI_WLAST = mi_wlast[0+:C_NUM_MASTER_SLOTS];
assign M_AXI_WVALID = mi_wvalid[0+:C_NUM_MASTER_SLOTS];
assign M_AXI_WUSER = mi_wuser[0+:C_NUM_MASTER_SLOTS*C_AXI_WUSER_WIDTH];
assign M_AXI_WID = (C_AXI_PROTOCOL == P_AXI3) ? mi_wid[0+:C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH] : 0;
assign M_AXI_WDATA = mi_wdata[0+:C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH];
assign M_AXI_WSTRB = mi_wstrb[0+:C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH/8];
assign mi_wready[0+:C_NUM_MASTER_SLOTS] = M_AXI_WREADY;
axi_crossbar_v2_1_addr_arbiter # // "AA": Addr Arbiter (AW channel)
(
.C_FAMILY (C_FAMILY),
.C_NUM_M (C_NUM_MASTER_SLOTS+1),
.C_NUM_S (C_NUM_SLAVE_SLOTS),
.C_NUM_S_LOG (P_NUM_SLAVE_SLOTS_LOG),
.C_MESG_WIDTH (P_AA_AWMESG_WIDTH),
.C_ARB_PRIORITY (C_S_AXI_ARB_PRIORITY)
)
addr_arbiter_aw
(
.ACLK (ACLK),
.ARESET (reset),
// Vector of SI-side AW command request inputs
.S_MESG (tmp_aa_awmesg),
.S_TARGET_HOT (st_aa_awtarget_hot),
.S_VALID (ss_aa_awvalid),
.S_VALID_QUAL (st_aa_awvalid_qual),
.S_READY (ss_aa_awready),
// Granted AW command output
.M_MESG (aa_mi_awmesg),
.M_TARGET_HOT (aa_mi_awtarget_hot), // MI-slot targeted by granted command
.M_GRANT_ENC (aa_wm_awgrant_enc), // SI-slot index of granted command
.M_VALID (aa_sa_awvalid),
.M_READY (aa_sa_awready),
.ISSUING_LIMIT (mi_awmaxissuing)
);
// Broadcast AW transfer payload to all MI-slots
assign M_AXI_AWID = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[0+:C_AXI_ID_WIDTH]}};
assign M_AXI_AWADDR = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+:C_AXI_ADDR_WIDTH]}};
assign M_AXI_AWLEN = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH +:8]}};
assign M_AXI_AWSIZE = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8 +:3]}};
assign M_AXI_AWLOCK = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3 +:2]}};
assign M_AXI_AWPROT = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2 +:3]}};
assign M_AXI_AWREGION = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3 +:4]}};
assign M_AXI_AWBURST = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4 +:2]}};
assign M_AXI_AWCACHE = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4+2 +:4]}};
assign M_AXI_AWQOS = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4+2+4 +:4]}};
assign M_AXI_AWUSER = {C_NUM_MASTER_SLOTS{aa_mi_awmesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4+2+4+4 +:C_AXI_AWUSER_WIDTH]}};
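// Bit offsets above unpack aa_mi_awmesg in the same order the fields were packed into tmp_aa_awmesg (ID, ADDR, LEN, SIZE, LOCK, PROT, REGION, then BURST, CACHE, QOS, AWUSER).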
axi_crossbar_v2_1_addr_arbiter # // "AA": Addr Arbiter (AR channel)
(
.C_FAMILY (C_FAMILY),
.C_NUM_M (C_NUM_MASTER_SLOTS+1),
.C_NUM_S (C_NUM_SLAVE_SLOTS),
.C_NUM_S_LOG (P_NUM_SLAVE_SLOTS_LOG),
.C_MESG_WIDTH (P_AA_ARMESG_WIDTH),
.C_ARB_PRIORITY (C_S_AXI_ARB_PRIORITY)
)
addr_arbiter_ar
(
.ACLK (ACLK),
.ARESET (reset),
// Vector of SI-side AR command request inputs
.S_MESG (tmp_aa_armesg),
.S_TARGET_HOT (st_aa_artarget_hot),
.S_VALID_QUAL (st_aa_arvalid_qual),
.S_VALID (st_aa_arvalid),
.S_READY (st_aa_arready),
// Granted AR command output
.M_MESG (aa_mi_armesg),
.M_TARGET_HOT (aa_mi_artarget_hot), // MI-slot targeted by granted command
.M_GRANT_ENC (aa_mi_argrant_enc), // SI-slot index of granted command
.M_VALID (aa_mi_arvalid),
.M_READY (aa_mi_arready),
.ISSUING_LIMIT (mi_armaxissuing)
);
if (C_DEBUG) begin : gen_debug_trans_seq
// DEBUG WRITE TRANSACTION SEQUENCE COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_aw_trans_seq_i <= 1;
end else begin
if (aa_sa_awvalid && aa_sa_awready) begin
debug_aw_trans_seq_i <= debug_aw_trans_seq_i + 1;
end
end
end
// DEBUG READ TRANSACTION SEQUENCE COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_ar_trans_seq_i <= 1;
end else begin
if (aa_mi_arvalid && aa_mi_arready) begin
debug_ar_trans_seq_i <= debug_ar_trans_seq_i + 1;
end
end
end
end // gen_debug_trans_seq
// Broadcast AR transfer payload to all MI-slots
assign M_AXI_ARID = {C_NUM_MASTER_SLOTS{aa_mi_armesg[0+:C_AXI_ID_WIDTH]}};
assign M_AXI_ARADDR = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+:C_AXI_ADDR_WIDTH]}};
assign M_AXI_ARLEN = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH +:8]}};
assign M_AXI_ARSIZE = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8 +:3]}};
assign M_AXI_ARLOCK = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3 +:2]}};
assign M_AXI_ARPROT = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2 +:3]}};
assign M_AXI_ARREGION = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3 +:4]}};
assign M_AXI_ARBURST = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4 +:2]}};
assign M_AXI_ARCACHE = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4+2 +:4]}};
assign M_AXI_ARQOS = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4+2+4 +:4]}};
assign M_AXI_ARUSER = {C_NUM_MASTER_SLOTS{aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+4+2+4+4 +:C_AXI_ARUSER_WIDTH]}};
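// Bit offsets above mirror the packing order of tmp_aa_armesg (ID, ADDR, LEN, SIZE, LOCK, PROT, REGION, then BURST, CACHE, QOS, ARUSER).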
// AW arbiter command transfer completes upon completion of both M-side AW-channel transfer and W-mux address acceptance (command push).
axi_crossbar_v2_1_splitter # // "SA": Splitter for Write Addr Arbiter
(
.C_NUM_M (2)
)
splitter_aw_mi
(
.ACLK (ACLK),
.ARESET (reset),
.S_VALID (aa_sa_awvalid),
.S_READY (aa_sa_awready),
.M_VALID ({mi_awvalid_en, sa_wm_awvalid_en}),
.M_READY ({mi_awready_mux, sa_wm_awready_mux})
);
assign mi_awvalid = aa_mi_awtarget_hot & {C_NUM_MASTER_SLOTS+1{mi_awvalid_en}};
assign mi_awready_mux = |(aa_mi_awtarget_hot & mi_awready);
assign M_AXI_AWVALID = mi_awvalid[0+:C_NUM_MASTER_SLOTS]; // Excludes the last slot (index C_NUM_MASTER_SLOTS), which is the error handler
assign mi_awready[0+:C_NUM_MASTER_SLOTS] = M_AXI_AWREADY;
assign sa_wm_awvalid = aa_mi_awtarget_hot & {C_NUM_MASTER_SLOTS+1{sa_wm_awvalid_en}};
assign sa_wm_awready_mux = |(aa_mi_awtarget_hot & sa_wm_awready);
assign mi_arvalid = aa_mi_artarget_hot & {C_NUM_MASTER_SLOTS+1{aa_mi_arvalid}};
assign aa_mi_arready = |(aa_mi_artarget_hot & mi_arready);
assign M_AXI_ARVALID = mi_arvalid[0+:C_NUM_MASTER_SLOTS]; // Excludes the last slot (index C_NUM_MASTER_SLOTS), which is the error handler
assign mi_arready[0+:C_NUM_MASTER_SLOTS] = M_AXI_ARREADY;
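// For both AW and AR, VALID is fanned out only to the MI-slot selected by the arbiter's target-hot vector, and READY is OR-muxed back from that same slot.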
// MI-slot # C_NUM_MASTER_SLOTS is the error handler
if (C_RANGE_CHECK) begin : gen_decerr_slave
axi_crossbar_v2_1_decerr_slave #
(
.C_AXI_ID_WIDTH (C_AXI_ID_WIDTH),
.C_AXI_DATA_WIDTH (C_AXI_DATA_WIDTH),
.C_AXI_RUSER_WIDTH (C_AXI_RUSER_WIDTH),
.C_AXI_BUSER_WIDTH (C_AXI_BUSER_WIDTH),
.C_AXI_PROTOCOL (C_AXI_PROTOCOL),
.C_RESP (P_DECERR)
)
decerr_slave_inst
(
.S_AXI_ACLK (ACLK),
.S_AXI_ARESET (reset),
.S_AXI_AWID (aa_mi_awmesg[0+:C_AXI_ID_WIDTH]),
.S_AXI_AWVALID (mi_awvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_AWREADY (mi_awready[C_NUM_MASTER_SLOTS]),
.S_AXI_WLAST (mi_wlast[C_NUM_MASTER_SLOTS]),
.S_AXI_WVALID (mi_wvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_WREADY (mi_wready[C_NUM_MASTER_SLOTS]),
.S_AXI_BID (mi_bid[C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.S_AXI_BRESP (mi_bresp[C_NUM_MASTER_SLOTS*2+:2]),
.S_AXI_BUSER (mi_buser[C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH]),
.S_AXI_BVALID (mi_bvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_BREADY (mi_bready[C_NUM_MASTER_SLOTS]),
.S_AXI_ARID (aa_mi_armesg[0+:C_AXI_ID_WIDTH]),
.S_AXI_ARLEN (aa_mi_armesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH +:8]),
.S_AXI_ARVALID (mi_arvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_ARREADY (mi_arready[C_NUM_MASTER_SLOTS]),
.S_AXI_RID (mi_rid[C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH]),
.S_AXI_RDATA (mi_rdata[C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH]),
.S_AXI_RRESP (mi_rresp[C_NUM_MASTER_SLOTS*2+:2]),
.S_AXI_RUSER (mi_ruser[C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH]),
.S_AXI_RLAST (mi_rlast[C_NUM_MASTER_SLOTS]),
.S_AXI_RVALID (mi_rvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_RREADY (mi_rready[C_NUM_MASTER_SLOTS])
);
end else begin : gen_no_decerr_slave
assign mi_awready[C_NUM_MASTER_SLOTS] = 1'b0;
assign mi_wready[C_NUM_MASTER_SLOTS] = 1'b0;
assign mi_arready[C_NUM_MASTER_SLOTS] = 1'b0;
assign mi_bid[C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] = 0;
assign mi_bresp[C_NUM_MASTER_SLOTS*2+:2] = 0;
assign mi_buser[C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH] = 0;
assign mi_bvalid[C_NUM_MASTER_SLOTS] = 1'b0;
assign mi_rid[C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH] = 0;
assign mi_rdata[C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH] = 0;
assign mi_rresp[C_NUM_MASTER_SLOTS*2+:2] = 0;
assign mi_ruser[C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH] = 0;
assign mi_rlast[C_NUM_MASTER_SLOTS] = 1'b0;
assign mi_rvalid[C_NUM_MASTER_SLOTS] = 1'b0;
end // gen_decerr_slave
endgenerate
endmodule
|
module Mux_Array
#(parameter SWR=26, parameter EWR=5)
(
input wire clk,
input wire rst,
input wire load_i,
input wire [SWR-1:0] Data_i,
input wire FSM_left_right_i,
input wire [EWR-1:0] Shift_Value_i,
input wire bit_shift_i,
output wire [SWR-1:0] Data_o
);
////
wire [SWR-1:0] Data_array[EWR+1:0];
//////////////////
genvar k; // Level
///////////////////
Rotate_Mux_Array #(.SWR(SWR)) first_rotate(
.Data_i(Data_i),
.select_i(FSM_left_right_i),
.Data_o(Data_array [0][SWR-1:0])
);
generate for (k=0; k < 3; k=k+1) begin
shift_mux_array #(.SWR(SWR), .LEVEL(k)) shift_mux_array(
.Data_i(Data_array[k]),
.select_i(Shift_Value_i[k]),
.bit_shift_i(bit_shift_i),
.Data_o(Data_array[k+1])
);
end
endgenerate
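// Pipeline register between shift levels 2 and 3 splits the barrel shifter into two stages (one register of delay through the shifter).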
RegisterAdd #(.W(SWR)) Mid_Reg(
.clk(clk),
.rst(rst),
.load(1'b1),
.D(Data_array[3]),
.Q(Data_array[4])
);
generate for (k=3; k < EWR; k=k+1) begin
shift_mux_array #(.SWR(SWR), .LEVEL(k)) shift_mux_array(
.Data_i(Data_array[k+1]),
.select_i(Shift_Value_i[k]),
.bit_shift_i(bit_shift_i),
.Data_o(Data_array[k+2])
);
end
endgenerate
Rotate_Mux_Array #(.SWR(SWR)) last_rotate(
.Data_i(Data_array[EWR+1]),
.select_i(FSM_left_right_i),
.Data_o(Data_o)
);
endmodule
|
module processing_system7_bfm_v2_0_5_intr_wr_mem(
sw_clk,
rstn,
full,
WR_DATA_ACK_OCM,
WR_DATA_ACK_DDR,
WR_ADDR,
WR_DATA,
WR_BYTES,
WR_QOS,
WR_DATA_VALID_OCM,
WR_DATA_VALID_DDR
);
`include "processing_system7_bfm_v2_0_5_local_params.v"
/* local parameters for interconnect wr fifo model */
input sw_clk, rstn;
output full;
input WR_DATA_ACK_DDR, WR_DATA_ACK_OCM;
output reg WR_DATA_VALID_DDR, WR_DATA_VALID_OCM;
output reg [max_burst_bits-1:0] WR_DATA;
output reg [addr_width-1:0] WR_ADDR;
output reg [max_burst_bytes_width:0] WR_BYTES;
output reg [axi_qos_width-1:0] WR_QOS;
reg [intr_cnt_width-1:0] wr_ptr = 0, rd_ptr = 0;
reg [wr_fifo_data_bits-1:0] wr_fifo [0:intr_max_outstanding-1];
wire empty;
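// Pointers carry one extra MSB used as a wrap flag: the FIFO is treated as empty when the pointers are equal, and full when the low bits match but the wrap bits differ.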
assign empty = (wr_ptr === rd_ptr)?1'b1: 1'b0;
assign full = ((wr_ptr[intr_cnt_width-1]!== rd_ptr[intr_cnt_width-1]) && (wr_ptr[intr_cnt_width-2:0] === rd_ptr[intr_cnt_width-2:0]))?1'b1 :1'b0;
parameter SEND_DATA = 0, WAIT_ACK = 1;
reg state;
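// write_mem: push one packed command entry (data, addr, bytes, qos) into wr_fifo and advance the write pointer, wrapping at intr_max_outstanding.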
task automatic write_mem;
input [wr_fifo_data_bits-1:0] data;
begin
wr_fifo[wr_ptr[intr_cnt_width-2:0]] = data;
if(wr_ptr[intr_cnt_width-2:0] === intr_max_outstanding-1)
wr_ptr[intr_cnt_width-2:0] = 0;
else
wr_ptr = wr_ptr + 1;
end
endtask
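// Two-state FSM: SEND_DATA pops the next FIFO entry, drives WR_ADDR/WR_DATA/WR_BYTES/WR_QOS and asserts the VALID matching the decoded region (OCM or DDR); WAIT_ACK holds until the memory model acknowledges, then drops VALID.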
always@(negedge rstn or posedge sw_clk)
begin
if(!rstn) begin
wr_ptr = 0;
rd_ptr = 0;
WR_DATA_VALID_DDR = 1'b0;
WR_DATA_VALID_OCM = 1'b0;
WR_QOS = 0;
state = SEND_DATA;
end else begin
case(state)
SEND_DATA :begin
state = SEND_DATA;
WR_DATA_VALID_OCM = 1'b0;
WR_DATA_VALID_DDR = 1'b0;
if(!empty) begin
WR_DATA = wr_fifo[rd_ptr[intr_cnt_width-2:0]][wr_data_msb : wr_data_lsb];
WR_ADDR = wr_fifo[rd_ptr[intr_cnt_width-2:0]][wr_addr_msb : wr_addr_lsb];
WR_BYTES = wr_fifo[rd_ptr[intr_cnt_width-2:0]][wr_bytes_msb : wr_bytes_lsb];
WR_QOS = wr_fifo[rd_ptr[intr_cnt_width-2:0]][wr_qos_msb : wr_qos_lsb];
state = WAIT_ACK;
case(decode_address(wr_fifo[rd_ptr[intr_cnt_width-2:0]][wr_addr_msb : wr_addr_lsb]))
OCM_MEM : WR_DATA_VALID_OCM = 1;
DDR_MEM : WR_DATA_VALID_DDR = 1;
default : state = SEND_DATA;
endcase
if(rd_ptr[intr_cnt_width-2:0] === intr_max_outstanding-1) begin
rd_ptr[intr_cnt_width-2:0] = 0;
end else begin
rd_ptr = rd_ptr+1;
end
end
end
WAIT_ACK :begin
state = WAIT_ACK;
if(WR_DATA_ACK_OCM | WR_DATA_ACK_DDR) begin
WR_DATA_VALID_OCM = 1'b0;
WR_DATA_VALID_DDR = 1'b0;
state = SEND_DATA;
end
end
endcase
end
end
endmodule
|
module axi_crossbar_v2_1_addr_arbiter #
(
parameter C_FAMILY = "none",
parameter integer C_NUM_S = 1,
parameter integer C_NUM_S_LOG = 1,
parameter integer C_NUM_M = 1,
parameter integer C_MESG_WIDTH = 1,
parameter [C_NUM_S*32-1:0] C_ARB_PRIORITY = {C_NUM_S{32'h00000000}}
// Arbitration priority among each SI slot.
// Higher values indicate higher priority.
// Format: C_NUM_SLAVE_SLOTS{Bit32};
// Range: 'h0-'hF.
)
(
// Global Signals
input wire ACLK,
input wire ARESET,
// Slave Ports
input wire [C_NUM_S*C_MESG_WIDTH-1:0] S_MESG,
input wire [C_NUM_S*C_NUM_M-1:0] S_TARGET_HOT,
input wire [C_NUM_S-1:0] S_VALID,
input wire [C_NUM_S-1:0] S_VALID_QUAL,
output wire [C_NUM_S-1:0] S_READY,
// Master Ports
output wire [C_MESG_WIDTH-1:0] M_MESG,
output wire [C_NUM_M-1:0] M_TARGET_HOT,
output wire [C_NUM_S_LOG-1:0] M_GRANT_ENC,
output wire M_VALID,
input wire M_READY,
// Sideband input
input wire [C_NUM_M-1:0] ISSUING_LIMIT
);
// Generates a mask for all input slots that are priority based
function [C_NUM_S-1:0] f_prio_mask
(
input integer null_arg
);
reg [C_NUM_S-1:0] mask;
integer i;
begin
mask = 0;
for (i=0; i < C_NUM_S; i=i+1) begin
mask[i] = (C_ARB_PRIORITY[i*32+:32] != 0);
end
f_prio_mask = mask;
end
endfunction
// Convert 16-bit one-hot to 4-bit binary
function [3:0] f_hot2enc
(
input [15:0] one_hot
);
begin
f_hot2enc[0] = |(one_hot & 16'b1010101010101010);
f_hot2enc[1] = |(one_hot & 16'b1100110011001100);
f_hot2enc[2] = |(one_hot & 16'b1111000011110000);
f_hot2enc[3] = |(one_hot & 16'b1111111100000000);
end
endfunction
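// Example: f_hot2enc(16'h0010) returns 4'd4 (only bit 4 set).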
localparam [C_NUM_S-1:0] P_PRIO_MASK = f_prio_mask(0);
reg m_valid_i;
reg [C_NUM_S-1:0] s_ready_i;
reg [C_NUM_S-1:0] qual_reg;
reg [C_NUM_S-1:0] grant_hot;
reg [C_NUM_S-1:0] last_rr_hot;
reg any_grant;
reg any_prio;
reg found_prio;
reg [C_NUM_S-1:0] which_prio_hot;
reg [C_NUM_S-1:0] next_prio_hot;
reg [C_NUM_S_LOG-1:0] which_prio_enc;
reg [C_NUM_S_LOG-1:0] next_prio_enc;
reg [4:0] current_highest;
wire [C_NUM_S-1:0] valid_rr;
reg [15:0] next_rr_hot;
reg [C_NUM_S_LOG-1:0] next_rr_enc;
reg [C_NUM_S*C_NUM_S-1:0] carry_rr;
reg [C_NUM_S*C_NUM_S-1:0] mask_rr;
reg found_rr;
wire [C_NUM_S-1:0] next_hot;
wire [C_NUM_S_LOG-1:0] next_enc;
reg prio_stall;
integer i;
wire [C_NUM_S-1:0] valid_qual_i;
reg [C_NUM_S_LOG-1:0] m_grant_enc_i;
reg [C_NUM_M-1:0] m_target_hot_i;
wire [C_NUM_M-1:0] m_target_hot_mux;
reg [C_MESG_WIDTH-1:0] m_mesg_i;
wire [C_MESG_WIDTH-1:0] m_mesg_mux;
genvar gen_si;
assign M_VALID = m_valid_i;
assign S_READY = s_ready_i;
assign M_GRANT_ENC = m_grant_enc_i;
assign M_MESG = m_mesg_i;
assign M_TARGET_HOT = m_target_hot_i;
generate
if (C_NUM_S>1) begin : gen_arbiter
always @(posedge ACLK) begin
if (ARESET) begin
qual_reg <= 0;
end else begin
qual_reg <= valid_qual_i | ~S_VALID; // Don't disqualify when bus not VALID (valid_qual_i would be garbage)
end
end
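// A request is qualified only if at least one of its targeted MI-slots is below its issuing limit.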
for (gen_si=0; gen_si<C_NUM_S; gen_si=gen_si+1) begin : gen_req_qual
assign valid_qual_i[gen_si] = S_VALID_QUAL[gen_si] & (|(S_TARGET_HOT[gen_si*C_NUM_M+:C_NUM_M] & ~ISSUING_LIMIT));
end
/////////////////////////////////////////////////////////////////////////////
// Grant a new request when there is none still pending.
// If no qualified requests found, de-assert M_VALID.
/////////////////////////////////////////////////////////////////////////////
assign next_hot = found_prio ? next_prio_hot : next_rr_hot;
assign next_enc = found_prio ? next_prio_enc : next_rr_enc;
always @(posedge ACLK) begin
if (ARESET) begin
m_valid_i <= 0;
s_ready_i <= 0;
grant_hot <= 0;
any_grant <= 1'b0;
m_grant_enc_i <= 0;
last_rr_hot <= {1'b1, {C_NUM_S-1{1'b0}}};
m_target_hot_i <= 0;
end else begin
s_ready_i <= 0;
if (m_valid_i) begin
// Stall 1 cycle after each master-side completion.
if (M_READY) begin // Master-side completion
m_valid_i <= 1'b0;
grant_hot <= 0;
any_grant <= 1'b0;
end
end else if (any_grant) begin
m_valid_i <= 1'b1;
s_ready_i <= grant_hot; // Assert S_AW/READY for 1 cycle to complete SI address transfer (regardless of M_AREADY)
end else begin
if ((found_prio | found_rr) & ~prio_stall) begin
// Waste 1 cycle and re-arbitrate if target of highest prio hit issuing limit in previous cycle (valid_qual_i).
if (|(next_hot & valid_qual_i)) begin
grant_hot <= next_hot;
m_grant_enc_i <= next_enc;
any_grant <= 1'b1;
if (~found_prio) begin
last_rr_hot <= next_rr_hot;
end
m_target_hot_i <= m_target_hot_mux;
end
end
end
end
end
/////////////////////////////////////////////////////////////////////////////
// Fixed Priority arbiter
// Selects next request to grant from among inputs with PRIO > 0, if any.
/////////////////////////////////////////////////////////////////////////////
always @ * begin : ALG_PRIO
integer ip;
any_prio = 1'b0;
prio_stall = 1'b0;
which_prio_hot = 0;
which_prio_enc = 0;
current_highest = 0;
for (ip=0; ip < C_NUM_S; ip=ip+1) begin
// Disqualify slot if target hit issuing limit (pass to lower prio slot).
if (P_PRIO_MASK[ip] & S_VALID[ip] & qual_reg[ip]) begin
if ({1'b0, C_ARB_PRIORITY[ip*32+:4]} > current_highest) begin
current_highest[0+:4] = C_ARB_PRIORITY[ip*32+:4];
// Stall 1 cycle when highest prio is recovering from SI-side handshake.
// (Do not allow lower-prio slot to win arbitration.)
if (s_ready_i[ip]) begin
any_prio = 1'b0;
prio_stall = 1'b1;
which_prio_hot = 0;
which_prio_enc = 0;
end else begin
any_prio = 1'b1;
which_prio_hot = 1'b1 << ip;
which_prio_enc = ip;
end
end
end
end
found_prio = any_prio;
next_prio_hot = which_prio_hot;
next_prio_enc = which_prio_enc;
end
/////////////////////////////////////////////////////////////////////////////
// Round-robin arbiter
// Selects next request to grant from among inputs with PRIO = 0, if any.
/////////////////////////////////////////////////////////////////////////////
// Disqualify slot if target hit issuing limit 2 or more cycles earlier (pass to next RR slot).
// Disqualify for 1 cycle a slot that is recovering from SI-side handshake (s_ready_i),
// and allow arbitration to pass to any other RR requester.
assign valid_rr = ~P_PRIO_MASK & S_VALID & ~s_ready_i & qual_reg;
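// Carry-chain round-robin: starting from the slot after the last grant (last_rr_hot), a token propagates through non-requesting slots and the first requesting slot in that rotation wins.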
always @ * begin : ALG_RR
integer ir, jr, nr;
next_rr_hot = 0;
for (ir=0;ir<C_NUM_S;ir=ir+1) begin
nr = (ir>0) ? (ir-1) : (C_NUM_S-1);
carry_rr[ir*C_NUM_S] = last_rr_hot[nr];
mask_rr[ir*C_NUM_S] = ~valid_rr[nr];
for (jr=1;jr<C_NUM_S;jr=jr+1) begin
nr = (ir-jr > 0) ? (ir-jr-1) : (C_NUM_S+ir-jr-1);
carry_rr[ir*C_NUM_S+jr] = carry_rr[ir*C_NUM_S+jr-1] | (last_rr_hot[nr] & mask_rr[ir*C_NUM_S+jr-1]);
if (jr < C_NUM_S-1) begin
mask_rr[ir*C_NUM_S+jr] = mask_rr[ir*C_NUM_S+jr-1] & ~valid_rr[nr];
end
end
next_rr_hot[ir] = valid_rr[ir] & carry_rr[(ir+1)*C_NUM_S-1];
end
next_rr_enc = f_hot2enc(next_rr_hot);
found_rr = |(next_rr_hot);
end
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (C_NUM_S),
.C_SEL_WIDTH (C_NUM_S_LOG),
.C_DATA_WIDTH (C_MESG_WIDTH)
) mux_mesg
(
.S (m_grant_enc_i),
.A (S_MESG),
.O (m_mesg_mux),
.OE (1'b1)
);
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (C_NUM_S),
.C_SEL_WIDTH (C_NUM_S_LOG),
.C_DATA_WIDTH (C_NUM_M)
) si_amesg_mux_inst
(
.S (next_enc),
.A (S_TARGET_HOT),
.O (m_target_hot_mux),
.OE (1'b1)
);
always @(posedge ACLK) begin
if (ARESET) begin
m_mesg_i <= 0;
end else if (~m_valid_i) begin
m_mesg_i <= m_mesg_mux;
end
end
end else begin : gen_no_arbiter
assign valid_qual_i = S_VALID_QUAL & |(S_TARGET_HOT & ~ISSUING_LIMIT);
always @ (posedge ACLK) begin
if (ARESET) begin
m_valid_i <= 1'b0;
s_ready_i <= 1'b0;
m_grant_enc_i <= 0;
end else begin
s_ready_i <= 1'b0;
if (m_valid_i) begin
if (M_READY) begin
m_valid_i <= 1'b0;
end
end else if (S_VALID[0] & valid_qual_i[0] & ~s_ready_i) begin
m_valid_i <= 1'b1;
s_ready_i <= 1'b1;
m_target_hot_i <= S_TARGET_HOT;
end
end
end
always @(posedge ACLK) begin
if (ARESET) begin
m_mesg_i <= 0;
end else if (~m_valid_i) begin
m_mesg_i <= S_MESG;
end
end
end // gen_arbiter
endgenerate
endmodule
|
module processing_system7_bfm_v2_0_5_intr_rd_mem(
sw_clk,
rstn,
full,
empty,
req,
invalid_rd_req,
rd_info,
RD_DATA_OCM,
RD_DATA_DDR,
RD_DATA_VALID_OCM,
RD_DATA_VALID_DDR
);
`include "processing_system7_bfm_v2_0_5_local_params.v"
input sw_clk, rstn;
output full, empty;
input RD_DATA_VALID_DDR, RD_DATA_VALID_OCM;
input [max_burst_bits-1:0] RD_DATA_DDR, RD_DATA_OCM;
input req, invalid_rd_req;
input [rd_info_bits-1:0] rd_info;
reg [intr_cnt_width-1:0] wr_ptr = 0, rd_ptr = 0;
reg [rd_afi_fifo_bits-1:0] rd_fifo [0:intr_max_outstanding-1]; // Data, addr, size, burst, len, RID, RRESP, valid bytes
wire full, empty;
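// Same pointer convention as the interconnect write FIFO: equal pointers mean empty; matching low bits with differing wrap bits mean full.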
assign empty = (wr_ptr === rd_ptr)?1'b1: 1'b0;
assign full = ((wr_ptr[intr_cnt_width-1]!== rd_ptr[intr_cnt_width-1]) && (wr_ptr[intr_cnt_width-2:0] === rd_ptr[intr_cnt_width-2:0]))?1'b1 :1'b0;
/* read from the fifo */
task read_mem;
output [rd_afi_fifo_bits-1:0] data;
begin
data = rd_fifo[rd_ptr[intr_cnt_width-2:0]];
if(rd_ptr[intr_cnt_width-2:0] === intr_max_outstanding-1)
rd_ptr[intr_cnt_width-2:0] = 0;
else
rd_ptr = rd_ptr + 1;
end
endtask
reg state;
reg invalid_rd;
/* write into the fifo */
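// Two-state FSM: state 0 waits for a request and latches whether it was flagged invalid; state 1 pushes the returned DDR/OCM data (or just rd_info for an invalid request) into the FIFO and returns to state 0.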
always@(negedge rstn or posedge sw_clk)
begin
if(!rstn) begin
wr_ptr = 0;
rd_ptr = 0;
state = 0;
invalid_rd = 0;
end else begin
case (state)
0 : begin
state = 0;
invalid_rd = 0;
if(req)begin
state = 1;
invalid_rd = invalid_rd_req;
end
end
1 : begin
state = 1;
if(RD_DATA_VALID_OCM | RD_DATA_VALID_DDR | invalid_rd) begin
if(RD_DATA_VALID_DDR)
rd_fifo[wr_ptr[intr_cnt_width-2:0]] = {RD_DATA_DDR,rd_info};
else if(RD_DATA_VALID_OCM)
rd_fifo[wr_ptr[intr_cnt_width-2:0]] = {RD_DATA_OCM,rd_info};
else
rd_fifo[wr_ptr[intr_cnt_width-2:0]] = rd_info;
if(wr_ptr[intr_cnt_width-2:0] === intr_max_outstanding-1)
wr_ptr[intr_cnt_width-2:0] = 0;
else
wr_ptr = wr_ptr + 1;
state = 0;
invalid_rd = 0;
end
end
endcase
end
end
endmodule
|
module axi_crossbar_v2_1_splitter #
(
parameter integer C_NUM_M = 2 // Number of master ports = [2:16]
)
(
// Global Signals
input wire ACLK,
input wire ARESET,
// Slave Port
input wire S_VALID,
output wire S_READY,
// Master Ports
output wire [C_NUM_M-1:0] M_VALID,
input wire [C_NUM_M-1:0] M_READY
);
reg [C_NUM_M-1:0] m_ready_d;
wire s_ready_i;
wire [C_NUM_M-1:0] m_valid_i;
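// m_ready_d records which masters have already completed their handshake for the current S_VALID; S_READY asserts once every master has responded, which also clears the record.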
always @(posedge ACLK) begin
if (ARESET | s_ready_i) m_ready_d <= {C_NUM_M{1'b0}};
else m_ready_d <= m_ready_d | (m_valid_i & M_READY);
end
assign s_ready_i = &(m_ready_d | M_READY);
assign m_valid_i = {C_NUM_M{S_VALID}} & ~m_ready_d;
assign M_VALID = m_valid_i;
assign S_READY = s_ready_i;
endmodule
|
module axi_crossbar_v2_1_crossbar_sasd #
(
parameter C_FAMILY = "none",
parameter integer C_NUM_SLAVE_SLOTS = 1,
parameter integer C_NUM_MASTER_SLOTS = 1,
parameter integer C_NUM_ADDR_RANGES = 1,
parameter integer C_AXI_ID_WIDTH = 1,
parameter integer C_AXI_ADDR_WIDTH = 32,
parameter integer C_AXI_DATA_WIDTH = 32,
parameter integer C_AXI_PROTOCOL = 0,
parameter [C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64-1:0] C_M_AXI_BASE_ADDR = {C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64{1'b1}},
parameter [C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64-1:0] C_M_AXI_HIGH_ADDR = {C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64{1'b0}},
parameter [C_NUM_SLAVE_SLOTS*64-1:0] C_S_AXI_BASE_ID = {C_NUM_SLAVE_SLOTS*64{1'b0}},
parameter [C_NUM_SLAVE_SLOTS*64-1:0] C_S_AXI_HIGH_ID = {C_NUM_SLAVE_SLOTS*64{1'b0}},
parameter integer C_AXI_SUPPORTS_USER_SIGNALS = 0,
parameter integer C_AXI_AWUSER_WIDTH = 1,
parameter integer C_AXI_ARUSER_WIDTH = 1,
parameter integer C_AXI_WUSER_WIDTH = 1,
parameter integer C_AXI_RUSER_WIDTH = 1,
parameter integer C_AXI_BUSER_WIDTH = 1,
parameter [C_NUM_SLAVE_SLOTS-1:0] C_S_AXI_SUPPORTS_WRITE = {C_NUM_SLAVE_SLOTS{1'b1}},
parameter [C_NUM_SLAVE_SLOTS-1:0] C_S_AXI_SUPPORTS_READ = {C_NUM_SLAVE_SLOTS{1'b1}},
parameter [C_NUM_MASTER_SLOTS-1:0] C_M_AXI_SUPPORTS_WRITE = {C_NUM_MASTER_SLOTS{1'b1}},
parameter [C_NUM_MASTER_SLOTS-1:0] C_M_AXI_SUPPORTS_READ = {C_NUM_MASTER_SLOTS{1'b1}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_S_AXI_ARB_PRIORITY = {C_NUM_SLAVE_SLOTS{32'h00000000}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_SECURE = {C_NUM_MASTER_SLOTS{32'h00000000}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_ERR_MODE = {C_NUM_MASTER_SLOTS{32'h00000000}},
parameter integer C_R_REGISTER = 0,
parameter integer C_RANGE_CHECK = 0,
parameter integer C_ADDR_DECODE = 0,
parameter integer C_DEBUG = 1
)
(
// Global Signals
input wire ACLK,
input wire ARESETN,
// Slave Interface Write Address Ports
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_AWID,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ADDR_WIDTH-1:0] S_AXI_AWADDR,
input wire [C_NUM_SLAVE_SLOTS*8-1:0] S_AXI_AWLEN,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_AWSIZE,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_AWBURST,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_AWLOCK,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_AWCACHE,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_AWPROT,
// input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_AWREGION,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_AWQOS,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_AWUSER_WIDTH-1:0] S_AXI_AWUSER,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_AWVALID,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_AWREADY,
// Slave Interface Write Data Ports
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_WID,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_DATA_WIDTH-1:0] S_AXI_WDATA,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_DATA_WIDTH/8-1:0] S_AXI_WSTRB,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_WLAST,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_WUSER_WIDTH-1:0] S_AXI_WUSER,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_WVALID,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_WREADY,
// Slave Interface Write Response Ports
output wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_BID,
output wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_BRESP,
output wire [C_NUM_SLAVE_SLOTS*C_AXI_BUSER_WIDTH-1:0] S_AXI_BUSER,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_BVALID,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_BREADY,
// Slave Interface Read Address Ports
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_ARID,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ADDR_WIDTH-1:0] S_AXI_ARADDR,
input wire [C_NUM_SLAVE_SLOTS*8-1:0] S_AXI_ARLEN,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_ARSIZE,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_ARBURST,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_ARLOCK,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_ARCACHE,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_ARPROT,
// input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_ARREGION,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_ARQOS,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ARUSER_WIDTH-1:0] S_AXI_ARUSER,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_ARVALID,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_ARREADY,
// Slave Interface Read Data Ports
output wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_RID,
output wire [C_NUM_SLAVE_SLOTS*C_AXI_DATA_WIDTH-1:0] S_AXI_RDATA,
output wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_RRESP,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_RLAST,
output wire [C_NUM_SLAVE_SLOTS*C_AXI_RUSER_WIDTH-1:0] S_AXI_RUSER,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_RVALID,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_RREADY,
// Master Interface Write Address Port
output wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_AWID,
output wire [C_NUM_MASTER_SLOTS*C_AXI_ADDR_WIDTH-1:0] M_AXI_AWADDR,
output wire [C_NUM_MASTER_SLOTS*8-1:0] M_AXI_AWLEN,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_AWSIZE,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_AWBURST,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_AWLOCK,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_AWCACHE,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_AWPROT,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_AWREGION,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_AWQOS,
output wire [C_NUM_MASTER_SLOTS*C_AXI_AWUSER_WIDTH-1:0] M_AXI_AWUSER,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_AWVALID,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_AWREADY,
// Master Interface Write Data Ports
output wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_WID,
output wire [C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH-1:0] M_AXI_WDATA,
output wire [C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH/8-1:0] M_AXI_WSTRB,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_WLAST,
output wire [C_NUM_MASTER_SLOTS*C_AXI_WUSER_WIDTH-1:0] M_AXI_WUSER,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_WVALID,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_WREADY,
// Master Interface Write Response Ports
input wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_BID, // Unused
input wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_BRESP,
input wire [C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH-1:0] M_AXI_BUSER,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_BVALID,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_BREADY,
// Master Interface Read Address Port
output wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_ARID,
output wire [C_NUM_MASTER_SLOTS*C_AXI_ADDR_WIDTH-1:0] M_AXI_ARADDR,
output wire [C_NUM_MASTER_SLOTS*8-1:0] M_AXI_ARLEN,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_ARSIZE,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_ARBURST,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_ARLOCK,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_ARCACHE,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_ARPROT,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_ARREGION,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_ARQOS,
output wire [C_NUM_MASTER_SLOTS*C_AXI_ARUSER_WIDTH-1:0] M_AXI_ARUSER,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_ARVALID,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_ARREADY,
// Master Interface Read Data Ports
input wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_RID, // Unused
input wire [C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH-1:0] M_AXI_RDATA,
input wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_RRESP,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_RLAST,
input wire [C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH-1:0] M_AXI_RUSER,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_RVALID,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_RREADY
);
localparam integer P_AXI4 = 0;
localparam integer P_AXI3 = 1;
localparam integer P_AXILITE = 2;
localparam integer P_NUM_MASTER_SLOTS_DE = C_RANGE_CHECK ? C_NUM_MASTER_SLOTS+1 : C_NUM_MASTER_SLOTS;
localparam integer P_NUM_MASTER_SLOTS_LOG = (C_NUM_MASTER_SLOTS>1) ? f_ceil_log2(C_NUM_MASTER_SLOTS) : 1;
localparam integer P_NUM_MASTER_SLOTS_DE_LOG = (P_NUM_MASTER_SLOTS_DE>1) ? f_ceil_log2(P_NUM_MASTER_SLOTS_DE) : 1;
localparam integer P_NUM_SLAVE_SLOTS_LOG = (C_NUM_SLAVE_SLOTS>1) ? f_ceil_log2(C_NUM_SLAVE_SLOTS) : 1;
localparam integer P_AXI_AUSER_WIDTH = (C_AXI_AWUSER_WIDTH > C_AXI_ARUSER_WIDTH) ? C_AXI_AWUSER_WIDTH : C_AXI_ARUSER_WIDTH;
localparam integer P_AXI_WID_WIDTH = (C_AXI_PROTOCOL == P_AXI3) ? C_AXI_ID_WIDTH : 1;
localparam integer P_AMESG_WIDTH = C_AXI_ID_WIDTH + C_AXI_ADDR_WIDTH + 8+3+2+3+2+4+4 + P_AXI_AUSER_WIDTH + 4;
localparam integer P_BMESG_WIDTH = 2 + C_AXI_BUSER_WIDTH;
localparam integer P_RMESG_WIDTH = 1+2 + C_AXI_DATA_WIDTH + C_AXI_RUSER_WIDTH;
localparam integer P_WMESG_WIDTH = 1 + C_AXI_DATA_WIDTH + C_AXI_DATA_WIDTH/8 + C_AXI_WUSER_WIDTH + P_AXI_WID_WIDTH;
localparam [31:0] P_AXILITE_ERRMODE = 32'h00000001;
localparam integer P_NONSECURE_BIT = 1;
localparam [C_NUM_MASTER_SLOTS-1:0] P_M_SECURE_MASK = f_bit32to1_mi(C_M_AXI_SECURE); // Mask of secure MI-slots
localparam [C_NUM_MASTER_SLOTS-1:0] P_M_AXILITE_MASK = f_m_axilite(0); // Mask of axilite rule-check MI-slots
localparam [1:0] P_FIXED = 2'b00;
localparam integer P_BYPASS = 0;
localparam integer P_LIGHTWT = 7;
localparam integer P_FULLY_REG = 1;
localparam integer P_R_REG_CONFIG = C_R_REGISTER == 8 ? // "Automatic" reg-slice
(C_RANGE_CHECK ? ((C_AXI_PROTOCOL == P_AXILITE) ? P_LIGHTWT : P_FULLY_REG) : P_BYPASS) : // Bypass if no R-channel mux
C_R_REGISTER;
localparam P_DECERR = 2'b11;
//---------------------------------------------------------------------------
// Functions
//---------------------------------------------------------------------------
// Ceiling of log2(x)
function integer f_ceil_log2
(
input integer x
);
integer acc;
begin
acc=0;
while ((2**acc) < x)
acc = acc + 1;
f_ceil_log2 = acc;
end
endfunction
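// e.g. f_ceil_log2(1) = 0, f_ceil_log2(2) = 1, f_ceil_log2(5) = 3.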
// Isolate thread bits of input S_ID and add to BASE_ID (RNG00) to form MI-side ID value
// only for end-point SI-slots
function [C_AXI_ID_WIDTH-1:0] f_extend_ID
(
input [C_AXI_ID_WIDTH-1:0] s_id,
input integer slot
);
begin
f_extend_ID = C_S_AXI_BASE_ID[slot*64+:C_AXI_ID_WIDTH] | (s_id & (C_S_AXI_BASE_ID[slot*64+:C_AXI_ID_WIDTH] ^ C_S_AXI_HIGH_ID[slot*64+:C_AXI_ID_WIDTH]));
end
endfunction
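// e.g. with an illustrative slot BASE_ID of 4'b0100 and HIGH_ID of 4'b0111, the two thread LSBs of s_id
// pass through and the upper bits are forced to 2'b01.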
// Convert Bit32 vector of range [0,1] to Bit1 vector on MI
function [C_NUM_MASTER_SLOTS-1:0] f_bit32to1_mi
(input [C_NUM_MASTER_SLOTS*32-1:0] vec32);
integer mi;
begin
for (mi=0; mi<C_NUM_MASTER_SLOTS; mi=mi+1) begin
f_bit32to1_mi[mi] = vec32[mi*32];
end
end
endfunction
// AxiLite error-checking mask (on MI)
function [C_NUM_MASTER_SLOTS-1:0] f_m_axilite
(
input integer null_arg
);
integer mi;
begin
for (mi=0; mi<C_NUM_MASTER_SLOTS; mi=mi+1) begin
f_m_axilite[mi] = (C_M_AXI_ERR_MODE[mi*32+:32] == P_AXILITE_ERRMODE);
end
end
endfunction
genvar gen_si_slot;
genvar gen_mi_slot;
wire [C_NUM_SLAVE_SLOTS*P_AMESG_WIDTH-1:0] si_awmesg ;
wire [C_NUM_SLAVE_SLOTS*P_AMESG_WIDTH-1:0] si_armesg ;
wire [P_AMESG_WIDTH-1:0] aa_amesg ;
wire [C_AXI_ID_WIDTH-1:0] mi_aid ;
wire [C_AXI_ADDR_WIDTH-1:0] mi_aaddr ;
wire [8-1:0] mi_alen ;
wire [3-1:0] mi_asize ;
wire [2-1:0] mi_alock ;
wire [3-1:0] mi_aprot ;
wire [2-1:0] mi_aburst ;
wire [4-1:0] mi_acache ;
wire [4-1:0] mi_aregion ;
wire [4-1:0] mi_aqos ;
wire [P_AXI_AUSER_WIDTH-1:0] mi_auser ;
wire [4-1:0] target_region ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] aa_grant_hot ;
wire [P_NUM_SLAVE_SLOTS_LOG-1:0] aa_grant_enc ;
wire aa_grant_rnw ;
wire aa_grant_any ;
wire [C_NUM_MASTER_SLOTS-1:0] target_mi_hot ;
wire [P_NUM_MASTER_SLOTS_LOG-1:0] target_mi_enc ;
reg [P_NUM_MASTER_SLOTS_DE-1:0] m_atarget_hot ;
reg [P_NUM_MASTER_SLOTS_DE_LOG-1:0] m_atarget_enc ;
wire [P_NUM_MASTER_SLOTS_DE_LOG-1:0] m_atarget_enc_comb ;
wire match;
wire any_error ;
wire [7:0] m_aerror_i ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_awvalid ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_awready ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_arvalid ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_arready ;
wire aa_awvalid ;
wire aa_awready ;
wire aa_arvalid ;
wire aa_arready ;
wire mi_awvalid_en;
wire mi_awready_mux;
wire mi_arvalid_en;
wire mi_arready_mux;
wire w_transfer_en;
wire w_complete_mux;
wire b_transfer_en;
wire b_complete_mux;
wire r_transfer_en;
wire r_complete_mux;
wire target_secure;
wire target_write;
wire target_read;
wire target_axilite;
wire [P_BMESG_WIDTH-1:0] si_bmesg ;
wire [P_NUM_MASTER_SLOTS_DE*P_BMESG_WIDTH-1:0] mi_bmesg ;
wire [P_NUM_MASTER_SLOTS_DE*2-1:0] mi_bresp ;
wire [P_NUM_MASTER_SLOTS_DE*C_AXI_BUSER_WIDTH-1:0] mi_buser ;
wire [2-1:0] si_bresp ;
wire [C_AXI_BUSER_WIDTH-1:0] si_buser ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_bvalid ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_bready ;
wire aa_bvalid ;
wire aa_bready ;
wire si_bready ;
wire [C_NUM_SLAVE_SLOTS-1:0] si_bvalid;
wire [P_RMESG_WIDTH-1:0] aa_rmesg ;
wire [P_RMESG_WIDTH-1:0] sr_rmesg ;
wire [P_NUM_MASTER_SLOTS_DE*P_RMESG_WIDTH-1:0] mi_rmesg ;
wire [P_NUM_MASTER_SLOTS_DE*2-1:0] mi_rresp ;
wire [P_NUM_MASTER_SLOTS_DE*C_AXI_RUSER_WIDTH-1:0] mi_ruser ;
wire [P_NUM_MASTER_SLOTS_DE*C_AXI_DATA_WIDTH-1:0] mi_rdata ;
wire [P_NUM_MASTER_SLOTS_DE*1-1:0] mi_rlast ;
wire [2-1:0] si_rresp ;
wire [C_AXI_RUSER_WIDTH-1:0] si_ruser ;
wire [C_AXI_DATA_WIDTH-1:0] si_rdata ;
wire si_rlast ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_rvalid ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_rready ;
wire aa_rvalid ;
wire aa_rready ;
wire sr_rvalid ;
wire si_rready ;
wire sr_rready ;
wire [C_NUM_SLAVE_SLOTS-1:0] si_rvalid;
wire [C_NUM_SLAVE_SLOTS*P_WMESG_WIDTH-1:0] si_wmesg ;
wire [P_WMESG_WIDTH-1:0] mi_wmesg ;
wire [C_AXI_ID_WIDTH-1:0] mi_wid ;
wire [C_AXI_DATA_WIDTH-1:0] mi_wdata ;
wire [C_AXI_DATA_WIDTH/8-1:0] mi_wstrb ;
wire [C_AXI_WUSER_WIDTH-1:0] mi_wuser ;
wire [1-1:0] mi_wlast ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_wvalid ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_wready ;
wire aa_wvalid ;
wire aa_wready ;
wire [C_NUM_SLAVE_SLOTS-1:0] si_wready;
reg [7:0] debug_r_beat_cnt_i;
reg [7:0] debug_w_beat_cnt_i;
reg [7:0] debug_aw_trans_seq_i;
reg [7:0] debug_ar_trans_seq_i;
reg aresetn_d = 1'b0; // Reset delay register
always @(posedge ACLK) begin
if (~ARESETN) begin
aresetn_d <= 1'b0;
end else begin
aresetn_d <= ARESETN;
end
end
wire reset;
assign reset = ~aresetn_d;
generate
axi_crossbar_v2_1_addr_arbiter_sasd #
(
.C_FAMILY (C_FAMILY),
.C_NUM_S (C_NUM_SLAVE_SLOTS),
.C_NUM_S_LOG (P_NUM_SLAVE_SLOTS_LOG),
.C_AMESG_WIDTH (P_AMESG_WIDTH),
.C_GRANT_ENC (1),
.C_ARB_PRIORITY (C_S_AXI_ARB_PRIORITY)
)
addr_arbiter_inst
(
.ACLK (ACLK),
.ARESET (reset),
// Vector of SI-side AW command request inputs
.S_AWMESG (si_awmesg),
.S_ARMESG (si_armesg),
.S_AWVALID (S_AXI_AWVALID),
.S_AWREADY (S_AXI_AWREADY),
.S_ARVALID (S_AXI_ARVALID),
.S_ARREADY (S_AXI_ARREADY),
.M_GRANT_ENC (aa_grant_enc),
.M_GRANT_HOT (aa_grant_hot), // SI-slot 1-hot mask of granted command
.M_GRANT_ANY (aa_grant_any),
.M_GRANT_RNW (aa_grant_rnw),
.M_AMESG (aa_amesg), // Either S_AWMESG or S_ARMESG, as indicated by M_AWVALID and M_ARVALID.
.M_AWVALID (aa_awvalid),
.M_AWREADY (aa_awready),
.M_ARVALID (aa_arvalid),
.M_ARREADY (aa_arready)
);
if (C_ADDR_DECODE) begin : gen_addr_decoder
axi_crossbar_v2_1_addr_decoder #
(
.C_FAMILY (C_FAMILY),
.C_NUM_TARGETS (C_NUM_MASTER_SLOTS),
.C_NUM_TARGETS_LOG (P_NUM_MASTER_SLOTS_LOG),
.C_NUM_RANGES (C_NUM_ADDR_RANGES),
.C_ADDR_WIDTH (C_AXI_ADDR_WIDTH),
.C_TARGET_ENC (1),
.C_TARGET_HOT (1),
.C_REGION_ENC (1),
.C_BASE_ADDR (C_M_AXI_BASE_ADDR),
.C_HIGH_ADDR (C_M_AXI_HIGH_ADDR),
.C_TARGET_QUAL ({C_NUM_MASTER_SLOTS{1'b1}}),
.C_RESOLUTION (2)
)
addr_decoder_inst
(
.ADDR (mi_aaddr),
.TARGET_HOT (target_mi_hot),
.TARGET_ENC (target_mi_enc),
.MATCH (match),
.REGION (target_region)
);
end else begin : gen_no_addr_decoder
assign target_mi_hot = 1;
assign match = 1'b1;
assign target_region = 4'b0000;
end // gen_addr_decoder
// AW-channel arbiter command transfer completes only when the M-side AW-channel transfer, the W-channel burst, and the B-channel response have all completed.
axi_crossbar_v2_1_splitter #
(
.C_NUM_M (3)
)
splitter_aw
(
.ACLK (ACLK),
.ARESET (reset),
.S_VALID (aa_awvalid),
.S_READY (aa_awready),
.M_VALID ({mi_awvalid_en, w_transfer_en, b_transfer_en}),
.M_READY ({mi_awready_mux, w_complete_mux, b_complete_mux})
);
// AR-channel arbiter command transfer completes only when both the M-side AR-channel transfer and the R-channel burst have completed.
axi_crossbar_v2_1_splitter #
(
.C_NUM_M (2)
)
splitter_ar
(
.ACLK (ACLK),
.ARESET (reset),
.S_VALID (aa_arvalid),
.S_READY (aa_arready),
.M_VALID ({mi_arvalid_en, r_transfer_en}),
.M_READY ({mi_arready_mux, r_complete_mux})
);
assign target_secure = |(target_mi_hot & P_M_SECURE_MASK);
assign target_write = |(target_mi_hot & C_M_AXI_SUPPORTS_WRITE);
assign target_read = |(target_mi_hot & C_M_AXI_SUPPORTS_READ);
assign target_axilite = |(target_mi_hot & P_M_AXILITE_MASK);
assign any_error = C_RANGE_CHECK && (m_aerror_i != 0); // DECERR if error-detection enabled and any error condition.
assign m_aerror_i[0] = ~match; // Invalid target address
assign m_aerror_i[1] = target_secure && mi_aprot[P_NONSECURE_BIT]; // TrustZone violation
assign m_aerror_i[2] = target_axilite && ((mi_alen != 0) ||
(mi_asize[1:0] == 2'b11) || (mi_asize[2] == 1'b1)); // AxiLite access violation
assign m_aerror_i[3] = (~aa_grant_rnw && ~target_write) ||
(aa_grant_rnw && ~target_read); // R/W direction unsupported by target
assign m_aerror_i[7:4] = 4'b0000; // Reserved
assign m_atarget_enc_comb = any_error ? (P_NUM_MASTER_SLOTS_DE-1) : target_mi_enc; // Select MI slot or decerr_slave
always @(posedge ACLK) begin
if (reset) begin
m_atarget_hot <= 0;
m_atarget_enc <= 0;
end else begin
m_atarget_hot <= {P_NUM_MASTER_SLOTS_DE{aa_grant_any}} & (any_error ? {1'b1, {C_NUM_MASTER_SLOTS{1'b0}}} : {1'b0, target_mi_hot}); // Select MI slot or decerr_slave
m_atarget_enc <= m_atarget_enc_comb;
end
end
// Receive AWREADY from targeted MI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (1)
) mi_awready_mux_inst
(
.S (m_atarget_enc),
.A (mi_awready),
.O (mi_awready_mux),
.OE (mi_awvalid_en)
);
// Receive ARREADY from targeted MI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (1)
) mi_arready_mux_inst
(
.S (m_atarget_enc),
.A (mi_arready),
.O (mi_arready_mux),
.OE (mi_arvalid_en)
);
assign mi_awvalid = m_atarget_hot & {P_NUM_MASTER_SLOTS_DE{mi_awvalid_en}}; // Assert AWVALID on targeted MI.
assign mi_arvalid = m_atarget_hot & {P_NUM_MASTER_SLOTS_DE{mi_arvalid_en}}; // Assert ARVALID on targeted MI.
assign M_AXI_AWVALID = mi_awvalid[0+:C_NUM_MASTER_SLOTS]; // Propagate to MI slots.
assign M_AXI_ARVALID = mi_arvalid[0+:C_NUM_MASTER_SLOTS]; // Propagate to MI slots.
assign mi_awready[0+:C_NUM_MASTER_SLOTS] = M_AXI_AWREADY; // Copy from MI slots.
assign mi_arready[0+:C_NUM_MASTER_SLOTS] = M_AXI_ARREADY; // Copy from MI slots.
// Receive WREADY from targeted MI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (1)
) mi_wready_mux_inst
(
.S (m_atarget_enc),
.A (mi_wready),
.O (aa_wready),
.OE (w_transfer_en)
);
assign mi_wvalid = m_atarget_hot & {P_NUM_MASTER_SLOTS_DE{aa_wvalid}}; // Assert WVALID on targeted MI.
assign si_wready = aa_grant_hot & {C_NUM_SLAVE_SLOTS{aa_wready}}; // Assert WREADY on granted SI.
assign S_AXI_WREADY = si_wready;
assign w_complete_mux = aa_wready & aa_wvalid & mi_wlast; // W burst complete on designated SI/MI.
// Receive RREADY from granted SI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (C_NUM_SLAVE_SLOTS),
.C_SEL_WIDTH (P_NUM_SLAVE_SLOTS_LOG),
.C_DATA_WIDTH (1)
) si_rready_mux_inst
(
.S (aa_grant_enc),
.A (S_AXI_RREADY),
.O (si_rready),
.OE (r_transfer_en)
);
assign sr_rready = si_rready & r_transfer_en;
assign mi_rready = m_atarget_hot & {P_NUM_MASTER_SLOTS_DE{aa_rready}}; // Assert RREADY on targeted MI.
assign si_rvalid = aa_grant_hot & {C_NUM_SLAVE_SLOTS{sr_rvalid}}; // Assert RVALID on granted SI.
assign S_AXI_RVALID = si_rvalid;
assign r_complete_mux = sr_rready & sr_rvalid & si_rlast; // R burst complete on designated SI/MI.
// Receive BREADY from granted SI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (C_NUM_SLAVE_SLOTS),
.C_SEL_WIDTH (P_NUM_SLAVE_SLOTS_LOG),
.C_DATA_WIDTH (1)
) si_bready_mux_inst
(
.S (aa_grant_enc),
.A (S_AXI_BREADY),
.O (si_bready),
.OE (b_transfer_en)
);
assign aa_bready = si_bready & b_transfer_en;
assign mi_bready = m_atarget_hot & {P_NUM_MASTER_SLOTS_DE{aa_bready}}; // Assert BREADY on targeted MI.
assign si_bvalid = aa_grant_hot & {C_NUM_SLAVE_SLOTS{aa_bvalid}}; // Assert BVALID on granted SI.
assign S_AXI_BVALID = si_bvalid;
assign b_complete_mux = aa_bready & aa_bvalid; // B transfer complete on designated SI/MI.
for (gen_si_slot=0; gen_si_slot<C_NUM_SLAVE_SLOTS; gen_si_slot=gen_si_slot+1) begin : gen_si_amesg
assign si_armesg[gen_si_slot*P_AMESG_WIDTH +: P_AMESG_WIDTH] = { // Concatenate from MSB to LSB
4'b0000,
// S_AXI_ARREGION[gen_si_slot*4+:4],
S_AXI_ARUSER[gen_si_slot*C_AXI_ARUSER_WIDTH +: C_AXI_ARUSER_WIDTH],
S_AXI_ARQOS[gen_si_slot*4+:4],
S_AXI_ARCACHE[gen_si_slot*4+:4],
S_AXI_ARBURST[gen_si_slot*2+:2],
S_AXI_ARPROT[gen_si_slot*3+:3],
S_AXI_ARLOCK[gen_si_slot*2+:2],
S_AXI_ARSIZE[gen_si_slot*3+:3],
S_AXI_ARLEN[gen_si_slot*8+:8],
S_AXI_ARADDR[gen_si_slot*C_AXI_ADDR_WIDTH +: C_AXI_ADDR_WIDTH],
f_extend_ID(S_AXI_ARID[gen_si_slot*C_AXI_ID_WIDTH +: C_AXI_ID_WIDTH], gen_si_slot)
};
assign si_awmesg[gen_si_slot*P_AMESG_WIDTH +: P_AMESG_WIDTH] = { // Concatenate from MSB to LSB
4'b0000,
// S_AXI_AWREGION[gen_si_slot*4+:4],
S_AXI_AWUSER[gen_si_slot*C_AXI_AWUSER_WIDTH +: C_AXI_AWUSER_WIDTH],
S_AXI_AWQOS[gen_si_slot*4+:4],
S_AXI_AWCACHE[gen_si_slot*4+:4],
S_AXI_AWBURST[gen_si_slot*2+:2],
S_AXI_AWPROT[gen_si_slot*3+:3],
S_AXI_AWLOCK[gen_si_slot*2+:2],
S_AXI_AWSIZE[gen_si_slot*3+:3],
S_AXI_AWLEN[gen_si_slot*8+:8],
S_AXI_AWADDR[gen_si_slot*C_AXI_ADDR_WIDTH +: C_AXI_ADDR_WIDTH],
f_extend_ID(S_AXI_AWID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH], gen_si_slot)
};
end // gen_si_amesg
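// Unpack the granted arbiter payload (aa_amesg); the field offsets below mirror, LSB first, the MSB-to-LSB
// concatenation order used in gen_si_amesg above.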
assign mi_aid = aa_amesg[0 +: C_AXI_ID_WIDTH];
assign mi_aaddr = aa_amesg[C_AXI_ID_WIDTH +: C_AXI_ADDR_WIDTH];
assign mi_alen = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH +: 8];
assign mi_asize = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8 +: 3];
assign mi_alock = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3 +: 2];
assign mi_aprot = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2 +: 3];
assign mi_aburst = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3 +: 2];
assign mi_acache = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+2 +: 4];
assign mi_aqos = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+2+4 +: 4];
assign mi_auser = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+2+4+4 +: P_AXI_AUSER_WIDTH];
assign mi_aregion = (C_ADDR_DECODE != 0) ? target_region : aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+2+4+4+P_AXI_AUSER_WIDTH +: 4];
// Broadcast AW transfer payload to all MI-slots
assign M_AXI_AWID = {C_NUM_MASTER_SLOTS{mi_aid}};
assign M_AXI_AWADDR = {C_NUM_MASTER_SLOTS{mi_aaddr}};
assign M_AXI_AWLEN = {C_NUM_MASTER_SLOTS{mi_alen }};
assign M_AXI_AWSIZE = {C_NUM_MASTER_SLOTS{mi_asize}};
assign M_AXI_AWLOCK = {C_NUM_MASTER_SLOTS{mi_alock}};
assign M_AXI_AWPROT = {C_NUM_MASTER_SLOTS{mi_aprot}};
assign M_AXI_AWREGION = {C_NUM_MASTER_SLOTS{mi_aregion}};
assign M_AXI_AWBURST = {C_NUM_MASTER_SLOTS{mi_aburst}};
assign M_AXI_AWCACHE = {C_NUM_MASTER_SLOTS{mi_acache}};
assign M_AXI_AWQOS = {C_NUM_MASTER_SLOTS{mi_aqos }};
assign M_AXI_AWUSER = {C_NUM_MASTER_SLOTS{mi_auser[0+:C_AXI_AWUSER_WIDTH] }};
// Broadcast AR transfer payload to all MI-slots
assign M_AXI_ARID = {C_NUM_MASTER_SLOTS{mi_aid}};
assign M_AXI_ARADDR = {C_NUM_MASTER_SLOTS{mi_aaddr}};
assign M_AXI_ARLEN = {C_NUM_MASTER_SLOTS{mi_alen }};
assign M_AXI_ARSIZE = {C_NUM_MASTER_SLOTS{mi_asize}};
assign M_AXI_ARLOCK = {C_NUM_MASTER_SLOTS{mi_alock}};
assign M_AXI_ARPROT = {C_NUM_MASTER_SLOTS{mi_aprot}};
assign M_AXI_ARREGION = {C_NUM_MASTER_SLOTS{mi_aregion}};
assign M_AXI_ARBURST = {C_NUM_MASTER_SLOTS{mi_aburst}};
assign M_AXI_ARCACHE = {C_NUM_MASTER_SLOTS{mi_acache}};
assign M_AXI_ARQOS = {C_NUM_MASTER_SLOTS{mi_aqos }};
assign M_AXI_ARUSER = {C_NUM_MASTER_SLOTS{mi_auser[0+:C_AXI_ARUSER_WIDTH] }};
// W-channel MI handshakes
assign M_AXI_WVALID = mi_wvalid[0+:C_NUM_MASTER_SLOTS];
assign mi_wready[0+:C_NUM_MASTER_SLOTS] = M_AXI_WREADY;
// Broadcast W transfer payload to all MI-slots
assign M_AXI_WLAST = {C_NUM_MASTER_SLOTS{mi_wlast}};
assign M_AXI_WUSER = {C_NUM_MASTER_SLOTS{mi_wuser}};
assign M_AXI_WDATA = {C_NUM_MASTER_SLOTS{mi_wdata}};
assign M_AXI_WSTRB = {C_NUM_MASTER_SLOTS{mi_wstrb}};
assign M_AXI_WID = {C_NUM_MASTER_SLOTS{mi_wid}};
// Broadcast R transfer payload to all SI-slots
assign S_AXI_RLAST = {C_NUM_SLAVE_SLOTS{si_rlast}};
assign S_AXI_RRESP = {C_NUM_SLAVE_SLOTS{si_rresp}};
assign S_AXI_RUSER = {C_NUM_SLAVE_SLOTS{si_ruser}};
assign S_AXI_RDATA = {C_NUM_SLAVE_SLOTS{si_rdata}};
assign S_AXI_RID = {C_NUM_SLAVE_SLOTS{mi_aid}};
// Broadcast B transfer payload to all SI-slots
assign S_AXI_BRESP = {C_NUM_SLAVE_SLOTS{si_bresp}};
assign S_AXI_BUSER = {C_NUM_SLAVE_SLOTS{si_buser}};
assign S_AXI_BID = {C_NUM_SLAVE_SLOTS{mi_aid}};
if (C_NUM_SLAVE_SLOTS>1) begin : gen_wmux
// SI WVALID mux.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (C_NUM_SLAVE_SLOTS),
.C_SEL_WIDTH (P_NUM_SLAVE_SLOTS_LOG),
.C_DATA_WIDTH (1)
) si_w_valid_mux_inst
(
.S (aa_grant_enc),
.A (S_AXI_WVALID),
.O (aa_wvalid),
.OE (w_transfer_en)
);
// SI W-channel payload mux
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (C_NUM_SLAVE_SLOTS),
.C_SEL_WIDTH (P_NUM_SLAVE_SLOTS_LOG),
.C_DATA_WIDTH (P_WMESG_WIDTH)
) si_w_payload_mux_inst
(
.S (aa_grant_enc),
.A (si_wmesg),
.O (mi_wmesg),
.OE (1'b1)
);
for (gen_si_slot=0; gen_si_slot<C_NUM_SLAVE_SLOTS; gen_si_slot=gen_si_slot+1) begin : gen_wmesg
assign si_wmesg[gen_si_slot*P_WMESG_WIDTH+:P_WMESG_WIDTH] = { // Concatenate from MSB to LSB
((C_AXI_PROTOCOL == P_AXI3) ? f_extend_ID(S_AXI_WID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH], gen_si_slot) : 1'b0),
S_AXI_WUSER[gen_si_slot*C_AXI_WUSER_WIDTH+:C_AXI_WUSER_WIDTH],
S_AXI_WSTRB[gen_si_slot*C_AXI_DATA_WIDTH/8+:C_AXI_DATA_WIDTH/8],
S_AXI_WDATA[gen_si_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH],
S_AXI_WLAST[gen_si_slot*1+:1]
};
end // gen_wmesg
assign mi_wlast = mi_wmesg[0];
assign mi_wdata = mi_wmesg[1 +: C_AXI_DATA_WIDTH];
assign mi_wstrb = mi_wmesg[1+C_AXI_DATA_WIDTH +: C_AXI_DATA_WIDTH/8];
assign mi_wuser = mi_wmesg[1+C_AXI_DATA_WIDTH+C_AXI_DATA_WIDTH/8 +: C_AXI_WUSER_WIDTH];
assign mi_wid = mi_wmesg[1+C_AXI_DATA_WIDTH+C_AXI_DATA_WIDTH/8+C_AXI_WUSER_WIDTH +: P_AXI_WID_WIDTH];
end else begin : gen_no_wmux
assign aa_wvalid = w_transfer_en & S_AXI_WVALID;
assign mi_wlast = S_AXI_WLAST;
assign mi_wdata = S_AXI_WDATA;
assign mi_wstrb = S_AXI_WSTRB;
assign mi_wuser = S_AXI_WUSER;
assign mi_wid = S_AXI_WID;
end // gen_wmux
// Receive RVALID from targeted MI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (1)
) mi_rvalid_mux_inst
(
.S (m_atarget_enc),
.A (mi_rvalid),
.O (aa_rvalid),
.OE (r_transfer_en)
);
// MI R-channel payload mux
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (P_RMESG_WIDTH)
) mi_rmesg_mux_inst
(
.S (m_atarget_enc),
.A (mi_rmesg),
.O (aa_rmesg),
.OE (1'b1)
);
axi_register_slice_v2_1_axic_register_slice #
(
.C_FAMILY (C_FAMILY),
.C_DATA_WIDTH (P_RMESG_WIDTH),
.C_REG_CONFIG (P_R_REG_CONFIG)
)
reg_slice_r
(
// System Signals
.ACLK(ACLK),
.ARESET(reset),
// Slave side
.S_PAYLOAD_DATA(aa_rmesg),
.S_VALID(aa_rvalid),
.S_READY(aa_rready),
// Master side
.M_PAYLOAD_DATA(sr_rmesg),
.M_VALID(sr_rvalid),
.M_READY(sr_rready)
);
assign mi_rvalid[0+:C_NUM_MASTER_SLOTS] = M_AXI_RVALID;
assign mi_rlast[0+:C_NUM_MASTER_SLOTS] = M_AXI_RLAST;
assign mi_rresp[0+:C_NUM_MASTER_SLOTS*2] = M_AXI_RRESP;
assign mi_ruser[0+:C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH] = M_AXI_RUSER;
assign mi_rdata[0+:C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH] = M_AXI_RDATA;
assign M_AXI_RREADY = mi_rready[0+:C_NUM_MASTER_SLOTS];
for (gen_mi_slot=0; gen_mi_slot<P_NUM_MASTER_SLOTS_DE; gen_mi_slot=gen_mi_slot+1) begin : gen_rmesg
assign mi_rmesg[gen_mi_slot*P_RMESG_WIDTH+:P_RMESG_WIDTH] = { // Concatenate from MSB to LSB
mi_ruser[gen_mi_slot*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH],
mi_rdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH],
mi_rresp[gen_mi_slot*2+:2],
mi_rlast[gen_mi_slot*1+:1]
};
end // gen_rmesg
assign si_rlast = sr_rmesg[0];
assign si_rresp = sr_rmesg[1 +: 2];
assign si_rdata = sr_rmesg[1+2 +: C_AXI_DATA_WIDTH];
assign si_ruser = sr_rmesg[1+2+C_AXI_DATA_WIDTH +: C_AXI_RUSER_WIDTH];
// Receive BVALID from targeted MI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (1)
) mi_bvalid_mux_inst
(
.S (m_atarget_enc),
.A (mi_bvalid),
.O (aa_bvalid),
.OE (b_transfer_en)
);
// MI B-channel payload mux
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (P_BMESG_WIDTH)
) mi_bmesg_mux_inst
(
.S (m_atarget_enc),
.A (mi_bmesg),
.O (si_bmesg),
.OE (1'b1)
);
assign mi_bvalid[0+:C_NUM_MASTER_SLOTS] = M_AXI_BVALID;
assign mi_bresp[0+:C_NUM_MASTER_SLOTS*2] = M_AXI_BRESP;
assign mi_buser[0+:C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH] = M_AXI_BUSER;
assign M_AXI_BREADY = mi_bready[0+:C_NUM_MASTER_SLOTS];
for (gen_mi_slot=0; gen_mi_slot<P_NUM_MASTER_SLOTS_DE; gen_mi_slot=gen_mi_slot+1) begin : gen_bmesg
assign mi_bmesg[gen_mi_slot*P_BMESG_WIDTH+:P_BMESG_WIDTH] = { // Concatenate from MSB to LSB
mi_buser[gen_mi_slot*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH],
mi_bresp[gen_mi_slot*2+:2]
};
end // gen_bmesg
assign si_bresp = si_bmesg[0 +: 2];
assign si_buser = si_bmesg[2 +: C_AXI_BUSER_WIDTH];
if (C_DEBUG) begin : gen_debug_trans_seq
// DEBUG WRITE TRANSACTION SEQUENCE COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_aw_trans_seq_i <= 1;
end else begin
if (aa_awvalid && aa_awready) begin
debug_aw_trans_seq_i <= debug_aw_trans_seq_i + 1;
end
end
end
// DEBUG READ TRANSACTION SEQUENCE COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_ar_trans_seq_i <= 1;
end else begin
if (aa_arvalid && aa_arready) begin
debug_ar_trans_seq_i <= debug_ar_trans_seq_i + 1;
end
end
end
// DEBUG WRITE BEAT COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_w_beat_cnt_i <= 0;
end else if (aa_wready & aa_wvalid) begin
if (mi_wlast) begin
debug_w_beat_cnt_i <= 0;
end else begin
debug_w_beat_cnt_i <= debug_w_beat_cnt_i + 1;
end
end
end // Clocked process
// DEBUG READ BEAT COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_r_beat_cnt_i <= 0;
end else if (sr_rready & sr_rvalid) begin
if (si_rlast) begin
debug_r_beat_cnt_i <= 0;
end else begin
debug_r_beat_cnt_i <= debug_r_beat_cnt_i + 1;
end
end
end // Clocked process
end // gen_debug_trans_seq
if (C_RANGE_CHECK) begin : gen_decerr
// Highest MI-slot (index C_NUM_MASTER_SLOTS) is the error handler
axi_crossbar_v2_1_decerr_slave #
(
.C_AXI_ID_WIDTH (1),
.C_AXI_DATA_WIDTH (C_AXI_DATA_WIDTH),
.C_AXI_RUSER_WIDTH (C_AXI_RUSER_WIDTH),
.C_AXI_BUSER_WIDTH (C_AXI_BUSER_WIDTH),
.C_AXI_PROTOCOL (C_AXI_PROTOCOL),
.C_RESP (P_DECERR)
)
decerr_slave_inst
(
.S_AXI_ACLK (ACLK),
.S_AXI_ARESET (reset),
.S_AXI_AWID (1'b0),
.S_AXI_AWVALID (mi_awvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_AWREADY (mi_awready[C_NUM_MASTER_SLOTS]),
.S_AXI_WLAST (mi_wlast),
.S_AXI_WVALID (mi_wvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_WREADY (mi_wready[C_NUM_MASTER_SLOTS]),
.S_AXI_BID (),
.S_AXI_BRESP (mi_bresp[C_NUM_MASTER_SLOTS*2+:2]),
.S_AXI_BUSER (mi_buser[C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH]),
.S_AXI_BVALID (mi_bvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_BREADY (mi_bready[C_NUM_MASTER_SLOTS]),
.S_AXI_ARID (1'b0),
.S_AXI_ARLEN (mi_alen),
.S_AXI_ARVALID (mi_arvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_ARREADY (mi_arready[C_NUM_MASTER_SLOTS]),
.S_AXI_RID (),
.S_AXI_RDATA (mi_rdata[C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH]),
.S_AXI_RRESP (mi_rresp[C_NUM_MASTER_SLOTS*2+:2]),
.S_AXI_RUSER (mi_ruser[C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH]),
.S_AXI_RLAST (mi_rlast[C_NUM_MASTER_SLOTS]),
.S_AXI_RVALID (mi_rvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_RREADY (mi_rready[C_NUM_MASTER_SLOTS])
);
end // gen_decerr
endgenerate
endmodule
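// Hedged sketch of the broadcast-plus-qualified-VALID pattern used by the SASD crossbar above: the address/data
// payload is simply replicated to every MI slot and only the VALID bit of the decoded (one-hot) target is
// asserted. NUM_M, PAYLOAD and the port names below are illustrative assumptions.
module crossbar_broadcast_sketch #
(parameter integer NUM_M = 2,
parameter integer PAYLOAD = 32)
(input wire [PAYLOAD-1:0] s_payload,
input wire s_valid,
input wire [NUM_M-1:0] target_hot, // one-hot decoded target slot
output wire [NUM_M*PAYLOAD-1:0] m_payload,
output wire [NUM_M-1:0] m_valid);
assign m_payload = {NUM_M{s_payload}}; // broadcast payload to all slots
assign m_valid = target_hot & {NUM_M{s_valid}}; // qualify only the targeted slot
endmodule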
|
module axi_crossbar_v2_1_crossbar_sasd #
(
parameter C_FAMILY = "none",
parameter integer C_NUM_SLAVE_SLOTS = 1,
parameter integer C_NUM_MASTER_SLOTS = 1,
parameter integer C_NUM_ADDR_RANGES = 1,
parameter integer C_AXI_ID_WIDTH = 1,
parameter integer C_AXI_ADDR_WIDTH = 32,
parameter integer C_AXI_DATA_WIDTH = 32,
parameter integer C_AXI_PROTOCOL = 0,
parameter [C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64-1:0] C_M_AXI_BASE_ADDR = {C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64{1'b1}},
parameter [C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64-1:0] C_M_AXI_HIGH_ADDR = {C_NUM_MASTER_SLOTS*C_NUM_ADDR_RANGES*64{1'b0}},
parameter [C_NUM_SLAVE_SLOTS*64-1:0] C_S_AXI_BASE_ID = {C_NUM_SLAVE_SLOTS*64{1'b0}},
parameter [C_NUM_SLAVE_SLOTS*64-1:0] C_S_AXI_HIGH_ID = {C_NUM_SLAVE_SLOTS*64{1'b0}},
parameter integer C_AXI_SUPPORTS_USER_SIGNALS = 0,
parameter integer C_AXI_AWUSER_WIDTH = 1,
parameter integer C_AXI_ARUSER_WIDTH = 1,
parameter integer C_AXI_WUSER_WIDTH = 1,
parameter integer C_AXI_RUSER_WIDTH = 1,
parameter integer C_AXI_BUSER_WIDTH = 1,
parameter [C_NUM_SLAVE_SLOTS-1:0] C_S_AXI_SUPPORTS_WRITE = {C_NUM_SLAVE_SLOTS{1'b1}},
parameter [C_NUM_SLAVE_SLOTS-1:0] C_S_AXI_SUPPORTS_READ = {C_NUM_SLAVE_SLOTS{1'b1}},
parameter [C_NUM_MASTER_SLOTS-1:0] C_M_AXI_SUPPORTS_WRITE = {C_NUM_MASTER_SLOTS{1'b1}},
parameter [C_NUM_MASTER_SLOTS-1:0] C_M_AXI_SUPPORTS_READ = {C_NUM_MASTER_SLOTS{1'b1}},
parameter [C_NUM_SLAVE_SLOTS*32-1:0] C_S_AXI_ARB_PRIORITY = {C_NUM_SLAVE_SLOTS{32'h00000000}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_SECURE = {C_NUM_MASTER_SLOTS{32'h00000000}},
parameter [C_NUM_MASTER_SLOTS*32-1:0] C_M_AXI_ERR_MODE = {C_NUM_MASTER_SLOTS{32'h00000000}},
parameter integer C_R_REGISTER = 0,
parameter integer C_RANGE_CHECK = 0,
parameter integer C_ADDR_DECODE = 0,
parameter integer C_DEBUG = 1
)
(
// Global Signals
input wire ACLK,
input wire ARESETN,
// Slave Interface Write Address Ports
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_AWID,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ADDR_WIDTH-1:0] S_AXI_AWADDR,
input wire [C_NUM_SLAVE_SLOTS*8-1:0] S_AXI_AWLEN,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_AWSIZE,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_AWBURST,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_AWLOCK,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_AWCACHE,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_AWPROT,
// input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_AWREGION,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_AWQOS,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_AWUSER_WIDTH-1:0] S_AXI_AWUSER,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_AWVALID,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_AWREADY,
// Slave Interface Write Data Ports
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_WID,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_DATA_WIDTH-1:0] S_AXI_WDATA,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_DATA_WIDTH/8-1:0] S_AXI_WSTRB,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_WLAST,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_WUSER_WIDTH-1:0] S_AXI_WUSER,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_WVALID,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_WREADY,
// Slave Interface Write Response Ports
output wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_BID,
output wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_BRESP,
output wire [C_NUM_SLAVE_SLOTS*C_AXI_BUSER_WIDTH-1:0] S_AXI_BUSER,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_BVALID,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_BREADY,
// Slave Interface Read Address Ports
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_ARID,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ADDR_WIDTH-1:0] S_AXI_ARADDR,
input wire [C_NUM_SLAVE_SLOTS*8-1:0] S_AXI_ARLEN,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_ARSIZE,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_ARBURST,
input wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_ARLOCK,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_ARCACHE,
input wire [C_NUM_SLAVE_SLOTS*3-1:0] S_AXI_ARPROT,
// input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_ARREGION,
input wire [C_NUM_SLAVE_SLOTS*4-1:0] S_AXI_ARQOS,
input wire [C_NUM_SLAVE_SLOTS*C_AXI_ARUSER_WIDTH-1:0] S_AXI_ARUSER,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_ARVALID,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_ARREADY,
// Slave Interface Read Data Ports
output wire [C_NUM_SLAVE_SLOTS*C_AXI_ID_WIDTH-1:0] S_AXI_RID,
output wire [C_NUM_SLAVE_SLOTS*C_AXI_DATA_WIDTH-1:0] S_AXI_RDATA,
output wire [C_NUM_SLAVE_SLOTS*2-1:0] S_AXI_RRESP,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_RLAST,
output wire [C_NUM_SLAVE_SLOTS*C_AXI_RUSER_WIDTH-1:0] S_AXI_RUSER,
output wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_RVALID,
input wire [C_NUM_SLAVE_SLOTS-1:0] S_AXI_RREADY,
// Master Interface Write Address Port
output wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_AWID,
output wire [C_NUM_MASTER_SLOTS*C_AXI_ADDR_WIDTH-1:0] M_AXI_AWADDR,
output wire [C_NUM_MASTER_SLOTS*8-1:0] M_AXI_AWLEN,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_AWSIZE,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_AWBURST,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_AWLOCK,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_AWCACHE,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_AWPROT,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_AWREGION,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_AWQOS,
output wire [C_NUM_MASTER_SLOTS*C_AXI_AWUSER_WIDTH-1:0] M_AXI_AWUSER,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_AWVALID,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_AWREADY,
// Master Interface Write Data Ports
output wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_WID,
output wire [C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH-1:0] M_AXI_WDATA,
output wire [C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH/8-1:0] M_AXI_WSTRB,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_WLAST,
output wire [C_NUM_MASTER_SLOTS*C_AXI_WUSER_WIDTH-1:0] M_AXI_WUSER,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_WVALID,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_WREADY,
// Master Interface Write Response Ports
input wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_BID, // Unused
input wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_BRESP,
input wire [C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH-1:0] M_AXI_BUSER,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_BVALID,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_BREADY,
// Master Interface Read Address Port
output wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_ARID,
output wire [C_NUM_MASTER_SLOTS*C_AXI_ADDR_WIDTH-1:0] M_AXI_ARADDR,
output wire [C_NUM_MASTER_SLOTS*8-1:0] M_AXI_ARLEN,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_ARSIZE,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_ARBURST,
output wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_ARLOCK,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_ARCACHE,
output wire [C_NUM_MASTER_SLOTS*3-1:0] M_AXI_ARPROT,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_ARREGION,
output wire [C_NUM_MASTER_SLOTS*4-1:0] M_AXI_ARQOS,
output wire [C_NUM_MASTER_SLOTS*C_AXI_ARUSER_WIDTH-1:0] M_AXI_ARUSER,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_ARVALID,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_ARREADY,
// Master Interface Read Data Ports
input wire [C_NUM_MASTER_SLOTS*C_AXI_ID_WIDTH-1:0] M_AXI_RID, // Unused
input wire [C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH-1:0] M_AXI_RDATA,
input wire [C_NUM_MASTER_SLOTS*2-1:0] M_AXI_RRESP,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_RLAST,
input wire [C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH-1:0] M_AXI_RUSER,
input wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_RVALID,
output wire [C_NUM_MASTER_SLOTS-1:0] M_AXI_RREADY
);
localparam integer P_AXI4 = 0;
localparam integer P_AXI3 = 1;
localparam integer P_AXILITE = 2;
localparam integer P_NUM_MASTER_SLOTS_DE = C_RANGE_CHECK ? C_NUM_MASTER_SLOTS+1 : C_NUM_MASTER_SLOTS;
localparam integer P_NUM_MASTER_SLOTS_LOG = (C_NUM_MASTER_SLOTS>1) ? f_ceil_log2(C_NUM_MASTER_SLOTS) : 1;
localparam integer P_NUM_MASTER_SLOTS_DE_LOG = (P_NUM_MASTER_SLOTS_DE>1) ? f_ceil_log2(P_NUM_MASTER_SLOTS_DE) : 1;
localparam integer P_NUM_SLAVE_SLOTS_LOG = (C_NUM_SLAVE_SLOTS>1) ? f_ceil_log2(C_NUM_SLAVE_SLOTS) : 1;
localparam integer P_AXI_AUSER_WIDTH = (C_AXI_AWUSER_WIDTH > C_AXI_ARUSER_WIDTH) ? C_AXI_AWUSER_WIDTH : C_AXI_ARUSER_WIDTH;
localparam integer P_AXI_WID_WIDTH = (C_AXI_PROTOCOL == P_AXI3) ? C_AXI_ID_WIDTH : 1;
localparam integer P_AMESG_WIDTH = C_AXI_ID_WIDTH + C_AXI_ADDR_WIDTH + 8+3+2+3+2+4+4 + P_AXI_AUSER_WIDTH + 4;
localparam integer P_BMESG_WIDTH = 2 + C_AXI_BUSER_WIDTH;
localparam integer P_RMESG_WIDTH = 1+2 + C_AXI_DATA_WIDTH + C_AXI_RUSER_WIDTH;
localparam integer P_WMESG_WIDTH = 1 + C_AXI_DATA_WIDTH + C_AXI_DATA_WIDTH/8 + C_AXI_WUSER_WIDTH + P_AXI_WID_WIDTH;
localparam [31:0] P_AXILITE_ERRMODE = 32'h00000001;
localparam integer P_NONSECURE_BIT = 1;
localparam [C_NUM_MASTER_SLOTS-1:0] P_M_SECURE_MASK = f_bit32to1_mi(C_M_AXI_SECURE); // Mask of secure MI-slots
localparam [C_NUM_MASTER_SLOTS-1:0] P_M_AXILITE_MASK = f_m_axilite(0); // Mask of axilite rule-check MI-slots
localparam [1:0] P_FIXED = 2'b00;
localparam integer P_BYPASS = 0;
localparam integer P_LIGHTWT = 7;
localparam integer P_FULLY_REG = 1;
localparam integer P_R_REG_CONFIG = C_R_REGISTER == 8 ? // "Automatic" reg-slice
(C_RANGE_CHECK ? ((C_AXI_PROTOCOL == P_AXILITE) ? P_LIGHTWT : P_FULLY_REG) : P_BYPASS) : // Bypass if no R-channel mux
C_R_REGISTER;
localparam P_DECERR = 2'b11;
//---------------------------------------------------------------------------
// Functions
//---------------------------------------------------------------------------
// Ceiling of log2(x)
function integer f_ceil_log2
(
input integer x
);
integer acc;
begin
acc=0;
while ((2**acc) < x)
acc = acc + 1;
f_ceil_log2 = acc;
end
endfunction
// Isolate thread bits of input S_ID and add to BASE_ID (RNG00) to form MI-side ID value
// only for end-point SI-slots
function [C_AXI_ID_WIDTH-1:0] f_extend_ID
(
input [C_AXI_ID_WIDTH-1:0] s_id,
input integer slot
);
begin
f_extend_ID = C_S_AXI_BASE_ID[slot*64+:C_AXI_ID_WIDTH] | (s_id & (C_S_AXI_BASE_ID[slot*64+:C_AXI_ID_WIDTH] ^ C_S_AXI_HIGH_ID[slot*64+:C_AXI_ID_WIDTH]));
end
endfunction
// Convert Bit32 vector of range [0,1] to Bit1 vector on MI
function [C_NUM_MASTER_SLOTS-1:0] f_bit32to1_mi
(input [C_NUM_MASTER_SLOTS*32-1:0] vec32);
integer mi;
begin
for (mi=0; mi<C_NUM_MASTER_SLOTS; mi=mi+1) begin
f_bit32to1_mi[mi] = vec32[mi*32];
end
end
endfunction
// AxiLite error-checking mask (on MI)
function [C_NUM_MASTER_SLOTS-1:0] f_m_axilite
(
input integer null_arg
);
integer mi;
begin
for (mi=0; mi<C_NUM_MASTER_SLOTS; mi=mi+1) begin
f_m_axilite[mi] = (C_M_AXI_ERR_MODE[mi*32+:32] == P_AXILITE_ERRMODE);
end
end
endfunction
genvar gen_si_slot;
genvar gen_mi_slot;
wire [C_NUM_SLAVE_SLOTS*P_AMESG_WIDTH-1:0] si_awmesg ;
wire [C_NUM_SLAVE_SLOTS*P_AMESG_WIDTH-1:0] si_armesg ;
wire [P_AMESG_WIDTH-1:0] aa_amesg ;
wire [C_AXI_ID_WIDTH-1:0] mi_aid ;
wire [C_AXI_ADDR_WIDTH-1:0] mi_aaddr ;
wire [8-1:0] mi_alen ;
wire [3-1:0] mi_asize ;
wire [2-1:0] mi_alock ;
wire [3-1:0] mi_aprot ;
wire [2-1:0] mi_aburst ;
wire [4-1:0] mi_acache ;
wire [4-1:0] mi_aregion ;
wire [4-1:0] mi_aqos ;
wire [P_AXI_AUSER_WIDTH-1:0] mi_auser ;
wire [4-1:0] target_region ;
wire [C_NUM_SLAVE_SLOTS*1-1:0] aa_grant_hot ;
wire [P_NUM_SLAVE_SLOTS_LOG-1:0] aa_grant_enc ;
wire aa_grant_rnw ;
wire aa_grant_any ;
wire [C_NUM_MASTER_SLOTS-1:0] target_mi_hot ;
wire [P_NUM_MASTER_SLOTS_LOG-1:0] target_mi_enc ;
reg [P_NUM_MASTER_SLOTS_DE-1:0] m_atarget_hot ;
reg [P_NUM_MASTER_SLOTS_DE_LOG-1:0] m_atarget_enc ;
wire [P_NUM_MASTER_SLOTS_DE_LOG-1:0] m_atarget_enc_comb ;
wire match;
wire any_error ;
wire [7:0] m_aerror_i ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_awvalid ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_awready ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_arvalid ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_arready ;
wire aa_awvalid ;
wire aa_awready ;
wire aa_arvalid ;
wire aa_arready ;
wire mi_awvalid_en;
wire mi_awready_mux;
wire mi_arvalid_en;
wire mi_arready_mux;
wire w_transfer_en;
wire w_complete_mux;
wire b_transfer_en;
wire b_complete_mux;
wire r_transfer_en;
wire r_complete_mux;
wire target_secure;
wire target_write;
wire target_read;
wire target_axilite;
wire [P_BMESG_WIDTH-1:0] si_bmesg ;
wire [P_NUM_MASTER_SLOTS_DE*P_BMESG_WIDTH-1:0] mi_bmesg ;
wire [P_NUM_MASTER_SLOTS_DE*2-1:0] mi_bresp ;
wire [P_NUM_MASTER_SLOTS_DE*C_AXI_BUSER_WIDTH-1:0] mi_buser ;
wire [2-1:0] si_bresp ;
wire [C_AXI_BUSER_WIDTH-1:0] si_buser ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_bvalid ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_bready ;
wire aa_bvalid ;
wire aa_bready ;
wire si_bready ;
wire [C_NUM_SLAVE_SLOTS-1:0] si_bvalid;
wire [P_RMESG_WIDTH-1:0] aa_rmesg ;
wire [P_RMESG_WIDTH-1:0] sr_rmesg ;
wire [P_NUM_MASTER_SLOTS_DE*P_RMESG_WIDTH-1:0] mi_rmesg ;
wire [P_NUM_MASTER_SLOTS_DE*2-1:0] mi_rresp ;
wire [P_NUM_MASTER_SLOTS_DE*C_AXI_RUSER_WIDTH-1:0] mi_ruser ;
wire [P_NUM_MASTER_SLOTS_DE*C_AXI_DATA_WIDTH-1:0] mi_rdata ;
wire [P_NUM_MASTER_SLOTS_DE*1-1:0] mi_rlast ;
wire [2-1:0] si_rresp ;
wire [C_AXI_RUSER_WIDTH-1:0] si_ruser ;
wire [C_AXI_DATA_WIDTH-1:0] si_rdata ;
wire si_rlast ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_rvalid ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_rready ;
wire aa_rvalid ;
wire aa_rready ;
wire sr_rvalid ;
wire si_rready ;
wire sr_rready ;
wire [C_NUM_SLAVE_SLOTS-1:0] si_rvalid;
wire [C_NUM_SLAVE_SLOTS*P_WMESG_WIDTH-1:0] si_wmesg ;
wire [P_WMESG_WIDTH-1:0] mi_wmesg ;
wire [C_AXI_ID_WIDTH-1:0] mi_wid ;
wire [C_AXI_DATA_WIDTH-1:0] mi_wdata ;
wire [C_AXI_DATA_WIDTH/8-1:0] mi_wstrb ;
wire [C_AXI_WUSER_WIDTH-1:0] mi_wuser ;
wire [1-1:0] mi_wlast ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_wvalid ;
wire [P_NUM_MASTER_SLOTS_DE-1:0] mi_wready ;
wire aa_wvalid ;
wire aa_wready ;
wire [C_NUM_SLAVE_SLOTS-1:0] si_wready;
reg [7:0] debug_r_beat_cnt_i;
reg [7:0] debug_w_beat_cnt_i;
reg [7:0] debug_aw_trans_seq_i;
reg [7:0] debug_ar_trans_seq_i;
reg aresetn_d = 1'b0; // Reset delay register
always @(posedge ACLK) begin
if (~ARESETN) begin
aresetn_d <= 1'b0;
end else begin
aresetn_d <= ARESETN;
end
end
wire reset;
assign reset = ~aresetn_d;
generate
axi_crossbar_v2_1_addr_arbiter_sasd #
(
.C_FAMILY (C_FAMILY),
.C_NUM_S (C_NUM_SLAVE_SLOTS),
.C_NUM_S_LOG (P_NUM_SLAVE_SLOTS_LOG),
.C_AMESG_WIDTH (P_AMESG_WIDTH),
.C_GRANT_ENC (1),
.C_ARB_PRIORITY (C_S_AXI_ARB_PRIORITY)
)
addr_arbiter_inst
(
.ACLK (ACLK),
.ARESET (reset),
// Vector of SI-side AW command request inputs
.S_AWMESG (si_awmesg),
.S_ARMESG (si_armesg),
.S_AWVALID (S_AXI_AWVALID),
.S_AWREADY (S_AXI_AWREADY),
.S_ARVALID (S_AXI_ARVALID),
.S_ARREADY (S_AXI_ARREADY),
.M_GRANT_ENC (aa_grant_enc),
.M_GRANT_HOT (aa_grant_hot), // SI-slot 1-hot mask of granted command
.M_GRANT_ANY (aa_grant_any),
.M_GRANT_RNW (aa_grant_rnw),
.M_AMESG (aa_amesg), // Either S_AWMESG or S_ARMESG, as indicated by M_AWVALID and M_ARVALID.
.M_AWVALID (aa_awvalid),
.M_AWREADY (aa_awready),
.M_ARVALID (aa_arvalid),
.M_ARREADY (aa_arready)
);
if (C_ADDR_DECODE) begin : gen_addr_decoder
axi_crossbar_v2_1_addr_decoder #
(
.C_FAMILY (C_FAMILY),
.C_NUM_TARGETS (C_NUM_MASTER_SLOTS),
.C_NUM_TARGETS_LOG (P_NUM_MASTER_SLOTS_LOG),
.C_NUM_RANGES (C_NUM_ADDR_RANGES),
.C_ADDR_WIDTH (C_AXI_ADDR_WIDTH),
.C_TARGET_ENC (1),
.C_TARGET_HOT (1),
.C_REGION_ENC (1),
.C_BASE_ADDR (C_M_AXI_BASE_ADDR),
.C_HIGH_ADDR (C_M_AXI_HIGH_ADDR),
.C_TARGET_QUAL ({C_NUM_MASTER_SLOTS{1'b1}}),
.C_RESOLUTION (2)
)
addr_decoder_inst
(
.ADDR (mi_aaddr),
.TARGET_HOT (target_mi_hot),
.TARGET_ENC (target_mi_enc),
.MATCH (match),
.REGION (target_region)
);
end else begin : gen_no_addr_decoder
assign target_mi_hot = 1;
assign match = 1'b1;
assign target_region = 4'b0000;
end // gen_addr_decoder
// AW-channel arbiter command transfer completes only when the M-side AW-channel transfer, the W-channel burst, and the B-channel response have all completed.
axi_crossbar_v2_1_splitter #
(
.C_NUM_M (3)
)
splitter_aw
(
.ACLK (ACLK),
.ARESET (reset),
.S_VALID (aa_awvalid),
.S_READY (aa_awready),
.M_VALID ({mi_awvalid_en, w_transfer_en, b_transfer_en}),
.M_READY ({mi_awready_mux, w_complete_mux, b_complete_mux})
);
// AR-channel arbiter command transfer completes only when both the M-side AR-channel transfer and the R-channel burst have completed.
axi_crossbar_v2_1_splitter #
(
.C_NUM_M (2)
)
splitter_ar
(
.ACLK (ACLK),
.ARESET (reset),
.S_VALID (aa_arvalid),
.S_READY (aa_arready),
.M_VALID ({mi_arvalid_en, r_transfer_en}),
.M_READY ({mi_arready_mux, r_complete_mux})
);
assign target_secure = |(target_mi_hot & P_M_SECURE_MASK);
assign target_write = |(target_mi_hot & C_M_AXI_SUPPORTS_WRITE);
assign target_read = |(target_mi_hot & C_M_AXI_SUPPORTS_READ);
assign target_axilite = |(target_mi_hot & P_M_AXILITE_MASK);
assign any_error = C_RANGE_CHECK && (m_aerror_i != 0); // DECERR if error-detection enabled and any error condition.
assign m_aerror_i[0] = ~match; // Invalid target address
assign m_aerror_i[1] = target_secure && mi_aprot[P_NONSECURE_BIT]; // TrustZone violation
assign m_aerror_i[2] = target_axilite && ((mi_alen != 0) ||
(mi_asize[1:0] == 2'b11) || (mi_asize[2] == 1'b1)); // AxiLite access violation
assign m_aerror_i[3] = (~aa_grant_rnw && ~target_write) ||
(aa_grant_rnw && ~target_read); // R/W direction unsupported by target
assign m_aerror_i[7:4] = 4'b0000; // Reserved
assign m_atarget_enc_comb = any_error ? (P_NUM_MASTER_SLOTS_DE-1) : target_mi_enc; // Select MI slot or decerr_slave
always @(posedge ACLK) begin
if (reset) begin
m_atarget_hot <= 0;
m_atarget_enc <= 0;
end else begin
m_atarget_hot <= {P_NUM_MASTER_SLOTS_DE{aa_grant_any}} & (any_error ? {1'b1, {C_NUM_MASTER_SLOTS{1'b0}}} : {1'b0, target_mi_hot}); // Select MI slot or decerr_slave
m_atarget_enc <= m_atarget_enc_comb;
end
end
// Receive AWREADY from targeted MI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (1)
) mi_awready_mux_inst
(
.S (m_atarget_enc),
.A (mi_awready),
.O (mi_awready_mux),
.OE (mi_awvalid_en)
);
// Receive ARREADY from targeted MI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (1)
) mi_arready_mux_inst
(
.S (m_atarget_enc),
.A (mi_arready),
.O (mi_arready_mux),
.OE (mi_arvalid_en)
);
assign mi_awvalid = m_atarget_hot & {P_NUM_MASTER_SLOTS_DE{mi_awvalid_en}}; // Assert AWVALID on targeted MI.
assign mi_arvalid = m_atarget_hot & {P_NUM_MASTER_SLOTS_DE{mi_arvalid_en}}; // Assert ARVALID on targeted MI.
assign M_AXI_AWVALID = mi_awvalid[0+:C_NUM_MASTER_SLOTS]; // Propagate to MI slots.
assign M_AXI_ARVALID = mi_arvalid[0+:C_NUM_MASTER_SLOTS]; // Propagate to MI slots.
assign mi_awready[0+:C_NUM_MASTER_SLOTS] = M_AXI_AWREADY; // Copy from MI slots.
assign mi_arready[0+:C_NUM_MASTER_SLOTS] = M_AXI_ARREADY; // Copy from MI slots.
// Receive WREADY from targeted MI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (1)
) mi_wready_mux_inst
(
.S (m_atarget_enc),
.A (mi_wready),
.O (aa_wready),
.OE (w_transfer_en)
);
assign mi_wvalid = m_atarget_hot & {P_NUM_MASTER_SLOTS_DE{aa_wvalid}}; // Assert WVALID on targeted MI.
assign si_wready = aa_grant_hot & {C_NUM_SLAVE_SLOTS{aa_wready}}; // Assert WREADY on granted SI.
assign S_AXI_WREADY = si_wready;
assign w_complete_mux = aa_wready & aa_wvalid & mi_wlast; // W burst complete on designated SI/MI.
// Receive RREADY from granted SI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (C_NUM_SLAVE_SLOTS),
.C_SEL_WIDTH (P_NUM_SLAVE_SLOTS_LOG),
.C_DATA_WIDTH (1)
) si_rready_mux_inst
(
.S (aa_grant_enc),
.A (S_AXI_RREADY),
.O (si_rready),
.OE (r_transfer_en)
);
assign sr_rready = si_rready & r_transfer_en;
assign mi_rready = m_atarget_hot & {P_NUM_MASTER_SLOTS_DE{aa_rready}}; // Assert RREADY on targeted MI.
assign si_rvalid = aa_grant_hot & {C_NUM_SLAVE_SLOTS{sr_rvalid}}; // Assert RVALID on granted SI.
assign S_AXI_RVALID = si_rvalid;
assign r_complete_mux = sr_rready & sr_rvalid & si_rlast; // R burst complete on designated SI/MI.
// Receive BREADY from granted SI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (C_NUM_SLAVE_SLOTS),
.C_SEL_WIDTH (P_NUM_SLAVE_SLOTS_LOG),
.C_DATA_WIDTH (1)
) si_bready_mux_inst
(
.S (aa_grant_enc),
.A (S_AXI_BREADY),
.O (si_bready),
.OE (b_transfer_en)
);
assign aa_bready = si_bready & b_transfer_en;
assign mi_bready = m_atarget_hot & {P_NUM_MASTER_SLOTS_DE{aa_bready}}; // Assert BREADY on targeted MI.
assign si_bvalid = aa_grant_hot & {C_NUM_SLAVE_SLOTS{aa_bvalid}}; // Assert BVALID on granted SI.
assign S_AXI_BVALID = si_bvalid;
assign b_complete_mux = aa_bready & aa_bvalid; // B transfer complete on designated SI/MI.
for (gen_si_slot=0; gen_si_slot<C_NUM_SLAVE_SLOTS; gen_si_slot=gen_si_slot+1) begin : gen_si_amesg
assign si_armesg[gen_si_slot*P_AMESG_WIDTH +: P_AMESG_WIDTH] = { // Concatenate from MSB to LSB
4'b0000,
// S_AXI_ARREGION[gen_si_slot*4+:4],
S_AXI_ARUSER[gen_si_slot*C_AXI_ARUSER_WIDTH +: C_AXI_ARUSER_WIDTH],
S_AXI_ARQOS[gen_si_slot*4+:4],
S_AXI_ARCACHE[gen_si_slot*4+:4],
S_AXI_ARBURST[gen_si_slot*2+:2],
S_AXI_ARPROT[gen_si_slot*3+:3],
S_AXI_ARLOCK[gen_si_slot*2+:2],
S_AXI_ARSIZE[gen_si_slot*3+:3],
S_AXI_ARLEN[gen_si_slot*8+:8],
S_AXI_ARADDR[gen_si_slot*C_AXI_ADDR_WIDTH +: C_AXI_ADDR_WIDTH],
f_extend_ID(S_AXI_ARID[gen_si_slot*C_AXI_ID_WIDTH +: C_AXI_ID_WIDTH], gen_si_slot)
};
assign si_awmesg[gen_si_slot*P_AMESG_WIDTH +: P_AMESG_WIDTH] = { // Concatenate from MSB to LSB
4'b0000,
// S_AXI_AWREGION[gen_si_slot*4+:4],
S_AXI_AWUSER[gen_si_slot*C_AXI_AWUSER_WIDTH +: C_AXI_AWUSER_WIDTH],
S_AXI_AWQOS[gen_si_slot*4+:4],
S_AXI_AWCACHE[gen_si_slot*4+:4],
S_AXI_AWBURST[gen_si_slot*2+:2],
S_AXI_AWPROT[gen_si_slot*3+:3],
S_AXI_AWLOCK[gen_si_slot*2+:2],
S_AXI_AWSIZE[gen_si_slot*3+:3],
S_AXI_AWLEN[gen_si_slot*8+:8],
S_AXI_AWADDR[gen_si_slot*C_AXI_ADDR_WIDTH +: C_AXI_ADDR_WIDTH],
f_extend_ID(S_AXI_AWID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH], gen_si_slot)
};
end // gen_si_amesg
assign mi_aid = aa_amesg[0 +: C_AXI_ID_WIDTH];
assign mi_aaddr = aa_amesg[C_AXI_ID_WIDTH +: C_AXI_ADDR_WIDTH];
assign mi_alen = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH +: 8];
assign mi_asize = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8 +: 3];
assign mi_alock = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3 +: 2];
assign mi_aprot = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2 +: 3];
assign mi_aburst = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3 +: 2];
assign mi_acache = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+2 +: 4];
assign mi_aqos = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+2+4 +: 4];
assign mi_auser = aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+2+4+4 +: P_AXI_AUSER_WIDTH];
assign mi_aregion = (C_ADDR_DECODE != 0) ? target_region : aa_amesg[C_AXI_ID_WIDTH+C_AXI_ADDR_WIDTH+8+3+2+3+2+4+4+P_AXI_AUSER_WIDTH +: 4];
// Broadcast AW transfer payload to all MI-slots
assign M_AXI_AWID = {C_NUM_MASTER_SLOTS{mi_aid}};
assign M_AXI_AWADDR = {C_NUM_MASTER_SLOTS{mi_aaddr}};
assign M_AXI_AWLEN = {C_NUM_MASTER_SLOTS{mi_alen }};
assign M_AXI_AWSIZE = {C_NUM_MASTER_SLOTS{mi_asize}};
assign M_AXI_AWLOCK = {C_NUM_MASTER_SLOTS{mi_alock}};
assign M_AXI_AWPROT = {C_NUM_MASTER_SLOTS{mi_aprot}};
assign M_AXI_AWREGION = {C_NUM_MASTER_SLOTS{mi_aregion}};
assign M_AXI_AWBURST = {C_NUM_MASTER_SLOTS{mi_aburst}};
assign M_AXI_AWCACHE = {C_NUM_MASTER_SLOTS{mi_acache}};
assign M_AXI_AWQOS = {C_NUM_MASTER_SLOTS{mi_aqos }};
assign M_AXI_AWUSER = {C_NUM_MASTER_SLOTS{mi_auser[0+:C_AXI_AWUSER_WIDTH] }};
// Broadcast AR transfer payload to all MI-slots
assign M_AXI_ARID = {C_NUM_MASTER_SLOTS{mi_aid}};
assign M_AXI_ARADDR = {C_NUM_MASTER_SLOTS{mi_aaddr}};
assign M_AXI_ARLEN = {C_NUM_MASTER_SLOTS{mi_alen }};
assign M_AXI_ARSIZE = {C_NUM_MASTER_SLOTS{mi_asize}};
assign M_AXI_ARLOCK = {C_NUM_MASTER_SLOTS{mi_alock}};
assign M_AXI_ARPROT = {C_NUM_MASTER_SLOTS{mi_aprot}};
assign M_AXI_ARREGION = {C_NUM_MASTER_SLOTS{mi_aregion}};
assign M_AXI_ARBURST = {C_NUM_MASTER_SLOTS{mi_aburst}};
assign M_AXI_ARCACHE = {C_NUM_MASTER_SLOTS{mi_acache}};
assign M_AXI_ARQOS = {C_NUM_MASTER_SLOTS{mi_aqos }};
assign M_AXI_ARUSER = {C_NUM_MASTER_SLOTS{mi_auser[0+:C_AXI_ARUSER_WIDTH] }};
// W-channel MI handshakes
assign M_AXI_WVALID = mi_wvalid[0+:C_NUM_MASTER_SLOTS];
assign mi_wready[0+:C_NUM_MASTER_SLOTS] = M_AXI_WREADY;
// Broadcast W transfer payload to all MI-slots
assign M_AXI_WLAST = {C_NUM_MASTER_SLOTS{mi_wlast}};
assign M_AXI_WUSER = {C_NUM_MASTER_SLOTS{mi_wuser}};
assign M_AXI_WDATA = {C_NUM_MASTER_SLOTS{mi_wdata}};
assign M_AXI_WSTRB = {C_NUM_MASTER_SLOTS{mi_wstrb}};
assign M_AXI_WID = {C_NUM_MASTER_SLOTS{mi_wid}};
// Broadcast R transfer payload to all SI-slots
assign S_AXI_RLAST = {C_NUM_SLAVE_SLOTS{si_rlast}};
assign S_AXI_RRESP = {C_NUM_SLAVE_SLOTS{si_rresp}};
assign S_AXI_RUSER = {C_NUM_SLAVE_SLOTS{si_ruser}};
assign S_AXI_RDATA = {C_NUM_SLAVE_SLOTS{si_rdata}};
assign S_AXI_RID = {C_NUM_SLAVE_SLOTS{mi_aid}};
// Broadcast B transfer payload to all SI-slots
assign S_AXI_BRESP = {C_NUM_SLAVE_SLOTS{si_bresp}};
assign S_AXI_BUSER = {C_NUM_SLAVE_SLOTS{si_buser}};
assign S_AXI_BID = {C_NUM_SLAVE_SLOTS{mi_aid}};
if (C_NUM_SLAVE_SLOTS>1) begin : gen_wmux
// SI WVALID mux.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (C_NUM_SLAVE_SLOTS),
.C_SEL_WIDTH (P_NUM_SLAVE_SLOTS_LOG),
.C_DATA_WIDTH (1)
) si_w_valid_mux_inst
(
.S (aa_grant_enc),
.A (S_AXI_WVALID),
.O (aa_wvalid),
.OE (w_transfer_en)
);
// SI W-channel payload mux
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (C_NUM_SLAVE_SLOTS),
.C_SEL_WIDTH (P_NUM_SLAVE_SLOTS_LOG),
.C_DATA_WIDTH (P_WMESG_WIDTH)
) si_w_payload_mux_inst
(
.S (aa_grant_enc),
.A (si_wmesg),
.O (mi_wmesg),
.OE (1'b1)
);
for (gen_si_slot=0; gen_si_slot<C_NUM_SLAVE_SLOTS; gen_si_slot=gen_si_slot+1) begin : gen_wmesg
assign si_wmesg[gen_si_slot*P_WMESG_WIDTH+:P_WMESG_WIDTH] = { // Concatenate from MSB to LSB
((C_AXI_PROTOCOL == P_AXI3) ? f_extend_ID(S_AXI_WID[gen_si_slot*C_AXI_ID_WIDTH+:C_AXI_ID_WIDTH], gen_si_slot) : 1'b0),
S_AXI_WUSER[gen_si_slot*C_AXI_WUSER_WIDTH+:C_AXI_WUSER_WIDTH],
S_AXI_WSTRB[gen_si_slot*C_AXI_DATA_WIDTH/8+:C_AXI_DATA_WIDTH/8],
S_AXI_WDATA[gen_si_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH],
S_AXI_WLAST[gen_si_slot*1+:1]
};
end // gen_wmesg
assign mi_wlast = mi_wmesg[0];
assign mi_wdata = mi_wmesg[1 +: C_AXI_DATA_WIDTH];
assign mi_wstrb = mi_wmesg[1+C_AXI_DATA_WIDTH +: C_AXI_DATA_WIDTH/8];
assign mi_wuser = mi_wmesg[1+C_AXI_DATA_WIDTH+C_AXI_DATA_WIDTH/8 +: C_AXI_WUSER_WIDTH];
assign mi_wid = mi_wmesg[1+C_AXI_DATA_WIDTH+C_AXI_DATA_WIDTH/8+C_AXI_WUSER_WIDTH +: P_AXI_WID_WIDTH];
end else begin : gen_no_wmux
assign aa_wvalid = w_transfer_en & S_AXI_WVALID;
assign mi_wlast = S_AXI_WLAST;
assign mi_wdata = S_AXI_WDATA;
assign mi_wstrb = S_AXI_WSTRB;
assign mi_wuser = S_AXI_WUSER;
assign mi_wid = S_AXI_WID;
end // gen_no_wmux
// Receive RVALID from targeted MI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (1)
) mi_rvalid_mux_inst
(
.S (m_atarget_enc),
.A (mi_rvalid),
.O (aa_rvalid),
.OE (r_transfer_en)
);
// MI R-channel payload mux
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (P_RMESG_WIDTH)
) mi_rmesg_mux_inst
(
.S (m_atarget_enc),
.A (mi_rmesg),
.O (aa_rmesg),
.OE (1'b1)
);
axi_register_slice_v2_1_axic_register_slice #
(
.C_FAMILY (C_FAMILY),
.C_DATA_WIDTH (P_RMESG_WIDTH),
.C_REG_CONFIG (P_R_REG_CONFIG)
)
reg_slice_r
(
// System Signals
.ACLK(ACLK),
.ARESET(reset),
// Slave side
.S_PAYLOAD_DATA(aa_rmesg),
.S_VALID(aa_rvalid),
.S_READY(aa_rready),
// Master side
.M_PAYLOAD_DATA(sr_rmesg),
.M_VALID(sr_rvalid),
.M_READY(sr_rready)
);
assign mi_rvalid[0+:C_NUM_MASTER_SLOTS] = M_AXI_RVALID;
assign mi_rlast[0+:C_NUM_MASTER_SLOTS] = M_AXI_RLAST;
assign mi_rresp[0+:C_NUM_MASTER_SLOTS*2] = M_AXI_RRESP;
assign mi_ruser[0+:C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH] = M_AXI_RUSER;
assign mi_rdata[0+:C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH] = M_AXI_RDATA;
assign M_AXI_RREADY = mi_rready[0+:C_NUM_MASTER_SLOTS];
for (gen_mi_slot=0; gen_mi_slot<P_NUM_MASTER_SLOTS_DE; gen_mi_slot=gen_mi_slot+1) begin : gen_rmesg
assign mi_rmesg[gen_mi_slot*P_RMESG_WIDTH+:P_RMESG_WIDTH] = { // Concatenate from MSB to LSB
mi_ruser[gen_mi_slot*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH],
mi_rdata[gen_mi_slot*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH],
mi_rresp[gen_mi_slot*2+:2],
mi_rlast[gen_mi_slot*1+:1]
};
end // gen_rmesg
assign si_rlast = sr_rmesg[0];
assign si_rresp = sr_rmesg[1 +: 2];
assign si_rdata = sr_rmesg[1+2 +: C_AXI_DATA_WIDTH];
assign si_ruser = sr_rmesg[1+2+C_AXI_DATA_WIDTH +: C_AXI_RUSER_WIDTH];
// Receive BVALID from targeted MI.
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (1)
) mi_bvalid_mux_inst
(
.S (m_atarget_enc),
.A (mi_bvalid),
.O (aa_bvalid),
.OE (b_transfer_en)
);
// MI B-channel payload mux
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY ("rtl"),
.C_RATIO (P_NUM_MASTER_SLOTS_DE),
.C_SEL_WIDTH (P_NUM_MASTER_SLOTS_DE_LOG),
.C_DATA_WIDTH (P_BMESG_WIDTH)
) mi_bmesg_mux_inst
(
.S (m_atarget_enc),
.A (mi_bmesg),
.O (si_bmesg),
.OE (1'b1)
);
assign mi_bvalid[0+:C_NUM_MASTER_SLOTS] = M_AXI_BVALID;
assign mi_bresp[0+:C_NUM_MASTER_SLOTS*2] = M_AXI_BRESP;
assign mi_buser[0+:C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH] = M_AXI_BUSER;
assign M_AXI_BREADY = mi_bready[0+:C_NUM_MASTER_SLOTS];
for (gen_mi_slot=0; gen_mi_slot<P_NUM_MASTER_SLOTS_DE; gen_mi_slot=gen_mi_slot+1) begin : gen_bmesg
assign mi_bmesg[gen_mi_slot*P_BMESG_WIDTH+:P_BMESG_WIDTH] = { // Concatenate from MSB to LSB
mi_buser[gen_mi_slot*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH],
mi_bresp[gen_mi_slot*2+:2]
};
end // gen_bmesg
assign si_bresp = si_bmesg[0 +: 2];
assign si_buser = si_bmesg[2 +: C_AXI_BUSER_WIDTH];
if (C_DEBUG) begin : gen_debug_trans_seq
// DEBUG WRITE TRANSACTION SEQUENCE COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_aw_trans_seq_i <= 1;
end else begin
if (aa_awvalid && aa_awready) begin
debug_aw_trans_seq_i <= debug_aw_trans_seq_i + 1;
end
end
end
// DEBUG READ TRANSACTION SEQUENCE COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_ar_trans_seq_i <= 1;
end else begin
if (aa_arvalid && aa_arready) begin
debug_ar_trans_seq_i <= debug_ar_trans_seq_i + 1;
end
end
end
// DEBUG WRITE BEAT COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_w_beat_cnt_i <= 0;
end else if (aa_wready & aa_wvalid) begin
if (mi_wlast) begin
debug_w_beat_cnt_i <= 0;
end else begin
debug_w_beat_cnt_i <= debug_w_beat_cnt_i + 1;
end
end
end // Clocked process
// DEBUG READ BEAT COUNTER
always @(posedge ACLK) begin
if (reset) begin
debug_r_beat_cnt_i <= 0;
end else if (sr_rready & sr_rvalid) begin
if (si_rlast) begin
debug_r_beat_cnt_i <= 0;
end else begin
debug_r_beat_cnt_i <= debug_r_beat_cnt_i + 1;
end
end
end // Clocked process
end // gen_debug_trans_seq
if (C_RANGE_CHECK) begin : gen_decerr
// Highest MI-slot (index C_NUM_MASTER_SLOTS) is the error handler
axi_crossbar_v2_1_decerr_slave #
(
.C_AXI_ID_WIDTH (1),
.C_AXI_DATA_WIDTH (C_AXI_DATA_WIDTH),
.C_AXI_RUSER_WIDTH (C_AXI_RUSER_WIDTH),
.C_AXI_BUSER_WIDTH (C_AXI_BUSER_WIDTH),
.C_AXI_PROTOCOL (C_AXI_PROTOCOL),
.C_RESP (P_DECERR)
)
decerr_slave_inst
(
.S_AXI_ACLK (ACLK),
.S_AXI_ARESET (reset),
.S_AXI_AWID (1'b0),
.S_AXI_AWVALID (mi_awvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_AWREADY (mi_awready[C_NUM_MASTER_SLOTS]),
.S_AXI_WLAST (mi_wlast),
.S_AXI_WVALID (mi_wvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_WREADY (mi_wready[C_NUM_MASTER_SLOTS]),
.S_AXI_BID (),
.S_AXI_BRESP (mi_bresp[C_NUM_MASTER_SLOTS*2+:2]),
.S_AXI_BUSER (mi_buser[C_NUM_MASTER_SLOTS*C_AXI_BUSER_WIDTH+:C_AXI_BUSER_WIDTH]),
.S_AXI_BVALID (mi_bvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_BREADY (mi_bready[C_NUM_MASTER_SLOTS]),
.S_AXI_ARID (1'b0),
.S_AXI_ARLEN (mi_alen),
.S_AXI_ARVALID (mi_arvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_ARREADY (mi_arready[C_NUM_MASTER_SLOTS]),
.S_AXI_RID (),
.S_AXI_RDATA (mi_rdata[C_NUM_MASTER_SLOTS*C_AXI_DATA_WIDTH+:C_AXI_DATA_WIDTH]),
.S_AXI_RRESP (mi_rresp[C_NUM_MASTER_SLOTS*2+:2]),
.S_AXI_RUSER (mi_ruser[C_NUM_MASTER_SLOTS*C_AXI_RUSER_WIDTH+:C_AXI_RUSER_WIDTH]),
.S_AXI_RLAST (mi_rlast[C_NUM_MASTER_SLOTS]),
.S_AXI_RVALID (mi_rvalid[C_NUM_MASTER_SLOTS]),
.S_AXI_RREADY (mi_rready[C_NUM_MASTER_SLOTS])
);
end // gen_decerr
endgenerate
endmodule
|
module processing_system7_bfm_v2_0_5_arb_hp2_3(
sw_clk,
rstn,
w_qos_hp2,
r_qos_hp2,
w_qos_hp3,
r_qos_hp3,
wr_ack_ddr_hp2,
wr_data_hp2,
wr_addr_hp2,
wr_bytes_hp2,
wr_dv_ddr_hp2,
rd_req_ddr_hp2,
rd_addr_hp2,
rd_bytes_hp2,
rd_data_ddr_hp2,
rd_dv_ddr_hp2,
wr_ack_ddr_hp3,
wr_data_hp3,
wr_addr_hp3,
wr_bytes_hp3,
wr_dv_ddr_hp3,
rd_req_ddr_hp3,
rd_addr_hp3,
rd_bytes_hp3,
rd_data_ddr_hp3,
rd_dv_ddr_hp3,
ddr_wr_ack,
ddr_wr_dv,
ddr_rd_req,
ddr_rd_dv,
ddr_rd_qos,
ddr_wr_qos,
ddr_wr_addr,
ddr_wr_data,
ddr_wr_bytes,
ddr_rd_addr,
ddr_rd_data,
ddr_rd_bytes
);
`include "processing_system7_bfm_v2_0_5_local_params.v"
input sw_clk;
input rstn;
input [axi_qos_width-1:0] w_qos_hp2;
input [axi_qos_width-1:0] r_qos_hp2;
input [axi_qos_width-1:0] w_qos_hp3;
input [axi_qos_width-1:0] r_qos_hp3;
input [axi_qos_width-1:0] ddr_rd_qos;
input [axi_qos_width-1:0] ddr_wr_qos;
output wr_ack_ddr_hp2;
input [max_burst_bits-1:0] wr_data_hp2;
input [addr_width-1:0] wr_addr_hp2;
input [max_burst_bytes_width:0] wr_bytes_hp2;
output wr_dv_ddr_hp2;
input rd_req_ddr_hp2;
input [addr_width-1:0] rd_addr_hp2;
input [max_burst_bytes_width:0] rd_bytes_hp2;
output [max_burst_bits-1:0] rd_data_ddr_hp2;
output rd_dv_ddr_hp2;
output wr_ack_ddr_hp3;
input [max_burst_bits-1:0] wr_data_hp3;
input [addr_width-1:0] wr_addr_hp3;
input [max_burst_bytes_width:0] wr_bytes_hp3;
output wr_dv_ddr_hp3;
input rd_req_ddr_hp3;
input [addr_width-1:0] rd_addr_hp3;
input [max_burst_bytes_width:0] rd_bytes_hp3;
output [max_burst_bits-1:0] rd_data_ddr_hp3;
output rd_dv_ddr_hp3;
input ddr_wr_ack;
output ddr_wr_dv;
output [addr_width-1:0]ddr_wr_addr;
output [max_burst_bits-1:0]ddr_wr_data;
output [max_burst_bytes_width:0]ddr_wr_bytes;
input ddr_rd_dv;
input [max_burst_bits-1:0] ddr_rd_data;
output ddr_rd_req;
output [addr_width-1:0] ddr_rd_addr;
output [max_burst_bytes_width:0] ddr_rd_bytes;
processing_system7_bfm_v2_0_5_arb_wr ddr_hp_wr(
.rstn(rstn),
.sw_clk(sw_clk),
.qos1(w_qos_hp2),
.qos2(w_qos_hp3),
.prt_dv1(wr_dv_ddr_hp2),
.prt_dv2(wr_dv_ddr_hp3),
.prt_data1(wr_data_hp2),
.prt_data2(wr_data_hp3),
.prt_addr1(wr_addr_hp2),
.prt_addr2(wr_addr_hp3),
.prt_bytes1(wr_bytes_hp2),
.prt_bytes2(wr_bytes_hp3),
.prt_ack1(wr_ack_ddr_hp2),
.prt_ack2(wr_ack_ddr_hp3),
.prt_req(ddr_wr_dv),
.prt_qos(ddr_wr_qos),
.prt_data(ddr_wr_data),
.prt_addr(ddr_wr_addr),
.prt_bytes(ddr_wr_bytes),
.prt_ack(ddr_wr_ack)
);
processing_system7_bfm_v2_0_5_arb_rd ddr_hp_rd(
.rstn(rstn),
.sw_clk(sw_clk),
.qos1(r_qos_hp2),
.qos2(r_qos_hp3),
.prt_req1(rd_req_ddr_hp2),
.prt_req2(rd_req_ddr_hp3),
.prt_data1(rd_data_ddr_hp2),
.prt_data2(rd_data_ddr_hp3),
.prt_addr1(rd_addr_hp2),
.prt_addr2(rd_addr_hp3),
.prt_bytes1(rd_bytes_hp2),
.prt_bytes2(rd_bytes_hp3),
.prt_dv1(rd_dv_ddr_hp2),
.prt_dv2(rd_dv_ddr_hp3),
.prt_req(ddr_rd_req),
.prt_qos(ddr_rd_qos),
.prt_data(ddr_rd_data),
.prt_addr(ddr_rd_addr),
.prt_bytes(ddr_rd_bytes),
.prt_dv(ddr_rd_dv)
);
endmodule
|
module processing_system7_bfm_v2_0_5_gen_clock(
ps_clk,
sw_clk,
fclk_clk3,
fclk_clk2,
fclk_clk1,
fclk_clk0
);
input ps_clk;
output sw_clk;
output fclk_clk3;
output fclk_clk2;
output fclk_clk1;
output fclk_clk0;
parameter freq_clk3 = 50;
parameter freq_clk2 = 50;
parameter freq_clk1 = 50;
parameter freq_clk0 = 50;
reg clk0 = 1'b0;
reg clk1 = 1'b0;
reg clk2 = 1'b0;
reg clk3 = 1'b0;
reg sw_clk = 1'b0;
assign fclk_clk0 = clk0;
assign fclk_clk1 = clk1;
assign fclk_clk2 = clk2;
assign fclk_clk3 = clk3;
real clk3_p = (1000.00/freq_clk3)/2;
real clk2_p = (1000.00/freq_clk2)/2;
real clk1_p = (1000.00/freq_clk1)/2;
real clk0_p = (1000.00/freq_clk0)/2;
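// Worked example (editor's note; assumes freq_clk* is given in MHz and a 1 ns `timescale,
// neither of which is visible in this excerpt): with the default freq_clk0 = 50,
// clk0_p = (1000.00/50)/2 = 10.0, so clk0 toggles every 10 time units below,
// giving a 20 ns period, i.e. 50 MHz.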
always #(clk3_p) clk3 = !clk3;
always #(clk2_p) clk2 = !clk2;
always #(clk1_p) clk1 = !clk1;
always #(clk0_p) clk0 = !clk0;
always #(0.5) sw_clk = !sw_clk;
endmodule
|
module processing_system7_bfm_v2_0_5_ocmc(
rstn,
sw_clk,
/* Goes to port 0 of OCM */
ocm_wr_ack_port0,
ocm_wr_dv_port0,
ocm_rd_req_port0,
ocm_rd_dv_port0,
ocm_wr_addr_port0,
ocm_wr_data_port0,
ocm_wr_bytes_port0,
ocm_rd_addr_port0,
ocm_rd_data_port0,
ocm_rd_bytes_port0,
ocm_wr_qos_port0,
ocm_rd_qos_port0,
/* Goes to port 1 of OCM */
ocm_wr_ack_port1,
ocm_wr_dv_port1,
ocm_rd_req_port1,
ocm_rd_dv_port1,
ocm_wr_addr_port1,
ocm_wr_data_port1,
ocm_wr_bytes_port1,
ocm_rd_addr_port1,
ocm_rd_data_port1,
ocm_rd_bytes_port1,
ocm_wr_qos_port1,
ocm_rd_qos_port1
);
`include "processing_system7_bfm_v2_0_5_local_params.v"
input rstn;
input sw_clk;
output ocm_wr_ack_port0;
input ocm_wr_dv_port0;
input ocm_rd_req_port0;
output ocm_rd_dv_port0;
input[addr_width-1:0] ocm_wr_addr_port0;
input[max_burst_bits-1:0] ocm_wr_data_port0;
input[max_burst_bytes_width:0] ocm_wr_bytes_port0;
input[addr_width-1:0] ocm_rd_addr_port0;
output[max_burst_bits-1:0] ocm_rd_data_port0;
input[max_burst_bytes_width:0] ocm_rd_bytes_port0;
input [axi_qos_width-1:0] ocm_wr_qos_port0;
input [axi_qos_width-1:0] ocm_rd_qos_port0;
output ocm_wr_ack_port1;
input ocm_wr_dv_port1;
input ocm_rd_req_port1;
output ocm_rd_dv_port1;
input[addr_width-1:0] ocm_wr_addr_port1;
input[max_burst_bits-1:0] ocm_wr_data_port1;
input[max_burst_bytes_width:0] ocm_wr_bytes_port1;
input[addr_width-1:0] ocm_rd_addr_port1;
output[max_burst_bits-1:0] ocm_rd_data_port1;
input[max_burst_bytes_width:0] ocm_rd_bytes_port1;
input[axi_qos_width-1:0] ocm_wr_qos_port1;
input[axi_qos_width-1:0] ocm_rd_qos_port1;
wire [axi_qos_width-1:0] wr_qos;
wire wr_req;
wire [max_burst_bits-1:0] wr_data;
wire [addr_width-1:0] wr_addr;
wire [max_burst_bytes_width:0] wr_bytes;
reg wr_ack;
wire [axi_qos_width-1:0] rd_qos;
reg [max_burst_bits-1:0] rd_data;
wire [addr_width-1:0] rd_addr;
wire [max_burst_bytes_width:0] rd_bytes;
reg rd_dv;
wire rd_req;
processing_system7_bfm_v2_0_5_arb_wr ocm_write_ports (
.rstn(rstn),
.sw_clk(sw_clk),
.qos1(ocm_wr_qos_port0),
.qos2(ocm_wr_qos_port1),
.prt_dv1(ocm_wr_dv_port0),
.prt_dv2(ocm_wr_dv_port1),
.prt_data1(ocm_wr_data_port0),
.prt_data2(ocm_wr_data_port1),
.prt_addr1(ocm_wr_addr_port0),
.prt_addr2(ocm_wr_addr_port1),
.prt_bytes1(ocm_wr_bytes_port0),
.prt_bytes2(ocm_wr_bytes_port1),
.prt_ack1(ocm_wr_ack_port0),
.prt_ack2(ocm_wr_ack_port1),
.prt_qos(wr_qos),
.prt_req(wr_req),
.prt_data(wr_data),
.prt_addr(wr_addr),
.prt_bytes(wr_bytes),
.prt_ack(wr_ack)
);
processing_system7_bfm_v2_0_5_arb_rd ocm_read_ports (
.rstn(rstn),
.sw_clk(sw_clk),
.qos1(ocm_rd_qos_port0),
.qos2(ocm_rd_qos_port1),
.prt_req1(ocm_rd_req_port0),
.prt_req2(ocm_rd_req_port1),
.prt_data1(ocm_rd_data_port0),
.prt_data2(ocm_rd_data_port1),
.prt_addr1(ocm_rd_addr_port0),
.prt_addr2(ocm_rd_addr_port1),
.prt_bytes1(ocm_rd_bytes_port0),
.prt_bytes2(ocm_rd_bytes_port1),
.prt_dv1(ocm_rd_dv_port0),
.prt_dv2(ocm_rd_dv_port1),
.prt_qos(rd_qos),
.prt_req(rd_req),
.prt_data(rd_data),
.prt_addr(rd_addr),
.prt_bytes(rd_bytes),
.prt_dv(rd_dv)
);
processing_system7_bfm_v2_0_5_ocm_mem ocm();
reg [1:0] state;
always@(posedge sw_clk or negedge rstn)
begin
if(!rstn) begin
wr_ack <= 0;
rd_dv <= 0;
state <= 2'd0;
end else begin
case(state)
0:begin
state <= 0;
wr_ack <= 0;
rd_dv <= 0;
if(wr_req) begin
ocm.write_mem(wr_data, wr_addr, wr_bytes);
wr_ack <= 1;
state <= 1;
end
if(rd_req) begin
ocm.read_mem(rd_data, rd_addr, rd_bytes);
rd_dv <= 1;
state <= 1;
end
end
1:begin
wr_ack <= 0;
rd_dv <= 0;
state <= 0;
end
endcase
end // if
end // always
endmodule
|
module processing_system7_bfm_v2_0_5_fmsw_gp(
sw_clk,
rstn,
w_qos_gp0,
r_qos_gp0,
wr_ack_ocm_gp0,
wr_ack_ddr_gp0,
wr_data_gp0,
wr_addr_gp0,
wr_bytes_gp0,
wr_dv_ocm_gp0,
wr_dv_ddr_gp0,
rd_req_ocm_gp0,
rd_req_ddr_gp0,
rd_req_reg_gp0,
rd_addr_gp0,
rd_bytes_gp0,
rd_data_ocm_gp0,
rd_data_ddr_gp0,
rd_data_reg_gp0,
rd_dv_ocm_gp0,
rd_dv_ddr_gp0,
rd_dv_reg_gp0,
w_qos_gp1,
r_qos_gp1,
wr_ack_ocm_gp1,
wr_ack_ddr_gp1,
wr_data_gp1,
wr_addr_gp1,
wr_bytes_gp1,
wr_dv_ocm_gp1,
wr_dv_ddr_gp1,
rd_req_ocm_gp1,
rd_req_ddr_gp1,
rd_req_reg_gp1,
rd_addr_gp1,
rd_bytes_gp1,
rd_data_ocm_gp1,
rd_data_ddr_gp1,
rd_data_reg_gp1,
rd_dv_ocm_gp1,
rd_dv_ddr_gp1,
rd_dv_reg_gp1,
ocm_wr_ack,
ocm_wr_dv,
ocm_rd_req,
ocm_rd_dv,
ddr_wr_ack,
ddr_wr_dv,
ddr_rd_req,
ddr_rd_dv,
reg_rd_req,
reg_rd_dv,
ocm_wr_qos,
ddr_wr_qos,
ocm_rd_qos,
ddr_rd_qos,
reg_rd_qos,
ocm_wr_addr,
ocm_wr_data,
ocm_wr_bytes,
ocm_rd_addr,
ocm_rd_data,
ocm_rd_bytes,
ddr_wr_addr,
ddr_wr_data,
ddr_wr_bytes,
ddr_rd_addr,
ddr_rd_data,
ddr_rd_bytes,
reg_rd_addr,
reg_rd_data,
reg_rd_bytes
);
`include "processing_system7_bfm_v2_0_5_local_params.v"
input sw_clk;
input rstn;
input [axi_qos_width-1:0]w_qos_gp0;
input [axi_qos_width-1:0]r_qos_gp0;
input [axi_qos_width-1:0]w_qos_gp1;
input [axi_qos_width-1:0]r_qos_gp1;
output [axi_qos_width-1:0]ocm_wr_qos;
output [axi_qos_width-1:0]ocm_rd_qos;
output [axi_qos_width-1:0]ddr_wr_qos;
output [axi_qos_width-1:0]ddr_rd_qos;
output [axi_qos_width-1:0]reg_rd_qos;
output wr_ack_ocm_gp0;
output wr_ack_ddr_gp0;
input [max_burst_bits-1:0] wr_data_gp0;
input [addr_width-1:0] wr_addr_gp0;
input [max_burst_bytes_width:0] wr_bytes_gp0;
output wr_dv_ocm_gp0;
output wr_dv_ddr_gp0;
input rd_req_ocm_gp0;
input rd_req_ddr_gp0;
input rd_req_reg_gp0;
input [addr_width-1:0] rd_addr_gp0;
input [max_burst_bytes_width:0] rd_bytes_gp0;
output [max_burst_bits-1:0] rd_data_ocm_gp0;
output [max_burst_bits-1:0] rd_data_ddr_gp0;
output [max_burst_bits-1:0] rd_data_reg_gp0;
output rd_dv_ocm_gp0;
output rd_dv_ddr_gp0;
output rd_dv_reg_gp0;
output wr_ack_ocm_gp1;
output wr_ack_ddr_gp1;
input [max_burst_bits-1:0] wr_data_gp1;
input [addr_width-1:0] wr_addr_gp1;
input [max_burst_bytes_width:0] wr_bytes_gp1;
output wr_dv_ocm_gp1;
output wr_dv_ddr_gp1;
input rd_req_ocm_gp1;
input rd_req_ddr_gp1;
input rd_req_reg_gp1;
input [addr_width-1:0] rd_addr_gp1;
input [max_burst_bytes_width:0] rd_bytes_gp1;
output [max_burst_bits-1:0] rd_data_ocm_gp1;
output [max_burst_bits-1:0] rd_data_ddr_gp1;
output [max_burst_bits-1:0] rd_data_reg_gp1;
output rd_dv_ocm_gp1;
output rd_dv_ddr_gp1;
output rd_dv_reg_gp1;
input ocm_wr_ack;
output ocm_wr_dv;
output [addr_width-1:0]ocm_wr_addr;
output [max_burst_bits-1:0]ocm_wr_data;
output [max_burst_bytes_width:0]ocm_wr_bytes;
input ocm_rd_dv;
input [max_burst_bits-1:0] ocm_rd_data;
output ocm_rd_req;
output [addr_width-1:0] ocm_rd_addr;
output [max_burst_bytes_width:0] ocm_rd_bytes;
input ddr_wr_ack;
output ddr_wr_dv;
output [addr_width-1:0]ddr_wr_addr;
output [max_burst_bits-1:0]ddr_wr_data;
output [max_burst_bytes_width:0]ddr_wr_bytes;
input ddr_rd_dv;
input [max_burst_bits-1:0] ddr_rd_data;
output ddr_rd_req;
output [addr_width-1:0] ddr_rd_addr;
output [max_burst_bytes_width:0] ddr_rd_bytes;
input reg_rd_dv;
input [max_burst_bits-1:0] reg_rd_data;
output reg_rd_req;
output [addr_width-1:0] reg_rd_addr;
output [max_burst_bytes_width:0] reg_rd_bytes;
processing_system7_bfm_v2_0_5_arb_wr ocm_gp_wr(
.rstn(rstn),
.sw_clk(sw_clk),
.qos1(w_qos_gp0),
.qos2(w_qos_gp1),
.prt_dv1(wr_dv_ocm_gp0),
.prt_dv2(wr_dv_ocm_gp1),
.prt_data1(wr_data_gp0),
.prt_data2(wr_data_gp1),
.prt_addr1(wr_addr_gp0),
.prt_addr2(wr_addr_gp1),
.prt_bytes1(wr_bytes_gp0),
.prt_bytes2(wr_bytes_gp1),
.prt_ack1(wr_ack_ocm_gp0),
.prt_ack2(wr_ack_ocm_gp1),
.prt_req(ocm_wr_dv),
.prt_qos(ocm_wr_qos),
.prt_data(ocm_wr_data),
.prt_addr(ocm_wr_addr),
.prt_bytes(ocm_wr_bytes),
.prt_ack(ocm_wr_ack)
);
processing_system7_bfm_v2_0_5_arb_wr ddr_gp_wr(
.rstn(rstn),
.sw_clk(sw_clk),
.qos1(w_qos_gp0),
.qos2(w_qos_gp1),
.prt_dv1(wr_dv_ddr_gp0),
.prt_dv2(wr_dv_ddr_gp1),
.prt_data1(wr_data_gp0),
.prt_data2(wr_data_gp1),
.prt_addr1(wr_addr_gp0),
.prt_addr2(wr_addr_gp1),
.prt_bytes1(wr_bytes_gp0),
.prt_bytes2(wr_bytes_gp1),
.prt_ack1(wr_ack_ddr_gp0),
.prt_ack2(wr_ack_ddr_gp1),
.prt_req(ddr_wr_dv),
.prt_qos(ddr_wr_qos),
.prt_data(ddr_wr_data),
.prt_addr(ddr_wr_addr),
.prt_bytes(ddr_wr_bytes),
.prt_ack(ddr_wr_ack)
);
processing_system7_bfm_v2_0_5_arb_rd ocm_gp_rd(
.rstn(rstn),
.sw_clk(sw_clk),
.qos1(r_qos_gp0),
.qos2(r_qos_gp1),
.prt_req1(rd_req_ocm_gp0),
.prt_req2(rd_req_ocm_gp1),
.prt_data1(rd_data_ocm_gp0),
.prt_data2(rd_data_ocm_gp1),
.prt_addr1(rd_addr_gp0),
.prt_addr2(rd_addr_gp1),
.prt_bytes1(rd_bytes_gp0),
.prt_bytes2(rd_bytes_gp1),
.prt_dv1(rd_dv_ocm_gp0),
.prt_dv2(rd_dv_ocm_gp1),
.prt_req(ocm_rd_req),
.prt_qos(ocm_rd_qos),
.prt_data(ocm_rd_data),
.prt_addr(ocm_rd_addr),
.prt_bytes(ocm_rd_bytes),
.prt_dv(ocm_rd_dv)
);
processing_system7_bfm_v2_0_5_arb_rd ddr_gp_rd(
.rstn(rstn),
.sw_clk(sw_clk),
.qos1(r_qos_gp0),
.qos2(r_qos_gp1),
.prt_req1(rd_req_ddr_gp0),
.prt_req2(rd_req_ddr_gp1),
.prt_data1(rd_data_ddr_gp0),
.prt_data2(rd_data_ddr_gp1),
.prt_addr1(rd_addr_gp0),
.prt_addr2(rd_addr_gp1),
.prt_bytes1(rd_bytes_gp0),
.prt_bytes2(rd_bytes_gp1),
.prt_dv1(rd_dv_ddr_gp0),
.prt_dv2(rd_dv_ddr_gp1),
.prt_req(ddr_rd_req),
.prt_qos(ddr_rd_qos),
.prt_data(ddr_rd_data),
.prt_addr(ddr_rd_addr),
.prt_bytes(ddr_rd_bytes),
.prt_dv(ddr_rd_dv)
);
processing_system7_bfm_v2_0_5_arb_rd reg_gp_rd(
.rstn(rstn),
.sw_clk(sw_clk),
.qos1(r_qos_gp0),
.qos2(r_qos_gp1),
.prt_req1(rd_req_reg_gp0),
.prt_req2(rd_req_reg_gp1),
.prt_data1(rd_data_reg_gp0),
.prt_data2(rd_data_reg_gp1),
.prt_addr1(rd_addr_gp0),
.prt_addr2(rd_addr_gp1),
.prt_bytes1(rd_bytes_gp0),
.prt_bytes2(rd_bytes_gp1),
.prt_dv1(rd_dv_reg_gp0),
.prt_dv2(rd_dv_reg_gp1),
.prt_req(reg_rd_req),
.prt_qos(reg_rd_qos),
.prt_data(reg_rd_data),
.prt_addr(reg_rd_addr),
.prt_bytes(reg_rd_bytes),
.prt_dv(reg_rd_dv)
);
endmodule
|
module processing_system7_bfm_v2_0_5_arb_hp0_1(
sw_clk,
rstn,
w_qos_hp0,
r_qos_hp0,
w_qos_hp1,
r_qos_hp1,
wr_ack_ddr_hp0,
wr_data_hp0,
wr_addr_hp0,
wr_bytes_hp0,
wr_dv_ddr_hp0,
rd_req_ddr_hp0,
rd_addr_hp0,
rd_bytes_hp0,
rd_data_ddr_hp0,
rd_dv_ddr_hp0,
wr_ack_ddr_hp1,
wr_data_hp1,
wr_addr_hp1,
wr_bytes_hp1,
wr_dv_ddr_hp1,
rd_req_ddr_hp1,
rd_addr_hp1,
rd_bytes_hp1,
rd_data_ddr_hp1,
rd_dv_ddr_hp1,
ddr_wr_ack,
ddr_wr_dv,
ddr_rd_req,
ddr_rd_dv,
ddr_rd_qos,
ddr_wr_qos,
ddr_wr_addr,
ddr_wr_data,
ddr_wr_bytes,
ddr_rd_addr,
ddr_rd_data,
ddr_rd_bytes
);
`include "processing_system7_bfm_v2_0_5_local_params.v"
input sw_clk;
input rstn;
input [axi_qos_width-1:0] w_qos_hp0;
input [axi_qos_width-1:0] r_qos_hp0;
input [axi_qos_width-1:0] w_qos_hp1;
input [axi_qos_width-1:0] r_qos_hp1;
input [axi_qos_width-1:0] ddr_rd_qos;
input [axi_qos_width-1:0] ddr_wr_qos;
output wr_ack_ddr_hp0;
input [max_burst_bits-1:0] wr_data_hp0;
input [addr_width-1:0] wr_addr_hp0;
input [max_burst_bytes_width:0] wr_bytes_hp0;
output wr_dv_ddr_hp0;
input rd_req_ddr_hp0;
input [addr_width-1:0] rd_addr_hp0;
input [max_burst_bytes_width:0] rd_bytes_hp0;
output [max_burst_bits-1:0] rd_data_ddr_hp0;
output rd_dv_ddr_hp0;
output wr_ack_ddr_hp1;
input [max_burst_bits-1:0] wr_data_hp1;
input [addr_width-1:0] wr_addr_hp1;
input [max_burst_bytes_width:0] wr_bytes_hp1;
output wr_dv_ddr_hp1;
input rd_req_ddr_hp1;
input [addr_width-1:0] rd_addr_hp1;
input [max_burst_bytes_width:0] rd_bytes_hp1;
output [max_burst_bits-1:0] rd_data_ddr_hp1;
output rd_dv_ddr_hp1;
input ddr_wr_ack;
output ddr_wr_dv;
output [addr_width-1:0]ddr_wr_addr;
output [max_burst_bits-1:0]ddr_wr_data;
output [max_burst_bytes_width:0]ddr_wr_bytes;
input ddr_rd_dv;
input [max_burst_bits-1:0] ddr_rd_data;
output ddr_rd_req;
output [addr_width-1:0] ddr_rd_addr;
output [max_burst_bytes_width:0] ddr_rd_bytes;
processing_system7_bfm_v2_0_5_arb_wr ddr_hp_wr(
.rstn(rstn),
.sw_clk(sw_clk),
.qos1(w_qos_hp0),
.qos2(w_qos_hp1),
.prt_dv1(wr_dv_ddr_hp0),
.prt_dv2(wr_dv_ddr_hp1),
.prt_data1(wr_data_hp0),
.prt_data2(wr_data_hp1),
.prt_addr1(wr_addr_hp0),
.prt_addr2(wr_addr_hp1),
.prt_bytes1(wr_bytes_hp0),
.prt_bytes2(wr_bytes_hp1),
.prt_ack1(wr_ack_ddr_hp0),
.prt_ack2(wr_ack_ddr_hp1),
.prt_req(ddr_wr_dv),
.prt_qos(ddr_wr_qos),
.prt_data(ddr_wr_data),
.prt_addr(ddr_wr_addr),
.prt_bytes(ddr_wr_bytes),
.prt_ack(ddr_wr_ack)
);
processing_system7_bfm_v2_0_5_arb_rd ddr_hp_rd(
.rstn(rstn),
.sw_clk(sw_clk),
.qos1(r_qos_hp0),
.qos2(r_qos_hp1),
.prt_req1(rd_req_ddr_hp0),
.prt_req2(rd_req_ddr_hp1),
.prt_data1(rd_data_ddr_hp0),
.prt_data2(rd_data_ddr_hp1),
.prt_addr1(rd_addr_hp0),
.prt_addr2(rd_addr_hp1),
.prt_bytes1(rd_bytes_hp0),
.prt_bytes2(rd_bytes_hp1),
.prt_dv1(rd_dv_ddr_hp0),
.prt_dv2(rd_dv_ddr_hp1),
.prt_qos(ddr_rd_qos),
.prt_req(ddr_rd_req),
.prt_data(ddr_rd_data),
.prt_addr(ddr_rd_addr),
.prt_bytes(ddr_rd_bytes),
.prt_dv(ddr_rd_dv)
);
endmodule
|
module axi_crossbar_v2_1_decerr_slave #
(
parameter integer C_AXI_ID_WIDTH = 1,
parameter integer C_AXI_DATA_WIDTH = 32,
parameter integer C_AXI_BUSER_WIDTH = 1,
parameter integer C_AXI_RUSER_WIDTH = 1,
parameter integer C_AXI_PROTOCOL = 0,
parameter integer C_RESP = 2'b11
)
(
input wire S_AXI_ACLK,
input wire S_AXI_ARESET,
input wire [(C_AXI_ID_WIDTH-1):0] S_AXI_AWID,
input wire S_AXI_AWVALID,
output wire S_AXI_AWREADY,
input wire S_AXI_WLAST,
input wire S_AXI_WVALID,
output wire S_AXI_WREADY,
output wire [(C_AXI_ID_WIDTH-1):0] S_AXI_BID,
output wire [1:0] S_AXI_BRESP,
output wire [C_AXI_BUSER_WIDTH-1:0] S_AXI_BUSER,
output wire S_AXI_BVALID,
input wire S_AXI_BREADY,
input wire [(C_AXI_ID_WIDTH-1):0] S_AXI_ARID,
input wire [7:0] S_AXI_ARLEN,
input wire S_AXI_ARVALID,
output wire S_AXI_ARREADY,
output wire [(C_AXI_ID_WIDTH-1):0] S_AXI_RID,
output wire [(C_AXI_DATA_WIDTH-1):0] S_AXI_RDATA,
output wire [1:0] S_AXI_RRESP,
output wire [C_AXI_RUSER_WIDTH-1:0] S_AXI_RUSER,
output wire S_AXI_RLAST,
output wire S_AXI_RVALID,
input wire S_AXI_RREADY
);
reg s_axi_awready_i;
reg s_axi_wready_i;
reg s_axi_bvalid_i;
reg s_axi_arready_i;
reg s_axi_rvalid_i;
localparam P_WRITE_IDLE = 2'b00;
localparam P_WRITE_DATA = 2'b01;
localparam P_WRITE_RESP = 2'b10;
localparam P_READ_IDLE = 1'b0;
localparam P_READ_DATA = 1'b1;
localparam integer P_AXI4 = 0;
localparam integer P_AXI3 = 1;
localparam integer P_AXILITE = 2;
assign S_AXI_BRESP = C_RESP;
assign S_AXI_RRESP = C_RESP;
assign S_AXI_RDATA = {C_AXI_DATA_WIDTH{1'b0}};
assign S_AXI_BUSER = {C_AXI_BUSER_WIDTH{1'b0}};
assign S_AXI_RUSER = {C_AXI_RUSER_WIDTH{1'b0}};
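// Editor's note: C_RESP defaults to 2'b11, the AXI DECERR response code, so every write
// completes with BRESP = DECERR and every read beat returns RRESP = DECERR with all-zero
// RDATA. The handshake state machines below only sequence the protocol; the response
// payload is fixed by these constant assignments.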
assign S_AXI_AWREADY = s_axi_awready_i;
assign S_AXI_WREADY = s_axi_wready_i;
assign S_AXI_BVALID = s_axi_bvalid_i;
assign S_AXI_ARREADY = s_axi_arready_i;
assign S_AXI_RVALID = s_axi_rvalid_i;
generate
if (C_AXI_PROTOCOL == P_AXILITE) begin : gen_axilite
assign S_AXI_RLAST = 1'b1;
assign S_AXI_BID = 0;
assign S_AXI_RID = 0;
always @(posedge S_AXI_ACLK) begin
if (S_AXI_ARESET) begin
s_axi_awready_i <= 1'b0;
s_axi_wready_i <= 1'b0;
s_axi_bvalid_i <= 1'b0;
end else begin
if (s_axi_bvalid_i) begin
if (S_AXI_BREADY) begin
s_axi_bvalid_i <= 1'b0;
end
end else if (S_AXI_AWVALID & S_AXI_WVALID) begin
if (s_axi_awready_i) begin
s_axi_awready_i <= 1'b0;
s_axi_wready_i <= 1'b0;
s_axi_bvalid_i <= 1'b1;
end else begin
s_axi_awready_i <= 1'b1;
s_axi_wready_i <= 1'b1;
end
end
end
end
always @(posedge S_AXI_ACLK) begin
if (S_AXI_ARESET) begin
s_axi_arready_i <= 1'b0;
s_axi_rvalid_i <= 1'b0;
end else begin
if (s_axi_rvalid_i) begin
if (S_AXI_RREADY) begin
s_axi_rvalid_i <= 1'b0;
end
end else if (S_AXI_ARVALID & s_axi_arready_i) begin
s_axi_arready_i <= 1'b0;
s_axi_rvalid_i <= 1'b1;
end else begin
s_axi_arready_i <= 1'b1;
end
end
end
end else begin : gen_axi
reg s_axi_rlast_i;
reg [(C_AXI_ID_WIDTH-1):0] s_axi_bid_i;
reg [(C_AXI_ID_WIDTH-1):0] s_axi_rid_i;
reg [7:0] read_cnt;
reg [1:0] write_cs;
reg [0:0] read_cs;
assign S_AXI_RLAST = s_axi_rlast_i;
assign S_AXI_BID = s_axi_bid_i;
assign S_AXI_RID = s_axi_rid_i;
always @(posedge S_AXI_ACLK) begin
if (S_AXI_ARESET) begin
write_cs <= P_WRITE_IDLE;
s_axi_awready_i <= 1'b0;
s_axi_wready_i <= 1'b0;
s_axi_bvalid_i <= 1'b0;
s_axi_bid_i <= 0;
end else begin
case (write_cs)
P_WRITE_IDLE:
begin
if (S_AXI_AWVALID & s_axi_awready_i) begin
s_axi_awready_i <= 1'b0;
s_axi_bid_i <= S_AXI_AWID;
s_axi_wready_i <= 1'b1;
write_cs <= P_WRITE_DATA;
end else begin
s_axi_awready_i <= 1'b1;
end
end
P_WRITE_DATA:
begin
if (S_AXI_WVALID & S_AXI_WLAST) begin
s_axi_wready_i <= 1'b0;
s_axi_bvalid_i <= 1'b1;
write_cs <= P_WRITE_RESP;
end
end
P_WRITE_RESP:
begin
if (S_AXI_BREADY) begin
s_axi_bvalid_i <= 1'b0;
s_axi_awready_i <= 1'b1;
write_cs <= P_WRITE_IDLE;
end
end
endcase
end
end
always @(posedge S_AXI_ACLK) begin
if (S_AXI_ARESET) begin
read_cs <= P_READ_IDLE;
s_axi_arready_i <= 1'b0;
s_axi_rvalid_i <= 1'b0;
s_axi_rlast_i <= 1'b0;
s_axi_rid_i <= 0;
read_cnt <= 0;
end else begin
case (read_cs)
P_READ_IDLE:
begin
if (S_AXI_ARVALID & s_axi_arready_i) begin
s_axi_arready_i <= 1'b0;
s_axi_rid_i <= S_AXI_ARID;
read_cnt <= S_AXI_ARLEN;
s_axi_rvalid_i <= 1'b1;
if (S_AXI_ARLEN == 0) begin
s_axi_rlast_i <= 1'b1;
end else begin
s_axi_rlast_i <= 1'b0;
end
read_cs <= P_READ_DATA;
end else begin
s_axi_arready_i <= 1'b1;
end
end
P_READ_DATA:
begin
if (S_AXI_RREADY) begin
if (read_cnt == 0) begin
s_axi_rvalid_i <= 1'b0;
s_axi_rlast_i <= 1'b0;
s_axi_arready_i <= 1'b1;
read_cs <= P_READ_IDLE;
end else begin
if (read_cnt == 1) begin
s_axi_rlast_i <= 1'b1;
end
read_cnt <= read_cnt - 1;
end
end
end
endcase
end
end
end
endgenerate
endmodule
|
module axi_crossbar_v2_1_si_transactor #
(
parameter C_FAMILY = "none",
parameter integer C_SI = 0, // SI-slot number of current instance.
parameter integer C_DIR = 0, // Direction: 0 = Write; 1 = Read.
parameter integer C_NUM_ADDR_RANGES = 1,
parameter integer C_NUM_M = 2,
parameter integer C_NUM_M_LOG = 1,
parameter integer C_ACCEPTANCE = 1, // Acceptance limit of this SI-slot.
parameter integer C_ACCEPTANCE_LOG = 0, // Width of acceptance counter for this SI-slot.
parameter integer C_ID_WIDTH = 1,
parameter integer C_THREAD_ID_WIDTH = 0,
parameter integer C_ADDR_WIDTH = 32,
parameter integer C_AMESG_WIDTH = 1, // Used for AW or AR channel payload, depending on instantiation.
parameter integer C_RMESG_WIDTH = 1, // Used for B or R channel payload, depending on instantiation.
parameter [C_ID_WIDTH-1:0] C_BASE_ID = {C_ID_WIDTH{1'b0}},
parameter [C_ID_WIDTH-1:0] C_HIGH_ID = {C_ID_WIDTH{1'b0}},
parameter [C_NUM_M*C_NUM_ADDR_RANGES*64-1:0] C_BASE_ADDR = {C_NUM_M*C_NUM_ADDR_RANGES*64{1'b1}},
parameter [C_NUM_M*C_NUM_ADDR_RANGES*64-1:0] C_HIGH_ADDR = {C_NUM_M*C_NUM_ADDR_RANGES*64{1'b0}},
parameter integer C_SINGLE_THREAD = 0,
parameter [C_NUM_M-1:0] C_TARGET_QUAL = {C_NUM_M{1'b1}},
parameter [C_NUM_M*32-1:0] C_M_AXI_SECURE = {C_NUM_M{32'h00000000}},
parameter integer C_RANGE_CHECK = 0,
parameter integer C_ADDR_DECODE =0,
parameter [C_NUM_M*32-1:0] C_ERR_MODE = {C_NUM_M{32'h00000000}},
parameter integer C_DEBUG = 1
)
(
// Global Signals
input wire ACLK,
input wire ARESET,
// Slave Address Channel Interface Ports
input wire [C_ID_WIDTH-1:0] S_AID,
input wire [C_ADDR_WIDTH-1:0] S_AADDR,
input wire [8-1:0] S_ALEN,
input wire [3-1:0] S_ASIZE,
input wire [2-1:0] S_ABURST,
input wire [2-1:0] S_ALOCK,
input wire [3-1:0] S_APROT,
// input wire [4-1:0] S_AREGION,
input wire [C_AMESG_WIDTH-1:0] S_AMESG,
input wire S_AVALID,
output wire S_AREADY,
// Master Address Channel Interface Ports
output wire [C_ID_WIDTH-1:0] M_AID,
output wire [C_ADDR_WIDTH-1:0] M_AADDR,
output wire [8-1:0] M_ALEN,
output wire [3-1:0] M_ASIZE,
output wire [2-1:0] M_ALOCK,
output wire [3-1:0] M_APROT,
output wire [4-1:0] M_AREGION,
output wire [C_AMESG_WIDTH-1:0] M_AMESG,
output wire [(C_NUM_M+1)-1:0] M_ATARGET_HOT,
output wire [(C_NUM_M_LOG+1)-1:0] M_ATARGET_ENC,
output wire [7:0] M_AERROR,
output wire M_AVALID_QUAL,
output wire M_AVALID,
input wire M_AREADY,
// Slave Response Channel Interface Ports
output wire [C_ID_WIDTH-1:0] S_RID,
output wire [C_RMESG_WIDTH-1:0] S_RMESG,
output wire S_RLAST,
output wire S_RVALID,
input wire S_RREADY,
// Master Response Channel Interface Ports
input wire [(C_NUM_M+1)*C_ID_WIDTH-1:0] M_RID,
input wire [(C_NUM_M+1)*C_RMESG_WIDTH-1:0] M_RMESG,
input wire [(C_NUM_M+1)-1:0] M_RLAST,
input wire [(C_NUM_M+1)-1:0] M_RVALID,
output wire [(C_NUM_M+1)-1:0] M_RREADY,
input wire [(C_NUM_M+1)-1:0] M_RTARGET, // Does response ID from each MI-slot target this SI slot?
input wire [8-1:0] DEBUG_A_TRANS_SEQ
);
localparam integer P_WRITE = 0;
localparam integer P_READ = 1;
localparam integer P_RMUX_MESG_WIDTH = C_ID_WIDTH + C_RMESG_WIDTH + 1;
localparam [31:0] P_AXILITE_ERRMODE = 32'h00000001;
localparam integer P_NONSECURE_BIT = 1;
localparam integer P_NUM_M_LOG_M1 = C_NUM_M_LOG ? C_NUM_M_LOG : 1;
localparam [C_NUM_M-1:0] P_M_AXILITE = f_m_axilite(0); // Mask of AxiLite MI-slots
localparam [1:0] P_FIXED = 2'b00;
localparam integer P_NUM_M_DE_LOG = f_ceil_log2(C_NUM_M+1);
localparam integer P_THREAD_ID_WIDTH_M1 = (C_THREAD_ID_WIDTH > 0) ? C_THREAD_ID_WIDTH : 1;
localparam integer P_NUM_ID_VAL = 2**C_THREAD_ID_WIDTH;
localparam integer P_NUM_THREADS = (P_NUM_ID_VAL < C_ACCEPTANCE) ? P_NUM_ID_VAL : C_ACCEPTANCE;
localparam [C_NUM_M-1:0] P_M_SECURE_MASK = f_bit32to1_mi(C_M_AXI_SECURE); // Mask of secure MI-slots
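// Editor's illustration (assumed values): with C_THREAD_ID_WIDTH = 2 and C_ACCEPTANCE = 8,
// P_NUM_ID_VAL = 4 and P_NUM_THREADS = min(4, 8) = 4, i.e. at most four distinct ID threads
// are tracked even though up to eight transactions may be outstanding.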
// Ceiling of log2(x)
function integer f_ceil_log2
(
input integer x
);
integer acc;
begin
acc=0;
while ((2**acc) < x)
acc = acc + 1;
f_ceil_log2 = acc;
end
endfunction
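// Editor's note: f_ceil_log2 returns the number of bits needed to index x items, e.g.
// f_ceil_log2(1) = 0, f_ceil_log2(2) = 1, f_ceil_log2(5) = 3. It sizes P_NUM_M_DE_LOG =
// f_ceil_log2(C_NUM_M+1) so the response select can also address the decode-error target.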
// AxiLite protocol flag vector
function [C_NUM_M-1:0] f_m_axilite
(
input integer null_arg
);
integer mi;
begin
for (mi=0; mi<C_NUM_M; mi=mi+1) begin
f_m_axilite[mi] = (C_ERR_MODE[mi*32+:32] == P_AXILITE_ERRMODE);
end
end
endfunction
// Convert Bit32 vector of range [0,1] to Bit1 vector on MI
function [C_NUM_M-1:0] f_bit32to1_mi
(input [C_NUM_M*32-1:0] vec32);
integer mi;
begin
for (mi=0; mi<C_NUM_M; mi=mi+1) begin
f_bit32to1_mi[mi] = vec32[mi*32];
end
end
endfunction
wire [C_NUM_M-1:0] target_mi_hot;
wire [P_NUM_M_LOG_M1-1:0] target_mi_enc;
wire [(C_NUM_M+1)-1:0] m_atarget_hot_i;
wire [(P_NUM_M_DE_LOG)-1:0] m_atarget_enc_i;
wire match;
wire [3:0] target_region;
wire [3:0] m_aregion_i;
wire m_avalid_i;
wire s_aready_i;
wire any_error;
wire s_rvalid_i;
wire [C_ID_WIDTH-1:0] s_rid_i;
wire s_rlast_i;
wire [P_RMUX_MESG_WIDTH-1:0] si_rmux_mesg;
wire [(C_NUM_M+1)*P_RMUX_MESG_WIDTH-1:0] mi_rmux_mesg;
wire [(C_NUM_M+1)-1:0] m_rvalid_qual;
wire [(C_NUM_M+1)-1:0] m_rready_arb;
wire [(C_NUM_M+1)-1:0] m_rready_i;
wire target_secure;
wire target_axilite;
wire m_avalid_qual_i;
wire [7:0] m_aerror_i;
genvar gen_mi;
genvar gen_thread;
generate
if (C_ADDR_DECODE) begin : gen_addr_decoder
axi_crossbar_v2_1_addr_decoder #
(
.C_FAMILY (C_FAMILY),
.C_NUM_TARGETS (C_NUM_M),
.C_NUM_TARGETS_LOG (P_NUM_M_LOG_M1),
.C_NUM_RANGES (C_NUM_ADDR_RANGES),
.C_ADDR_WIDTH (C_ADDR_WIDTH),
.C_TARGET_ENC (1),
.C_TARGET_HOT (1),
.C_REGION_ENC (1),
.C_BASE_ADDR (C_BASE_ADDR),
.C_HIGH_ADDR (C_HIGH_ADDR),
.C_TARGET_QUAL (C_TARGET_QUAL),
.C_RESOLUTION (2)
)
addr_decoder_inst
(
.ADDR (S_AADDR),
.TARGET_HOT (target_mi_hot),
.TARGET_ENC (target_mi_enc),
.MATCH (match),
.REGION (target_region)
);
end else begin : gen_no_addr_decoder
assign target_mi_hot = 1;
assign target_mi_enc = 0;
assign match = 1'b1;
assign target_region = 4'b0000;
end
endgenerate
assign target_secure = |(target_mi_hot & P_M_SECURE_MASK);
assign target_axilite = |(target_mi_hot & P_M_AXILITE);
assign any_error = C_RANGE_CHECK && (m_aerror_i != 0); // DECERR if error-detection enabled and any error condition.
assign m_aerror_i[0] = ~match; // Invalid target address
assign m_aerror_i[1] = target_secure && S_APROT[P_NONSECURE_BIT]; // TrustZone violation
assign m_aerror_i[2] = target_axilite && ((S_ALEN != 0) ||
(S_ASIZE[1:0] == 2'b11) || (S_ASIZE[2] == 1'b1)); // AxiLite access violation
assign m_aerror_i[7:3] = 5'b00000; // Reserved
assign M_ATARGET_HOT = m_atarget_hot_i;
assign m_atarget_hot_i = (any_error ? {1'b1, {C_NUM_M{1'b0}}} : {1'b0, target_mi_hot});
assign m_atarget_enc_i = (any_error ? C_NUM_M : target_mi_enc);
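// Editor's illustration (assumed values): with C_RANGE_CHECK = 1 and an MI-slot flagged in
// P_M_AXILITE, a burst request with S_ALEN = 15 sets m_aerror_i[2]; any_error then forces
// m_atarget_hot_i to {1'b1, {C_NUM_M{1'b0}}} and m_atarget_enc_i to C_NUM_M, steering the
// transaction to the decode-error target instead of the addressed MI-slot.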
assign M_AVALID = m_avalid_i;
assign m_avalid_i = S_AVALID;
assign M_AVALID_QUAL = m_avalid_qual_i;
assign S_AREADY = s_aready_i;
assign s_aready_i = M_AREADY;
assign M_AERROR = m_aerror_i;
assign M_ATARGET_ENC = m_atarget_enc_i;
assign m_aregion_i = any_error ? 4'b0000 : (C_ADDR_DECODE != 0) ? target_region : 4'b0000;
// assign m_aregion_i = any_error ? 4'b0000 : (C_ADDR_DECODE != 0) ? target_region : S_AREGION;
assign M_AREGION = m_aregion_i;
assign M_AID = S_AID;
assign M_AADDR = S_AADDR;
assign M_ALEN = S_ALEN;
assign M_ASIZE = S_ASIZE;
assign M_ALOCK = S_ALOCK;
assign M_APROT = S_APROT;
assign M_AMESG = S_AMESG;
assign S_RVALID = s_rvalid_i;
assign M_RREADY = m_rready_i;
assign s_rid_i = si_rmux_mesg[0+:C_ID_WIDTH];
assign S_RMESG = si_rmux_mesg[C_ID_WIDTH+:C_RMESG_WIDTH];
assign s_rlast_i = si_rmux_mesg[C_ID_WIDTH+C_RMESG_WIDTH+:1];
assign S_RID = s_rid_i;
assign S_RLAST = s_rlast_i;
assign m_rvalid_qual = M_RVALID & M_RTARGET;
assign m_rready_i = m_rready_arb & M_RTARGET;
generate
for (gen_mi=0; gen_mi<(C_NUM_M+1); gen_mi=gen_mi+1) begin : gen_rmesg_mi
// Note: Concatenation of mesg signals is from MSB to LSB; assignments that chop mesg signals appear in opposite order.
assign mi_rmux_mesg[gen_mi*P_RMUX_MESG_WIDTH+:P_RMUX_MESG_WIDTH] = {
M_RLAST[gen_mi],
M_RMESG[gen_mi*C_RMESG_WIDTH+:C_RMESG_WIDTH],
M_RID[gen_mi*C_ID_WIDTH+:C_ID_WIDTH]
};
end // gen_rmesg_mi
if (C_ACCEPTANCE == 1) begin : gen_single_issue
wire cmd_push;
wire cmd_pop;
reg [(C_NUM_M+1)-1:0] active_target_hot;
reg [P_NUM_M_DE_LOG-1:0] active_target_enc;
reg accept_cnt;
reg [8-1:0] debug_r_beat_cnt_i;
wire [8-1:0] debug_r_trans_seq_i;
assign cmd_push = M_AREADY;
assign cmd_pop = s_rvalid_i && S_RREADY && s_rlast_i; // Pop command queue if end of read burst
assign m_avalid_qual_i = ~accept_cnt | cmd_pop; // Ready for arbitration if no outstanding transaction or transaction being completed
always @(posedge ACLK) begin
if (ARESET) begin
accept_cnt <= 1'b0;
active_target_enc <= 0;
active_target_hot <= 0;
end else begin
if (cmd_push) begin
active_target_enc <= m_atarget_enc_i;
active_target_hot <= m_atarget_hot_i;
accept_cnt <= 1'b1;
end else if (cmd_pop) begin
accept_cnt <= 1'b0;
end
end
end // Clocked process
assign m_rready_arb = active_target_hot & {(C_NUM_M+1){S_RREADY}};
assign s_rvalid_i = |(active_target_hot & m_rvalid_qual);
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY (C_FAMILY),
.C_RATIO (C_NUM_M+1),
.C_SEL_WIDTH (P_NUM_M_DE_LOG),
.C_DATA_WIDTH (P_RMUX_MESG_WIDTH)
) mux_resp_single_issue
(
.S (active_target_enc),
.A (mi_rmux_mesg),
.O (si_rmux_mesg),
.OE (1'b1)
);
if (C_DEBUG) begin : gen_debug_r_single_issue
// DEBUG READ BEAT COUNTER (only meaningful for R-channel)
always @(posedge ACLK) begin
if (ARESET) begin
debug_r_beat_cnt_i <= 0;
end else if (C_DIR == P_READ) begin
if (s_rvalid_i && S_RREADY) begin
if (s_rlast_i) begin
debug_r_beat_cnt_i <= 0;
end else begin
debug_r_beat_cnt_i <= debug_r_beat_cnt_i + 1;
end
end
end else begin
debug_r_beat_cnt_i <= 0;
end
end // Clocked process
// DEBUG R-CHANNEL TRANSACTION SEQUENCE FIFO
axi_data_fifo_v2_1_axic_srl_fifo #
(
.C_FAMILY (C_FAMILY),
.C_FIFO_WIDTH (8),
.C_FIFO_DEPTH_LOG (C_ACCEPTANCE_LOG+1),
.C_USE_FULL (0)
)
debug_r_seq_fifo_single_issue
(
.ACLK (ACLK),
.ARESET (ARESET),
.S_MESG (DEBUG_A_TRANS_SEQ),
.S_VALID (cmd_push),
.S_READY (),
.M_MESG (debug_r_trans_seq_i),
.M_VALID (),
.M_READY (cmd_pop)
);
end // gen_debug_r_single_issue
end else if (C_SINGLE_THREAD || (P_NUM_ID_VAL==1)) begin : gen_single_thread
wire s_avalid_en;
wire cmd_push;
wire cmd_pop;
reg [C_ID_WIDTH-1:0] active_id;
reg [(C_NUM_M+1)-1:0] active_target_hot;
reg [P_NUM_M_DE_LOG-1:0] active_target_enc;
reg [4-1:0] active_region;
reg [(C_ACCEPTANCE_LOG+1)-1:0] accept_cnt;
reg [8-1:0] debug_r_beat_cnt_i;
wire [8-1:0] debug_r_trans_seq_i;
wire accept_limit ;
// Implement single-region-per-ID cyclic dependency avoidance method.
assign s_avalid_en = // This transaction is qualified to request arbitration if ...
(accept_cnt == 0) || // Either there are no outstanding transactions, or ...
(((P_NUM_ID_VAL==1) || (S_AID[P_THREAD_ID_WIDTH_M1-1:0] == active_id[P_THREAD_ID_WIDTH_M1-1:0])) && // the current transaction ID matches the previous, and ...
(active_target_enc == m_atarget_enc_i) && // all outstanding transactions are to the same target MI ...
(active_region == m_aregion_i)); // and to the same REGION.
assign cmd_push = M_AREADY;
assign cmd_pop = s_rvalid_i && S_RREADY && s_rlast_i; // Pop command queue if end of read burst
assign accept_limit = (accept_cnt == C_ACCEPTANCE) & ~cmd_pop; // Allow next push if a transaction is currently being completed
assign m_avalid_qual_i = s_avalid_en & ~accept_limit;
always @(posedge ACLK) begin
if (ARESET) begin
accept_cnt <= 0;
active_id <= 0;
active_target_enc <= 0;
active_target_hot <= 0;
active_region <= 0;
end else begin
if (cmd_push) begin
active_id <= S_AID[P_THREAD_ID_WIDTH_M1-1:0];
active_target_enc <= m_atarget_enc_i;
active_target_hot <= m_atarget_hot_i;
active_region <= m_aregion_i;
if (~cmd_pop) begin
accept_cnt <= accept_cnt + 1;
end
end else begin
if (cmd_pop & (accept_cnt != 0)) begin
accept_cnt <= accept_cnt - 1;
end
end
end
end // Clocked process
assign m_rready_arb = active_target_hot & {(C_NUM_M+1){S_RREADY}};
assign s_rvalid_i = |(active_target_hot & m_rvalid_qual);
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY (C_FAMILY),
.C_RATIO (C_NUM_M+1),
.C_SEL_WIDTH (P_NUM_M_DE_LOG),
.C_DATA_WIDTH (P_RMUX_MESG_WIDTH)
) mux_resp_single_thread
(
.S (active_target_enc),
.A (mi_rmux_mesg),
.O (si_rmux_mesg),
.OE (1'b1)
);
if (C_DEBUG) begin : gen_debug_r_single_thread
// DEBUG READ BEAT COUNTER (only meaningful for R-channel)
always @(posedge ACLK) begin
if (ARESET) begin
debug_r_beat_cnt_i <= 0;
end else if (C_DIR == P_READ) begin
if (s_rvalid_i && S_RREADY) begin
if (s_rlast_i) begin
debug_r_beat_cnt_i <= 0;
end else begin
debug_r_beat_cnt_i <= debug_r_beat_cnt_i + 1;
end
end
end else begin
debug_r_beat_cnt_i <= 0;
end
end // Clocked process
// DEBUG R-CHANNEL TRANSACTION SEQUENCE FIFO
axi_data_fifo_v2_1_axic_srl_fifo #
(
.C_FAMILY (C_FAMILY),
.C_FIFO_WIDTH (8),
.C_FIFO_DEPTH_LOG (C_ACCEPTANCE_LOG+1),
.C_USE_FULL (0)
)
debug_r_seq_fifo_single_thread
(
.ACLK (ACLK),
.ARESET (ARESET),
.S_MESG (DEBUG_A_TRANS_SEQ),
.S_VALID (cmd_push),
.S_READY (),
.M_MESG (debug_r_trans_seq_i),
.M_VALID (),
.M_READY (cmd_pop)
);
end // gen_debug_r_single_thread
end else begin : gen_multi_thread
wire [(P_NUM_M_DE_LOG)-1:0] resp_select;
reg [(C_ACCEPTANCE_LOG+1)-1:0] accept_cnt;
wire [P_NUM_THREADS-1:0] s_avalid_en;
wire [P_NUM_THREADS-1:0] thread_valid;
wire [P_NUM_THREADS-1:0] aid_match;
wire [P_NUM_THREADS-1:0] rid_match;
wire [P_NUM_THREADS-1:0] cmd_push;
wire [P_NUM_THREADS-1:0] cmd_pop;
wire [P_NUM_THREADS:0] accum_push;
reg [P_NUM_THREADS*C_ID_WIDTH-1:0] active_id;
reg [P_NUM_THREADS*8-1:0] active_target;
reg [P_NUM_THREADS*8-1:0] active_region;
reg [P_NUM_THREADS*8-1:0] active_cnt;
reg [P_NUM_THREADS*8-1:0] debug_r_beat_cnt_i;
wire [P_NUM_THREADS*8-1:0] debug_r_trans_seq_i;
wire any_aid_match;
wire any_rid_match;
wire accept_limit;
wire any_push;
wire any_pop;
axi_crossbar_v2_1_arbiter_resp # // Multi-thread response arbiter
(
.C_FAMILY (C_FAMILY),
.C_NUM_S (C_NUM_M+1),
.C_NUM_S_LOG (P_NUM_M_DE_LOG),
.C_GRANT_ENC (1),
.C_GRANT_HOT (0)
)
arbiter_resp_inst
(
.ACLK (ACLK),
.ARESET (ARESET),
.S_VALID (m_rvalid_qual),
.S_READY (m_rready_arb),
.M_GRANT_HOT (),
.M_GRANT_ENC (resp_select),
.M_VALID (s_rvalid_i),
.M_READY (S_RREADY)
);
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY (C_FAMILY),
.C_RATIO (C_NUM_M+1),
.C_SEL_WIDTH (P_NUM_M_DE_LOG),
.C_DATA_WIDTH (P_RMUX_MESG_WIDTH)
) mux_resp_multi_thread
(
.S (resp_select),
.A (mi_rmux_mesg),
.O (si_rmux_mesg),
.OE (1'b1)
);
assign any_push = M_AREADY;
assign any_pop = s_rvalid_i & S_RREADY & s_rlast_i;
assign accept_limit = (accept_cnt == C_ACCEPTANCE) & ~any_pop; // Allow next push if a transaction is currently being completed
assign m_avalid_qual_i = (&s_avalid_en) & ~accept_limit; // The current request is qualified for arbitration when it is qualified against all outstanding transaction threads.
assign any_aid_match = |aid_match;
assign any_rid_match = |rid_match;
assign accum_push[0] = 1'b0;
always @(posedge ACLK) begin
if (ARESET) begin
accept_cnt <= 0;
end else begin
if (any_push & ~any_pop) begin
accept_cnt <= accept_cnt + 1;
end else if (any_pop & ~any_push & (accept_cnt != 0)) begin
accept_cnt <= accept_cnt - 1;
end
end
end // Clocked process
for (gen_thread=0; gen_thread<P_NUM_THREADS; gen_thread=gen_thread+1) begin : gen_thread_loop
assign thread_valid[gen_thread] = (active_cnt[gen_thread*8 +: C_ACCEPTANCE_LOG+1] != 0);
assign aid_match[gen_thread] = // The current thread is active for the requested transaction if
thread_valid[gen_thread] && // this thread slot is not vacant, and
((S_AID[P_THREAD_ID_WIDTH_M1-1:0]) == active_id[gen_thread*C_ID_WIDTH+:P_THREAD_ID_WIDTH_M1]); // the requested ID matches the active ID for this thread.
assign s_avalid_en[gen_thread] = // The current request is qualified against this thread slot if
(~aid_match[gen_thread]) || // This thread slot is not active for the requested ID, or
((m_atarget_enc_i == active_target[gen_thread*8+:P_NUM_M_DE_LOG]) && // this outstanding transaction was to the same target and
(m_aregion_i == active_region[gen_thread*8+:4])); // to the same region.
// cmd_push points to the position of either the active thread for the requested ID or the lowest vacant thread slot.
assign accum_push[gen_thread+1] = accum_push[gen_thread] | ~thread_valid[gen_thread];
assign cmd_push[gen_thread] = any_push & (aid_match[gen_thread] | ((~any_aid_match) & ~thread_valid[gen_thread] & ~accum_push[gen_thread]));
// cmd_pop points to the position of the active thread that matches the current RID.
assign rid_match[gen_thread] = thread_valid[gen_thread] & ((s_rid_i[P_THREAD_ID_WIDTH_M1-1:0]) == active_id[gen_thread*C_ID_WIDTH+:P_THREAD_ID_WIDTH_M1]);
assign cmd_pop[gen_thread] = any_pop & rid_match[gen_thread];
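// Editor's illustration (assumed values): with P_NUM_THREADS = 4, if the incoming S_AID thread
// bits match active_id of slot 2, aid_match = 4'b0100 and cmd_push = 4'b0100 when M_AREADY.
// If no slot matches and slots 0 and 2 are vacant, accum_push masks all but the lowest vacant
// slot, so cmd_push = 4'b0001 allocates slot 0.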
always @(posedge ACLK) begin
if (ARESET) begin
active_id[gen_thread*C_ID_WIDTH+:C_ID_WIDTH] <= 0;
active_target[gen_thread*8+:8] <= 0;
active_region[gen_thread*8+:8] <= 0;
active_cnt[gen_thread*8+:8] <= 0;
end else begin
if (cmd_push[gen_thread]) begin
active_id[gen_thread*C_ID_WIDTH+:P_THREAD_ID_WIDTH_M1] <= S_AID[P_THREAD_ID_WIDTH_M1-1:0];
active_target[gen_thread*8+:P_NUM_M_DE_LOG] <= m_atarget_enc_i;
active_region[gen_thread*8+:4] <= m_aregion_i;
if (~cmd_pop[gen_thread]) begin
active_cnt[gen_thread*8+:C_ACCEPTANCE_LOG+1] <= active_cnt[gen_thread*8+:C_ACCEPTANCE_LOG+1] + 1;
end
end else if (cmd_pop[gen_thread]) begin
active_cnt[gen_thread*8+:C_ACCEPTANCE_LOG+1] <= active_cnt[gen_thread*8+:C_ACCEPTANCE_LOG+1] - 1;
end
end
end // Clocked process
if (C_DEBUG) begin : gen_debug_r_multi_thread
// DEBUG READ BEAT COUNTER (only meaningful for R-channel)
always @(posedge ACLK) begin
if (ARESET) begin
debug_r_beat_cnt_i[gen_thread*8+:8] <= 0;
end else if (C_DIR == P_READ) begin
if (s_rvalid_i & S_RREADY & rid_match[gen_thread]) begin
if (s_rlast_i) begin
debug_r_beat_cnt_i[gen_thread*8+:8] <= 0;
end else begin
debug_r_beat_cnt_i[gen_thread*8+:8] <= debug_r_beat_cnt_i[gen_thread*8+:8] + 1;
end
end
end else begin
debug_r_beat_cnt_i[gen_thread*8+:8] <= 0;
end
end // Clocked process
// DEBUG R-CHANNEL TRANSACTION SEQUENCE FIFO
axi_data_fifo_v2_1_axic_srl_fifo #
(
.C_FAMILY (C_FAMILY),
.C_FIFO_WIDTH (8),
.C_FIFO_DEPTH_LOG (C_ACCEPTANCE_LOG+1),
.C_USE_FULL (0)
)
debug_r_seq_fifo_multi_thread
(
.ACLK (ACLK),
.ARESET (ARESET),
.S_MESG (DEBUG_A_TRANS_SEQ),
.S_VALID (cmd_push[gen_thread]),
.S_READY (),
.M_MESG (debug_r_trans_seq_i[gen_thread*8+:8]),
.M_VALID (),
.M_READY (cmd_pop[gen_thread])
);
end // gen_debug_r_multi_thread
end // Next gen_thread_loop
end // thread control
endgenerate
endmodule
|
module axi_crossbar_v2_1_si_transactor #
(
parameter C_FAMILY = "none",
parameter integer C_SI = 0, // SI-slot number of current instance.
parameter integer C_DIR = 0, // Direction: 0 = Write; 1 = Read.
parameter integer C_NUM_ADDR_RANGES = 1,
parameter integer C_NUM_M = 2,
parameter integer C_NUM_M_LOG = 1,
parameter integer C_ACCEPTANCE = 1, // Acceptance limit of this SI-slot.
parameter integer C_ACCEPTANCE_LOG = 0, // Width of acceptance counter for this SI-slot.
parameter integer C_ID_WIDTH = 1,
parameter integer C_THREAD_ID_WIDTH = 0,
parameter integer C_ADDR_WIDTH = 32,
parameter integer C_AMESG_WIDTH = 1, // Used for AW or AR channel payload, depending on instantiation.
parameter integer C_RMESG_WIDTH = 1, // Used for B or R channel payload, depending on instantiation.
parameter [C_ID_WIDTH-1:0] C_BASE_ID = {C_ID_WIDTH{1'b0}},
parameter [C_ID_WIDTH-1:0] C_HIGH_ID = {C_ID_WIDTH{1'b0}},
parameter [C_NUM_M*C_NUM_ADDR_RANGES*64-1:0] C_BASE_ADDR = {C_NUM_M*C_NUM_ADDR_RANGES*64{1'b1}},
parameter [C_NUM_M*C_NUM_ADDR_RANGES*64-1:0] C_HIGH_ADDR = {C_NUM_M*C_NUM_ADDR_RANGES*64{1'b0}},
parameter integer C_SINGLE_THREAD = 0,
parameter [C_NUM_M-1:0] C_TARGET_QUAL = {C_NUM_M{1'b1}},
parameter [C_NUM_M*32-1:0] C_M_AXI_SECURE = {C_NUM_M{32'h00000000}},
parameter integer C_RANGE_CHECK = 0,
   parameter integer C_ADDR_DECODE                = 0,
parameter [C_NUM_M*32-1:0] C_ERR_MODE = {C_NUM_M{32'h00000000}},
parameter integer C_DEBUG = 1
)
(
// Global Signals
input wire ACLK,
input wire ARESET,
// Slave Address Channel Interface Ports
input wire [C_ID_WIDTH-1:0] S_AID,
input wire [C_ADDR_WIDTH-1:0] S_AADDR,
input wire [8-1:0] S_ALEN,
input wire [3-1:0] S_ASIZE,
input wire [2-1:0] S_ABURST,
input wire [2-1:0] S_ALOCK,
input wire [3-1:0] S_APROT,
// input wire [4-1:0] S_AREGION,
input wire [C_AMESG_WIDTH-1:0] S_AMESG,
input wire S_AVALID,
output wire S_AREADY,
// Master Address Channel Interface Ports
output wire [C_ID_WIDTH-1:0] M_AID,
output wire [C_ADDR_WIDTH-1:0] M_AADDR,
output wire [8-1:0] M_ALEN,
output wire [3-1:0] M_ASIZE,
output wire [2-1:0] M_ALOCK,
output wire [3-1:0] M_APROT,
output wire [4-1:0] M_AREGION,
output wire [C_AMESG_WIDTH-1:0] M_AMESG,
output wire [(C_NUM_M+1)-1:0] M_ATARGET_HOT,
output wire [(C_NUM_M_LOG+1)-1:0] M_ATARGET_ENC,
output wire [7:0] M_AERROR,
output wire M_AVALID_QUAL,
output wire M_AVALID,
input wire M_AREADY,
// Slave Response Channel Interface Ports
output wire [C_ID_WIDTH-1:0] S_RID,
output wire [C_RMESG_WIDTH-1:0] S_RMESG,
output wire S_RLAST,
output wire S_RVALID,
input wire S_RREADY,
// Master Response Channel Interface Ports
input wire [(C_NUM_M+1)*C_ID_WIDTH-1:0] M_RID,
input wire [(C_NUM_M+1)*C_RMESG_WIDTH-1:0] M_RMESG,
input wire [(C_NUM_M+1)-1:0] M_RLAST,
input wire [(C_NUM_M+1)-1:0] M_RVALID,
output wire [(C_NUM_M+1)-1:0] M_RREADY,
input wire [(C_NUM_M+1)-1:0] M_RTARGET, // Does response ID from each MI-slot target this SI slot?
input wire [8-1:0] DEBUG_A_TRANS_SEQ
);
localparam integer P_WRITE = 0;
localparam integer P_READ = 1;
localparam integer P_RMUX_MESG_WIDTH = C_ID_WIDTH + C_RMESG_WIDTH + 1;
localparam [31:0] P_AXILITE_ERRMODE = 32'h00000001;
localparam integer P_NONSECURE_BIT = 1;
localparam integer P_NUM_M_LOG_M1 = C_NUM_M_LOG ? C_NUM_M_LOG : 1;
localparam [C_NUM_M-1:0] P_M_AXILITE = f_m_axilite(0); // Mask of AxiLite MI-slots
localparam [1:0] P_FIXED = 2'b00;
localparam integer P_NUM_M_DE_LOG = f_ceil_log2(C_NUM_M+1);
localparam integer P_THREAD_ID_WIDTH_M1 = (C_THREAD_ID_WIDTH > 0) ? C_THREAD_ID_WIDTH : 1;
localparam integer P_NUM_ID_VAL = 2**C_THREAD_ID_WIDTH;
localparam integer P_NUM_THREADS = (P_NUM_ID_VAL < C_ACCEPTANCE) ? P_NUM_ID_VAL : C_ACCEPTANCE;
localparam [C_NUM_M-1:0] P_M_SECURE_MASK = f_bit32to1_mi(C_M_AXI_SECURE); // Mask of secure MI-slots
// Ceiling of log2(x)
function integer f_ceil_log2
(
input integer x
);
integer acc;
begin
acc=0;
while ((2**acc) < x)
acc = acc + 1;
f_ceil_log2 = acc;
end
endfunction
// AxiLite protocol flag vector
function [C_NUM_M-1:0] f_m_axilite
(
input integer null_arg
);
integer mi;
begin
for (mi=0; mi<C_NUM_M; mi=mi+1) begin
f_m_axilite[mi] = (C_ERR_MODE[mi*32+:32] == P_AXILITE_ERRMODE);
end
end
endfunction
// Convert Bit32 vector of range [0,1] to Bit1 vector on MI
function [C_NUM_M-1:0] f_bit32to1_mi
(input [C_NUM_M*32-1:0] vec32);
integer mi;
begin
for (mi=0; mi<C_NUM_M; mi=mi+1) begin
f_bit32to1_mi[mi] = vec32[mi*32];
end
end
endfunction
wire [C_NUM_M-1:0] target_mi_hot;
wire [P_NUM_M_LOG_M1-1:0] target_mi_enc;
wire [(C_NUM_M+1)-1:0] m_atarget_hot_i;
wire [(P_NUM_M_DE_LOG)-1:0] m_atarget_enc_i;
wire match;
wire [3:0] target_region;
wire [3:0] m_aregion_i;
wire m_avalid_i;
wire s_aready_i;
wire any_error;
wire s_rvalid_i;
wire [C_ID_WIDTH-1:0] s_rid_i;
wire s_rlast_i;
wire [P_RMUX_MESG_WIDTH-1:0] si_rmux_mesg;
wire [(C_NUM_M+1)*P_RMUX_MESG_WIDTH-1:0] mi_rmux_mesg;
wire [(C_NUM_M+1)-1:0] m_rvalid_qual;
wire [(C_NUM_M+1)-1:0] m_rready_arb;
wire [(C_NUM_M+1)-1:0] m_rready_i;
wire target_secure;
wire target_axilite;
wire m_avalid_qual_i;
wire [7:0] m_aerror_i;
genvar gen_mi;
genvar gen_thread;
generate
if (C_ADDR_DECODE) begin : gen_addr_decoder
axi_crossbar_v2_1_addr_decoder #
(
.C_FAMILY (C_FAMILY),
.C_NUM_TARGETS (C_NUM_M),
.C_NUM_TARGETS_LOG (P_NUM_M_LOG_M1),
.C_NUM_RANGES (C_NUM_ADDR_RANGES),
.C_ADDR_WIDTH (C_ADDR_WIDTH),
.C_TARGET_ENC (1),
.C_TARGET_HOT (1),
.C_REGION_ENC (1),
.C_BASE_ADDR (C_BASE_ADDR),
.C_HIGH_ADDR (C_HIGH_ADDR),
.C_TARGET_QUAL (C_TARGET_QUAL),
.C_RESOLUTION (2)
)
addr_decoder_inst
(
.ADDR (S_AADDR),
.TARGET_HOT (target_mi_hot),
.TARGET_ENC (target_mi_enc),
.MATCH (match),
.REGION (target_region)
);
end else begin : gen_no_addr_decoder
assign target_mi_hot = 1;
assign target_mi_enc = 0;
assign match = 1'b1;
assign target_region = 4'b0000;
end
endgenerate
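  // Address-phase error flags: bit 0 = no address range matched, bit 1 = TrustZone
  // (secure) violation, bit 2 = AxiLite burst/size violation. When C_RANGE_CHECK is
  // enabled, any set flag steers the transaction to the error-handler slot (index C_NUM_M).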
assign target_secure = |(target_mi_hot & P_M_SECURE_MASK);
assign target_axilite = |(target_mi_hot & P_M_AXILITE);
assign any_error = C_RANGE_CHECK && (m_aerror_i != 0); // DECERR if error-detection enabled and any error condition.
assign m_aerror_i[0] = ~match; // Invalid target address
assign m_aerror_i[1] = target_secure && S_APROT[P_NONSECURE_BIT]; // TrustZone violation
assign m_aerror_i[2] = target_axilite && ((S_ALEN != 0) ||
(S_ASIZE[1:0] == 2'b11) || (S_ASIZE[2] == 1'b1)); // AxiLite access violation
assign m_aerror_i[7:3] = 5'b00000; // Reserved
assign M_ATARGET_HOT = m_atarget_hot_i;
assign m_atarget_hot_i = (any_error ? {1'b1, {C_NUM_M{1'b0}}} : {1'b0, target_mi_hot});
assign m_atarget_enc_i = (any_error ? C_NUM_M : target_mi_enc);
assign M_AVALID = m_avalid_i;
assign m_avalid_i = S_AVALID;
assign M_AVALID_QUAL = m_avalid_qual_i;
assign S_AREADY = s_aready_i;
assign s_aready_i = M_AREADY;
assign M_AERROR = m_aerror_i;
assign M_ATARGET_ENC = m_atarget_enc_i;
assign m_aregion_i = any_error ? 4'b0000 : (C_ADDR_DECODE != 0) ? target_region : 4'b0000;
// assign m_aregion_i = any_error ? 4'b0000 : (C_ADDR_DECODE != 0) ? target_region : S_AREGION;
assign M_AREGION = m_aregion_i;
assign M_AID = S_AID;
assign M_AADDR = S_AADDR;
assign M_ALEN = S_ALEN;
assign M_ASIZE = S_ASIZE;
assign M_ALOCK = S_ALOCK;
assign M_APROT = S_APROT;
assign M_AMESG = S_AMESG;
assign S_RVALID = s_rvalid_i;
assign M_RREADY = m_rready_i;
assign s_rid_i = si_rmux_mesg[0+:C_ID_WIDTH];
assign S_RMESG = si_rmux_mesg[C_ID_WIDTH+:C_RMESG_WIDTH];
assign s_rlast_i = si_rmux_mesg[C_ID_WIDTH+C_RMESG_WIDTH+:1];
assign S_RID = s_rid_i;
assign S_RLAST = s_rlast_i;
assign m_rvalid_qual = M_RVALID & M_RTARGET;
assign m_rready_i = m_rready_arb & M_RTARGET;
generate
for (gen_mi=0; gen_mi<(C_NUM_M+1); gen_mi=gen_mi+1) begin : gen_rmesg_mi
// Note: Concatenation of mesg signals is from MSB to LSB; assignments that chop mesg signals appear in opposite order.
assign mi_rmux_mesg[gen_mi*P_RMUX_MESG_WIDTH+:P_RMUX_MESG_WIDTH] = {
M_RLAST[gen_mi],
M_RMESG[gen_mi*C_RMESG_WIDTH+:C_RMESG_WIDTH],
M_RID[gen_mi*C_ID_WIDTH+:C_ID_WIDTH]
};
end // gen_rmesg_mi
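  // Thread-control variants: single outstanding transaction (C_ACCEPTANCE == 1),
  // single-thread ordering (C_SINGLE_THREAD or only one ID value), or full
  // multi-thread bookkeeping with one slot per ID thread.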
if (C_ACCEPTANCE == 1) begin : gen_single_issue
wire cmd_push;
wire cmd_pop;
reg [(C_NUM_M+1)-1:0] active_target_hot;
reg [P_NUM_M_DE_LOG-1:0] active_target_enc;
reg accept_cnt;
reg [8-1:0] debug_r_beat_cnt_i;
wire [8-1:0] debug_r_trans_seq_i;
assign cmd_push = M_AREADY;
assign cmd_pop = s_rvalid_i && S_RREADY && s_rlast_i; // Pop command queue if end of read burst
assign m_avalid_qual_i = ~accept_cnt | cmd_pop; // Ready for arbitration if no outstanding transaction or transaction being completed
always @(posedge ACLK) begin
if (ARESET) begin
accept_cnt <= 1'b0;
active_target_enc <= 0;
active_target_hot <= 0;
end else begin
if (cmd_push) begin
active_target_enc <= m_atarget_enc_i;
active_target_hot <= m_atarget_hot_i;
accept_cnt <= 1'b1;
end else if (cmd_pop) begin
accept_cnt <= 1'b0;
end
end
end // Clocked process
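      // Route the response handshake to/from the single recorded target MI-slot.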
assign m_rready_arb = active_target_hot & {(C_NUM_M+1){S_RREADY}};
assign s_rvalid_i = |(active_target_hot & m_rvalid_qual);
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY (C_FAMILY),
.C_RATIO (C_NUM_M+1),
.C_SEL_WIDTH (P_NUM_M_DE_LOG),
.C_DATA_WIDTH (P_RMUX_MESG_WIDTH)
) mux_resp_single_issue
(
.S (active_target_enc),
.A (mi_rmux_mesg),
.O (si_rmux_mesg),
.OE (1'b1)
);
if (C_DEBUG) begin : gen_debug_r_single_issue
// DEBUG READ BEAT COUNTER (only meaningful for R-channel)
always @(posedge ACLK) begin
if (ARESET) begin
debug_r_beat_cnt_i <= 0;
end else if (C_DIR == P_READ) begin
if (s_rvalid_i && S_RREADY) begin
if (s_rlast_i) begin
debug_r_beat_cnt_i <= 0;
end else begin
debug_r_beat_cnt_i <= debug_r_beat_cnt_i + 1;
end
end
end else begin
debug_r_beat_cnt_i <= 0;
end
end // Clocked process
// DEBUG R-CHANNEL TRANSACTION SEQUENCE FIFO
axi_data_fifo_v2_1_axic_srl_fifo #
(
.C_FAMILY (C_FAMILY),
.C_FIFO_WIDTH (8),
.C_FIFO_DEPTH_LOG (C_ACCEPTANCE_LOG+1),
.C_USE_FULL (0)
)
debug_r_seq_fifo_single_issue
(
.ACLK (ACLK),
.ARESET (ARESET),
.S_MESG (DEBUG_A_TRANS_SEQ),
.S_VALID (cmd_push),
.S_READY (),
.M_MESG (debug_r_trans_seq_i),
.M_VALID (),
.M_READY (cmd_pop)
);
      end  // gen_debug_r_single_issue
end else if (C_SINGLE_THREAD || (P_NUM_ID_VAL==1)) begin : gen_single_thread
wire s_avalid_en;
wire cmd_push;
wire cmd_pop;
reg [C_ID_WIDTH-1:0] active_id;
reg [(C_NUM_M+1)-1:0] active_target_hot;
reg [P_NUM_M_DE_LOG-1:0] active_target_enc;
reg [4-1:0] active_region;
reg [(C_ACCEPTANCE_LOG+1)-1:0] accept_cnt;
reg [8-1:0] debug_r_beat_cnt_i;
wire [8-1:0] debug_r_trans_seq_i;
      wire                                accept_limit;
// Implement single-region-per-ID cyclic dependency avoidance method.
assign s_avalid_en = // This transaction is qualified to request arbitration if ...
(accept_cnt == 0) || // Either there are no outstanding transactions, or ...
(((P_NUM_ID_VAL==1) || (S_AID[P_THREAD_ID_WIDTH_M1-1:0] == active_id[P_THREAD_ID_WIDTH_M1-1:0])) && // the current transaction ID matches the previous, and ...
(active_target_enc == m_atarget_enc_i) && // all outstanding transactions are to the same target MI ...
(active_region == m_aregion_i)); // and to the same REGION.
assign cmd_push = M_AREADY;
assign cmd_pop = s_rvalid_i && S_RREADY && s_rlast_i; // Pop command queue if end of read burst
      assign accept_limit  = (accept_cnt == C_ACCEPTANCE) & ~cmd_pop;  // At the acceptance limit, block further pushes unless a transaction completes this cycle.
assign m_avalid_qual_i = s_avalid_en & ~accept_limit;
always @(posedge ACLK) begin
if (ARESET) begin
accept_cnt <= 0;
active_id <= 0;
active_target_enc <= 0;
active_target_hot <= 0;
active_region <= 0;
end else begin
if (cmd_push) begin
active_id <= S_AID[P_THREAD_ID_WIDTH_M1-1:0];
active_target_enc <= m_atarget_enc_i;
active_target_hot <= m_atarget_hot_i;
active_region <= m_aregion_i;
if (~cmd_pop) begin
accept_cnt <= accept_cnt + 1;
end
end else begin
if (cmd_pop & (accept_cnt != 0)) begin
accept_cnt <= accept_cnt - 1;
end
end
end
end // Clocked process
assign m_rready_arb = active_target_hot & {(C_NUM_M+1){S_RREADY}};
assign s_rvalid_i = |(active_target_hot & m_rvalid_qual);
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY (C_FAMILY),
.C_RATIO (C_NUM_M+1),
.C_SEL_WIDTH (P_NUM_M_DE_LOG),
.C_DATA_WIDTH (P_RMUX_MESG_WIDTH)
) mux_resp_single_thread
(
.S (active_target_enc),
.A (mi_rmux_mesg),
.O (si_rmux_mesg),
.OE (1'b1)
);
if (C_DEBUG) begin : gen_debug_r_single_thread
// DEBUG READ BEAT COUNTER (only meaningful for R-channel)
always @(posedge ACLK) begin
if (ARESET) begin
debug_r_beat_cnt_i <= 0;
end else if (C_DIR == P_READ) begin
if (s_rvalid_i && S_RREADY) begin
if (s_rlast_i) begin
debug_r_beat_cnt_i <= 0;
end else begin
debug_r_beat_cnt_i <= debug_r_beat_cnt_i + 1;
end
end
end else begin
debug_r_beat_cnt_i <= 0;
end
end // Clocked process
// DEBUG R-CHANNEL TRANSACTION SEQUENCE FIFO
axi_data_fifo_v2_1_axic_srl_fifo #
(
.C_FAMILY (C_FAMILY),
.C_FIFO_WIDTH (8),
.C_FIFO_DEPTH_LOG (C_ACCEPTANCE_LOG+1),
.C_USE_FULL (0)
)
debug_r_seq_fifo_single_thread
(
.ACLK (ACLK),
.ARESET (ARESET),
.S_MESG (DEBUG_A_TRANS_SEQ),
.S_VALID (cmd_push),
.S_READY (),
.M_MESG (debug_r_trans_seq_i),
.M_VALID (),
.M_READY (cmd_pop)
);
      end  // gen_debug_r_single_thread
end else begin : gen_multi_thread
wire [(P_NUM_M_DE_LOG)-1:0] resp_select;
reg [(C_ACCEPTANCE_LOG+1)-1:0] accept_cnt;
wire [P_NUM_THREADS-1:0] s_avalid_en;
wire [P_NUM_THREADS-1:0] thread_valid;
wire [P_NUM_THREADS-1:0] aid_match;
wire [P_NUM_THREADS-1:0] rid_match;
wire [P_NUM_THREADS-1:0] cmd_push;
wire [P_NUM_THREADS-1:0] cmd_pop;
wire [P_NUM_THREADS:0] accum_push;
reg [P_NUM_THREADS*C_ID_WIDTH-1:0] active_id;
reg [P_NUM_THREADS*8-1:0] active_target;
reg [P_NUM_THREADS*8-1:0] active_region;
reg [P_NUM_THREADS*8-1:0] active_cnt;
reg [P_NUM_THREADS*8-1:0] debug_r_beat_cnt_i;
wire [P_NUM_THREADS*8-1:0] debug_r_trans_seq_i;
wire any_aid_match;
wire any_rid_match;
wire accept_limit;
wire any_push;
wire any_pop;
axi_crossbar_v2_1_arbiter_resp # // Multi-thread response arbiter
(
.C_FAMILY (C_FAMILY),
.C_NUM_S (C_NUM_M+1),
.C_NUM_S_LOG (P_NUM_M_DE_LOG),
.C_GRANT_ENC (1),
.C_GRANT_HOT (0)
)
arbiter_resp_inst
(
.ACLK (ACLK),
.ARESET (ARESET),
.S_VALID (m_rvalid_qual),
.S_READY (m_rready_arb),
.M_GRANT_HOT (),
.M_GRANT_ENC (resp_select),
.M_VALID (s_rvalid_i),
.M_READY (S_RREADY)
);
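      // The granted MI-slot's response payload is muxed onto the SI response channel.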
generic_baseblocks_v2_1_mux_enc #
(
.C_FAMILY (C_FAMILY),
.C_RATIO (C_NUM_M+1),
.C_SEL_WIDTH (P_NUM_M_DE_LOG),
.C_DATA_WIDTH (P_RMUX_MESG_WIDTH)
) mux_resp_multi_thread
(
.S (resp_select),
.A (mi_rmux_mesg),
.O (si_rmux_mesg),
.OE (1'b1)
);
assign any_push = M_AREADY;
assign any_pop = s_rvalid_i & S_RREADY & s_rlast_i;
      assign accept_limit  = (accept_cnt == C_ACCEPTANCE) & ~any_pop;  // At the acceptance limit, block further pushes unless a transaction completes this cycle.
assign m_avalid_qual_i = (&s_avalid_en) & ~accept_limit; // The current request is qualified for arbitration when it is qualified against all outstanding transaction threads.
assign any_aid_match = |aid_match;
assign any_rid_match = |rid_match;
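      // accum_push is a carry chain, seeded here and extended per thread slot below:
      // bit k is set once any slot below k is vacant, so a command with no ID match
      // lands only in the lowest-numbered vacant slot.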
assign accum_push[0] = 1'b0;
always @(posedge ACLK) begin
if (ARESET) begin
accept_cnt <= 0;
end else begin
if (any_push & ~any_pop) begin
accept_cnt <= accept_cnt + 1;
end else if (any_pop & ~any_push & (accept_cnt != 0)) begin
accept_cnt <= accept_cnt - 1;
end
end
end // Clocked process
for (gen_thread=0; gen_thread<P_NUM_THREADS; gen_thread=gen_thread+1) begin : gen_thread_loop
assign thread_valid[gen_thread] = (active_cnt[gen_thread*8 +: C_ACCEPTANCE_LOG+1] != 0);
        assign aid_match[gen_thread] =   // The current thread is active for the requested transaction if
thread_valid[gen_thread] && // this thread slot is not vacant, and
((S_AID[P_THREAD_ID_WIDTH_M1-1:0]) == active_id[gen_thread*C_ID_WIDTH+:P_THREAD_ID_WIDTH_M1]); // the requested ID matches the active ID for this thread.
assign s_avalid_en[gen_thread] = // The current request is qualified against this thread slot if
(~aid_match[gen_thread]) || // This thread slot is not active for the requested ID, or
((m_atarget_enc_i == active_target[gen_thread*8+:P_NUM_M_DE_LOG]) && // this outstanding transaction was to the same target and
(m_aregion_i == active_region[gen_thread*8+:4])); // to the same region.
// cmd_push points to the position of either the active thread for the requested ID or the lowest vacant thread slot.
assign accum_push[gen_thread+1] = accum_push[gen_thread] | ~thread_valid[gen_thread];
assign cmd_push[gen_thread] = any_push & (aid_match[gen_thread] | ((~any_aid_match) & ~thread_valid[gen_thread] & ~accum_push[gen_thread]));
// cmd_pop points to the position of the active thread that matches the current RID.
assign rid_match[gen_thread] = thread_valid[gen_thread] & ((s_rid_i[P_THREAD_ID_WIDTH_M1-1:0]) == active_id[gen_thread*C_ID_WIDTH+:P_THREAD_ID_WIDTH_M1]);
assign cmd_pop[gen_thread] = any_pop & rid_match[gen_thread];
always @(posedge ACLK) begin
if (ARESET) begin
active_id[gen_thread*C_ID_WIDTH+:C_ID_WIDTH] <= 0;
active_target[gen_thread*8+:8] <= 0;
active_region[gen_thread*8+:8] <= 0;
active_cnt[gen_thread*8+:8] <= 0;
end else begin
if (cmd_push[gen_thread]) begin
active_id[gen_thread*C_ID_WIDTH+:P_THREAD_ID_WIDTH_M1] <= S_AID[P_THREAD_ID_WIDTH_M1-1:0];
active_target[gen_thread*8+:P_NUM_M_DE_LOG] <= m_atarget_enc_i;
active_region[gen_thread*8+:4] <= m_aregion_i;
if (~cmd_pop[gen_thread]) begin
active_cnt[gen_thread*8+:C_ACCEPTANCE_LOG+1] <= active_cnt[gen_thread*8+:C_ACCEPTANCE_LOG+1] + 1;
end
end else if (cmd_pop[gen_thread]) begin
active_cnt[gen_thread*8+:C_ACCEPTANCE_LOG+1] <= active_cnt[gen_thread*8+:C_ACCEPTANCE_LOG+1] - 1;
end
end
end // Clocked process
if (C_DEBUG) begin : gen_debug_r_multi_thread
// DEBUG READ BEAT COUNTER (only meaningful for R-channel)
always @(posedge ACLK) begin
if (ARESET) begin
debug_r_beat_cnt_i[gen_thread*8+:8] <= 0;
end else if (C_DIR == P_READ) begin
if (s_rvalid_i & S_RREADY & rid_match[gen_thread]) begin
if (s_rlast_i) begin
debug_r_beat_cnt_i[gen_thread*8+:8] <= 0;
end else begin
debug_r_beat_cnt_i[gen_thread*8+:8] <= debug_r_beat_cnt_i[gen_thread*8+:8] + 1;
end
end
end else begin
debug_r_beat_cnt_i[gen_thread*8+:8] <= 0;
end
end // Clocked process
// DEBUG R-CHANNEL TRANSACTION SEQUENCE FIFO
axi_data_fifo_v2_1_axic_srl_fifo #
(
.C_FAMILY (C_FAMILY),
.C_FIFO_WIDTH (8),
.C_FIFO_DEPTH_LOG (C_ACCEPTANCE_LOG+1),
.C_USE_FULL (0)
)
debug_r_seq_fifo_multi_thread
(
.ACLK (ACLK),
.ARESET (ARESET),
.S_MESG (DEBUG_A_TRANS_SEQ),
.S_VALID (cmd_push[gen_thread]),
.S_READY (),
.M_MESG (debug_r_trans_seq_i[gen_thread*8+:8]),
.M_VALID (),
.M_READY (cmd_pop[gen_thread])
);
end // gen_debug_r_multi_thread
end // Next gen_thread_loop
end // thread control
endgenerate
endmodule
|
module processing_system7_bfm_v2_0_5_interconnect_model (
rstn,
sw_clk,
w_qos_gp0,
w_qos_gp1,
w_qos_hp0,
w_qos_hp1,
w_qos_hp2,
w_qos_hp3,
r_qos_gp0,
r_qos_gp1,
r_qos_hp0,
r_qos_hp1,
r_qos_hp2,
r_qos_hp3,
wr_ack_ddr_gp0,
wr_ack_ocm_gp0,
wr_data_gp0,
wr_addr_gp0,
wr_bytes_gp0,
wr_dv_ddr_gp0,
wr_dv_ocm_gp0,
rd_req_ddr_gp0,
rd_req_ocm_gp0,
rd_req_reg_gp0,
rd_addr_gp0,
rd_bytes_gp0,
rd_data_ddr_gp0,
rd_data_ocm_gp0,
rd_data_reg_gp0,
rd_dv_ddr_gp0,
rd_dv_ocm_gp0,
rd_dv_reg_gp0,
wr_ack_ddr_gp1,
wr_ack_ocm_gp1,
wr_data_gp1,
wr_addr_gp1,
wr_bytes_gp1,
wr_dv_ddr_gp1,
wr_dv_ocm_gp1,
rd_req_ddr_gp1,
rd_req_ocm_gp1,
rd_req_reg_gp1,
rd_addr_gp1,
rd_bytes_gp1,
rd_data_ddr_gp1,
rd_data_ocm_gp1,
rd_data_reg_gp1,
rd_dv_ddr_gp1,
rd_dv_ocm_gp1,
rd_dv_reg_gp1,
wr_ack_ddr_hp0,
wr_ack_ocm_hp0,
wr_data_hp0,
wr_addr_hp0,
wr_bytes_hp0,
wr_dv_ddr_hp0,
wr_dv_ocm_hp0,
rd_req_ddr_hp0,
rd_req_ocm_hp0,
rd_addr_hp0,
rd_bytes_hp0,
rd_data_ddr_hp0,
rd_data_ocm_hp0,
rd_dv_ddr_hp0,
rd_dv_ocm_hp0,
wr_ack_ddr_hp1,
wr_ack_ocm_hp1,
wr_data_hp1,
wr_addr_hp1,
wr_bytes_hp1,
wr_dv_ddr_hp1,
wr_dv_ocm_hp1,
rd_req_ddr_hp1,
rd_req_ocm_hp1,
rd_addr_hp1,
rd_bytes_hp1,
rd_data_ddr_hp1,
rd_data_ocm_hp1,
rd_dv_ddr_hp1,
rd_dv_ocm_hp1,
wr_ack_ddr_hp2,
wr_ack_ocm_hp2,
wr_data_hp2,
wr_addr_hp2,
wr_bytes_hp2,
wr_dv_ddr_hp2,
wr_dv_ocm_hp2,
rd_req_ddr_hp2,
rd_req_ocm_hp2,
rd_addr_hp2,
rd_bytes_hp2,
rd_data_ddr_hp2,
rd_data_ocm_hp2,
rd_dv_ddr_hp2,
rd_dv_ocm_hp2,
wr_ack_ddr_hp3,
wr_ack_ocm_hp3,
wr_data_hp3,
wr_addr_hp3,
wr_bytes_hp3,
wr_dv_ddr_hp3,
wr_dv_ocm_hp3,
rd_req_ddr_hp3,
rd_req_ocm_hp3,
rd_addr_hp3,
rd_bytes_hp3,
rd_data_ddr_hp3,
rd_data_ocm_hp3,
rd_dv_ddr_hp3,
rd_dv_ocm_hp3,
/* Goes to port 1 of DDR */
ddr_wr_ack_port1,
ddr_wr_dv_port1,
ddr_rd_req_port1,
ddr_rd_dv_port1,
ddr_wr_addr_port1,
ddr_wr_data_port1,
ddr_wr_bytes_port1,
ddr_rd_addr_port1,
ddr_rd_data_port1,
ddr_rd_bytes_port1,
ddr_wr_qos_port1,
ddr_rd_qos_port1,
/* Goes to port2 of DDR */
ddr_wr_ack_port2,
ddr_wr_dv_port2,
ddr_rd_req_port2,
ddr_rd_dv_port2,
ddr_wr_addr_port2,
ddr_wr_data_port2,
ddr_wr_bytes_port2,
ddr_rd_addr_port2,
ddr_rd_data_port2,
ddr_rd_bytes_port2,
ddr_wr_qos_port2,
ddr_rd_qos_port2,
/* Goes to port3 of DDR */
ddr_wr_ack_port3,
ddr_wr_dv_port3,
ddr_rd_req_port3,
ddr_rd_dv_port3,
ddr_wr_addr_port3,
ddr_wr_data_port3,
ddr_wr_bytes_port3,
ddr_rd_addr_port3,
ddr_rd_data_port3,
ddr_rd_bytes_port3,
ddr_wr_qos_port3,
ddr_rd_qos_port3,
/* Goes to port1 of OCM */
ocm_wr_qos_port1,
ocm_rd_qos_port1,
ocm_wr_dv_port1,
ocm_wr_data_port1,
ocm_wr_addr_port1,
ocm_wr_bytes_port1,
ocm_wr_ack_port1,
ocm_rd_req_port1,
ocm_rd_data_port1,
ocm_rd_addr_port1,
ocm_rd_bytes_port1,
ocm_rd_dv_port1,
/* Goes to port1 for RegMap */
reg_rd_qos_port1,
reg_rd_req_port1,
reg_rd_data_port1,
reg_rd_addr_port1,
reg_rd_bytes_port1,
reg_rd_dv_port1
);
`include "processing_system7_bfm_v2_0_5_local_params.v"
input rstn;
input sw_clk;
input [axi_qos_width-1:0] w_qos_gp0;
input [axi_qos_width-1:0] w_qos_gp1;
input [axi_qos_width-1:0] w_qos_hp0;
input [axi_qos_width-1:0] w_qos_hp1;
input [axi_qos_width-1:0] w_qos_hp2;
input [axi_qos_width-1:0] w_qos_hp3;
input [axi_qos_width-1:0] r_qos_gp0;
input [axi_qos_width-1:0] r_qos_gp1;
input [axi_qos_width-1:0] r_qos_hp0;
input [axi_qos_width-1:0] r_qos_hp1;
input [axi_qos_width-1:0] r_qos_hp2;
input [axi_qos_width-1:0] r_qos_hp3;
output [axi_qos_width-1:0] ocm_wr_qos_port1;
output [axi_qos_width-1:0] ocm_rd_qos_port1;
output wr_ack_ddr_gp0;
output wr_ack_ocm_gp0;
input[max_burst_bits-1:0] wr_data_gp0;
input[addr_width-1:0] wr_addr_gp0;
input[max_burst_bytes_width:0] wr_bytes_gp0;
input wr_dv_ddr_gp0;
input wr_dv_ocm_gp0;
input rd_req_ddr_gp0;
input rd_req_ocm_gp0;
input rd_req_reg_gp0;
input[addr_width-1:0] rd_addr_gp0;
input[max_burst_bytes_width:0] rd_bytes_gp0;
output[max_burst_bits-1:0] rd_data_ddr_gp0;
output[max_burst_bits-1:0] rd_data_ocm_gp0;
output[max_burst_bits-1:0] rd_data_reg_gp0;
output rd_dv_ddr_gp0;
output rd_dv_ocm_gp0;
output rd_dv_reg_gp0;
output wr_ack_ddr_gp1;
output wr_ack_ocm_gp1;
input[max_burst_bits-1:0] wr_data_gp1;
input[addr_width-1:0] wr_addr_gp1;
input[max_burst_bytes_width:0] wr_bytes_gp1;
input wr_dv_ddr_gp1;
input wr_dv_ocm_gp1;
input rd_req_ddr_gp1;
input rd_req_ocm_gp1;
input rd_req_reg_gp1;
input[addr_width-1:0] rd_addr_gp1;
input[max_burst_bytes_width:0] rd_bytes_gp1;
output[max_burst_bits-1:0] rd_data_ddr_gp1;
output[max_burst_bits-1:0] rd_data_ocm_gp1;
output[max_burst_bits-1:0] rd_data_reg_gp1;
output rd_dv_ddr_gp1;
output rd_dv_ocm_gp1;
output rd_dv_reg_gp1;
output wr_ack_ddr_hp0;
output wr_ack_ocm_hp0;
input[max_burst_bits-1:0] wr_data_hp0;
input[addr_width-1:0] wr_addr_hp0;
input[max_burst_bytes_width:0] wr_bytes_hp0;
input wr_dv_ddr_hp0;
input wr_dv_ocm_hp0;
input rd_req_ddr_hp0;
input rd_req_ocm_hp0;
input[addr_width-1:0] rd_addr_hp0;
input[max_burst_bytes_width:0] rd_bytes_hp0;
output[max_burst_bits-1:0] rd_data_ddr_hp0;
output[max_burst_bits-1:0] rd_data_ocm_hp0;
output rd_dv_ddr_hp0;
output rd_dv_ocm_hp0;
output wr_ack_ddr_hp1;
output wr_ack_ocm_hp1;
input[max_burst_bits-1:0] wr_data_hp1;
input[addr_width-1:0] wr_addr_hp1;
input[max_burst_bytes_width:0] wr_bytes_hp1;
input wr_dv_ddr_hp1;
input wr_dv_ocm_hp1;
input rd_req_ddr_hp1;
input rd_req_ocm_hp1;
input[addr_width-1:0] rd_addr_hp1;
input[max_burst_bytes_width:0] rd_bytes_hp1;
output[max_burst_bits-1:0] rd_data_ddr_hp1;
output[max_burst_bits-1:0] rd_data_ocm_hp1;
output rd_dv_ddr_hp1;
output rd_dv_ocm_hp1;
output wr_ack_ddr_hp2;
output wr_ack_ocm_hp2;
input[max_burst_bits-1:0] wr_data_hp2;
input[addr_width-1:0] wr_addr_hp2;
input[max_burst_bytes_width:0] wr_bytes_hp2;
input wr_dv_ddr_hp2;
input wr_dv_ocm_hp2;
input rd_req_ddr_hp2;
input rd_req_ocm_hp2;
input[addr_width-1:0] rd_addr_hp2;
input[max_burst_bytes_width:0] rd_bytes_hp2;
output[max_burst_bits-1:0] rd_data_ddr_hp2;
output[max_burst_bits-1:0] rd_data_ocm_hp2;
output rd_dv_ddr_hp2;
output rd_dv_ocm_hp2;
output wr_ack_ddr_hp3;
output wr_ack_ocm_hp3;
input[max_burst_bits-1:0] wr_data_hp3;
input[addr_width-1:0] wr_addr_hp3;
input[max_burst_bytes_width:0] wr_bytes_hp3;
input wr_dv_ddr_hp3;
input wr_dv_ocm_hp3;
input rd_req_ddr_hp3;
input rd_req_ocm_hp3;
input[addr_width-1:0] rd_addr_hp3;
input[max_burst_bytes_width:0] rd_bytes_hp3;
output[max_burst_bits-1:0] rd_data_ddr_hp3;
output[max_burst_bits-1:0] rd_data_ocm_hp3;
output rd_dv_ddr_hp3;
output rd_dv_ocm_hp3;
/* Goes to port 1 of DDR */
input ddr_wr_ack_port1;
output ddr_wr_dv_port1;
output ddr_rd_req_port1;
input ddr_rd_dv_port1;
output[addr_width-1:0] ddr_wr_addr_port1;
output[max_burst_bits-1:0] ddr_wr_data_port1;
output[max_burst_bytes_width:0] ddr_wr_bytes_port1;
output[addr_width-1:0] ddr_rd_addr_port1;
input[max_burst_bits-1:0] ddr_rd_data_port1;
output[max_burst_bytes_width:0] ddr_rd_bytes_port1;
output [axi_qos_width-1:0] ddr_wr_qos_port1;
output [axi_qos_width-1:0] ddr_rd_qos_port1;
/* Goes to port2 of DDR */
input ddr_wr_ack_port2;
output ddr_wr_dv_port2;
output ddr_rd_req_port2;
input ddr_rd_dv_port2;
output[addr_width-1:0] ddr_wr_addr_port2;
output[max_burst_bits-1:0] ddr_wr_data_port2;
output[max_burst_bytes_width:0] ddr_wr_bytes_port2;
output[addr_width-1:0] ddr_rd_addr_port2;
input[max_burst_bits-1:0] ddr_rd_data_port2;
output[max_burst_bytes_width:0] ddr_rd_bytes_port2;
output [axi_qos_width-1:0] ddr_wr_qos_port2;
output [axi_qos_width-1:0] ddr_rd_qos_port2;
/* Goes to port3 of DDR */
input ddr_wr_ack_port3;
output ddr_wr_dv_port3;
output ddr_rd_req_port3;
input ddr_rd_dv_port3;
output[addr_width-1:0] ddr_wr_addr_port3;
output[max_burst_bits-1:0] ddr_wr_data_port3;
output[max_burst_bytes_width:0] ddr_wr_bytes_port3;
output[addr_width-1:0] ddr_rd_addr_port3;
input[max_burst_bits-1:0] ddr_rd_data_port3;
output[max_burst_bytes_width:0] ddr_rd_bytes_port3;
output [axi_qos_width-1:0] ddr_wr_qos_port3;
output [axi_qos_width-1:0] ddr_rd_qos_port3;
/* Goes to port1 of OCM */
input ocm_wr_ack_port1;
output ocm_wr_dv_port1;
output ocm_rd_req_port1;
input ocm_rd_dv_port1;
output[max_burst_bits-1:0] ocm_wr_data_port1;
output[addr_width-1:0] ocm_wr_addr_port1;
output[max_burst_bytes_width:0] ocm_wr_bytes_port1;
input[max_burst_bits-1:0] ocm_rd_data_port1;
output[addr_width-1:0] ocm_rd_addr_port1;
output[max_burst_bytes_width:0] ocm_rd_bytes_port1;
/* Goes to port1 of REG */
output [axi_qos_width-1:0] reg_rd_qos_port1;
output reg_rd_req_port1;
input reg_rd_dv_port1;
input[max_burst_bits-1:0] reg_rd_data_port1;
output[addr_width-1:0] reg_rd_addr_port1;
output[max_burst_bytes_width:0] reg_rd_bytes_port1;
wire ocm_wr_dv_osw0;
wire ocm_wr_dv_osw1;
wire[max_burst_bits-1:0] ocm_wr_data_osw0;
wire[max_burst_bits-1:0] ocm_wr_data_osw1;
wire[addr_width-1:0] ocm_wr_addr_osw0;
wire[addr_width-1:0] ocm_wr_addr_osw1;
wire[max_burst_bytes_width:0] ocm_wr_bytes_osw0;
wire[max_burst_bytes_width:0] ocm_wr_bytes_osw1;
wire ocm_wr_ack_osw0;
wire ocm_wr_ack_osw1;
wire ocm_rd_req_osw0;
wire ocm_rd_req_osw1;
wire[max_burst_bits-1:0] ocm_rd_data_osw0;
wire[max_burst_bits-1:0] ocm_rd_data_osw1;
wire[addr_width-1:0] ocm_rd_addr_osw0;
wire[addr_width-1:0] ocm_rd_addr_osw1;
wire[max_burst_bytes_width:0] ocm_rd_bytes_osw0;
wire[max_burst_bytes_width:0] ocm_rd_bytes_osw1;
wire ocm_rd_dv_osw0;
wire ocm_rd_dv_osw1;
wire [axi_qos_width-1:0] ocm_wr_qos_osw0;
wire [axi_qos_width-1:0] ocm_wr_qos_osw1;
wire [axi_qos_width-1:0] ocm_rd_qos_osw0;
wire [axi_qos_width-1:0] ocm_rd_qos_osw1;
processing_system7_bfm_v2_0_5_fmsw_gp fmsw (
.sw_clk(sw_clk),
.rstn(rstn),
.w_qos_gp0(w_qos_gp0),
.r_qos_gp0(r_qos_gp0),
.wr_ack_ocm_gp0(wr_ack_ocm_gp0),
.wr_ack_ddr_gp0(wr_ack_ddr_gp0),
.wr_data_gp0(wr_data_gp0),
.wr_addr_gp0(wr_addr_gp0),
.wr_bytes_gp0(wr_bytes_gp0),
.wr_dv_ocm_gp0(wr_dv_ocm_gp0),
.wr_dv_ddr_gp0(wr_dv_ddr_gp0),
.rd_req_ocm_gp0(rd_req_ocm_gp0),
.rd_req_ddr_gp0(rd_req_ddr_gp0),
.rd_req_reg_gp0(rd_req_reg_gp0),
.rd_addr_gp0(rd_addr_gp0),
.rd_bytes_gp0(rd_bytes_gp0),
.rd_data_ddr_gp0(rd_data_ddr_gp0),
.rd_data_ocm_gp0(rd_data_ocm_gp0),
.rd_data_reg_gp0(rd_data_reg_gp0),
.rd_dv_ocm_gp0(rd_dv_ocm_gp0),
.rd_dv_ddr_gp0(rd_dv_ddr_gp0),
.rd_dv_reg_gp0(rd_dv_reg_gp0),
.w_qos_gp1(w_qos_gp1),
.r_qos_gp1(r_qos_gp1),
.wr_ack_ocm_gp1(wr_ack_ocm_gp1),
.wr_ack_ddr_gp1(wr_ack_ddr_gp1),
.wr_data_gp1(wr_data_gp1),
.wr_addr_gp1(wr_addr_gp1),
.wr_bytes_gp1(wr_bytes_gp1),
.wr_dv_ocm_gp1(wr_dv_ocm_gp1),
.wr_dv_ddr_gp1(wr_dv_ddr_gp1),
.rd_req_ocm_gp1(rd_req_ocm_gp1),
.rd_req_ddr_gp1(rd_req_ddr_gp1),
.rd_req_reg_gp1(rd_req_reg_gp1),
.rd_addr_gp1(rd_addr_gp1),
.rd_bytes_gp1(rd_bytes_gp1),
.rd_data_ddr_gp1(rd_data_ddr_gp1),
.rd_data_ocm_gp1(rd_data_ocm_gp1),
.rd_data_reg_gp1(rd_data_reg_gp1),
.rd_dv_ocm_gp1(rd_dv_ocm_gp1),
.rd_dv_ddr_gp1(rd_dv_ddr_gp1),
.rd_dv_reg_gp1(rd_dv_reg_gp1),
.ocm_wr_ack (ocm_wr_ack_osw0),
.ocm_wr_dv (ocm_wr_dv_osw0),
.ocm_rd_req (ocm_rd_req_osw0),
.ocm_rd_dv (ocm_rd_dv_osw0),
.ocm_wr_addr(ocm_wr_addr_osw0),
.ocm_wr_data(ocm_wr_data_osw0),
.ocm_wr_bytes(ocm_wr_bytes_osw0),
.ocm_rd_addr(ocm_rd_addr_osw0),
.ocm_rd_data(ocm_rd_data_osw0),
.ocm_rd_bytes(ocm_rd_bytes_osw0),
.ocm_wr_qos(ocm_wr_qos_osw0),
.ocm_rd_qos(ocm_rd_qos_osw0),
.ddr_wr_qos(ddr_wr_qos_port1),
.ddr_rd_qos(ddr_rd_qos_port1),
.reg_rd_qos(reg_rd_qos_port1),
.ddr_wr_ack(ddr_wr_ack_port1),
.ddr_wr_dv(ddr_wr_dv_port1),
.ddr_rd_req(ddr_rd_req_port1),
.ddr_rd_dv(ddr_rd_dv_port1),
.ddr_wr_addr(ddr_wr_addr_port1),
.ddr_wr_data(ddr_wr_data_port1),
.ddr_wr_bytes(ddr_wr_bytes_port1),
.ddr_rd_addr(ddr_rd_addr_port1),
.ddr_rd_data(ddr_rd_data_port1),
.ddr_rd_bytes(ddr_rd_bytes_port1),
.reg_rd_req(reg_rd_req_port1),
.reg_rd_dv(reg_rd_dv_port1),
.reg_rd_addr(reg_rd_addr_port1),
.reg_rd_data(reg_rd_data_port1),
.reg_rd_bytes(reg_rd_bytes_port1)
);
processing_system7_bfm_v2_0_5_ssw_hp ssw(
.sw_clk(sw_clk),
.rstn(rstn),
.w_qos_hp0(w_qos_hp0),
.r_qos_hp0(r_qos_hp0),
.w_qos_hp1(w_qos_hp1),
.r_qos_hp1(r_qos_hp1),
.w_qos_hp2(w_qos_hp2),
.r_qos_hp2(r_qos_hp2),
.w_qos_hp3(w_qos_hp3),
.r_qos_hp3(r_qos_hp3),
.wr_ack_ddr_hp0(wr_ack_ddr_hp0),
.wr_data_hp0(wr_data_hp0),
.wr_addr_hp0(wr_addr_hp0),
.wr_bytes_hp0(wr_bytes_hp0),
.wr_dv_ddr_hp0(wr_dv_ddr_hp0),
.rd_req_ddr_hp0(rd_req_ddr_hp0),
.rd_addr_hp0(rd_addr_hp0),
.rd_bytes_hp0(rd_bytes_hp0),
.rd_data_ddr_hp0(rd_data_ddr_hp0),
.rd_data_ocm_hp0(rd_data_ocm_hp0),
.rd_dv_ddr_hp0(rd_dv_ddr_hp0),
.wr_ack_ocm_hp0(wr_ack_ocm_hp0),
.wr_dv_ocm_hp0(wr_dv_ocm_hp0),
.rd_req_ocm_hp0(rd_req_ocm_hp0),
.rd_dv_ocm_hp0(rd_dv_ocm_hp0),
.wr_ack_ddr_hp1(wr_ack_ddr_hp1),
.wr_data_hp1(wr_data_hp1),
.wr_addr_hp1(wr_addr_hp1),
.wr_bytes_hp1(wr_bytes_hp1),
.wr_dv_ddr_hp1(wr_dv_ddr_hp1),
.rd_req_ddr_hp1(rd_req_ddr_hp1),
.rd_addr_hp1(rd_addr_hp1),
.rd_bytes_hp1(rd_bytes_hp1),
.rd_data_ddr_hp1(rd_data_ddr_hp1),
.rd_data_ocm_hp1(rd_data_ocm_hp1),
.rd_dv_ddr_hp1(rd_dv_ddr_hp1),
.wr_ack_ocm_hp1(wr_ack_ocm_hp1),
.wr_dv_ocm_hp1(wr_dv_ocm_hp1),
.rd_req_ocm_hp1(rd_req_ocm_hp1),
.rd_dv_ocm_hp1(rd_dv_ocm_hp1),
.wr_ack_ddr_hp2(wr_ack_ddr_hp2),
.wr_data_hp2(wr_data_hp2),
.wr_addr_hp2(wr_addr_hp2),
.wr_bytes_hp2(wr_bytes_hp2),
.wr_dv_ddr_hp2(wr_dv_ddr_hp2),
.rd_req_ddr_hp2(rd_req_ddr_hp2),
.rd_addr_hp2(rd_addr_hp2),
.rd_bytes_hp2(rd_bytes_hp2),
.rd_data_ddr_hp2(rd_data_ddr_hp2),
.rd_data_ocm_hp2(rd_data_ocm_hp2),
.rd_dv_ddr_hp2(rd_dv_ddr_hp2),
.wr_ack_ocm_hp2(wr_ack_ocm_hp2),
.wr_dv_ocm_hp2(wr_dv_ocm_hp2),
.rd_req_ocm_hp2(rd_req_ocm_hp2),
.rd_dv_ocm_hp2(rd_dv_ocm_hp2),
.wr_ack_ddr_hp3(wr_ack_ddr_hp3),
.wr_data_hp3(wr_data_hp3),
.wr_addr_hp3(wr_addr_hp3),
.wr_bytes_hp3(wr_bytes_hp3),
.wr_dv_ddr_hp3(wr_dv_ddr_hp3),
.rd_req_ddr_hp3(rd_req_ddr_hp3),
.rd_addr_hp3(rd_addr_hp3),
.rd_bytes_hp3(rd_bytes_hp3),
.rd_data_ddr_hp3(rd_data_ddr_hp3),
.rd_data_ocm_hp3(rd_data_ocm_hp3),
.rd_dv_ddr_hp3(rd_dv_ddr_hp3),
.wr_ack_ocm_hp3(wr_ack_ocm_hp3),
.wr_dv_ocm_hp3(wr_dv_ocm_hp3),
.rd_req_ocm_hp3(rd_req_ocm_hp3),
.rd_dv_ocm_hp3(rd_dv_ocm_hp3),
.ddr_wr_ack0(ddr_wr_ack_port2),
.ddr_wr_dv0(ddr_wr_dv_port2),
.ddr_rd_req0(ddr_rd_req_port2),
.ddr_rd_dv0(ddr_rd_dv_port2),
.ddr_wr_addr0(ddr_wr_addr_port2),
.ddr_wr_data0(ddr_wr_data_port2),
.ddr_wr_bytes0(ddr_wr_bytes_port2),
.ddr_rd_addr0(ddr_rd_addr_port2),
.ddr_rd_data0(ddr_rd_data_port2),
.ddr_rd_bytes0(ddr_rd_bytes_port2),
.ddr_wr_qos0(ddr_wr_qos_port2),
.ddr_rd_qos0(ddr_rd_qos_port2),
.ddr_wr_ack1(ddr_wr_ack_port3),
.ddr_wr_dv1(ddr_wr_dv_port3),
.ddr_rd_req1(ddr_rd_req_port3),
.ddr_rd_dv1(ddr_rd_dv_port3),
.ddr_wr_addr1(ddr_wr_addr_port3),
.ddr_wr_data1(ddr_wr_data_port3),
.ddr_wr_bytes1(ddr_wr_bytes_port3),
.ddr_rd_addr1(ddr_rd_addr_port3),
.ddr_rd_data1(ddr_rd_data_port3),
.ddr_rd_bytes1(ddr_rd_bytes_port3),
.ddr_wr_qos1(ddr_wr_qos_port3),
.ddr_rd_qos1(ddr_rd_qos_port3),
.ocm_wr_qos(ocm_wr_qos_osw1),
.ocm_rd_qos(ocm_rd_qos_osw1),
.ocm_wr_ack (ocm_wr_ack_osw1),
.ocm_wr_dv (ocm_wr_dv_osw1),
.ocm_rd_req (ocm_rd_req_osw1),
.ocm_rd_dv (ocm_rd_dv_osw1),
.ocm_wr_addr(ocm_wr_addr_osw1),
.ocm_wr_data(ocm_wr_data_osw1),
.ocm_wr_bytes(ocm_wr_bytes_osw1),
.ocm_rd_addr(ocm_rd_addr_osw1),
.ocm_rd_data(ocm_rd_data_osw1),
.ocm_rd_bytes(ocm_rd_bytes_osw1)
);
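/* Routing summary (derived from the connections above): the fmsw switch
serves the two GP masters and drives DDR port1, the register-map read port,
and one OCM request stream (the *_osw0 wires); the ssw switch serves the
four HP masters and drives DDR ports 2 and 3 plus a second OCM request
stream (the *_osw1 wires). The write/read arbiters instantiated below merge
the two OCM streams onto OCM port1; each arbiter also receives the
per-stream QoS values. */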
processing_system7_bfm_v2_0_5_arb_wr osw_wr (
.rstn(rstn),
.sw_clk(sw_clk),
.qos1(ocm_wr_qos_osw0), // write QoS from the GP-side switch (fmsw)
.qos2(ocm_wr_qos_osw1), // write QoS from the HP-side switch (ssw)
.prt_dv1(ocm_wr_dv_osw0),
.prt_dv2(ocm_wr_dv_osw1),
.prt_data1(ocm_wr_data_osw0),
.prt_data2(ocm_wr_data_osw1),
.prt_addr1(ocm_wr_addr_osw0),
.prt_addr2(ocm_wr_addr_osw1),
.prt_bytes1(ocm_wr_bytes_osw0),
.prt_bytes2(ocm_wr_bytes_osw1),
.prt_ack1(ocm_wr_ack_osw0),
.prt_ack2(ocm_wr_ack_osw1),
.prt_req(ocm_wr_dv_port1),
.prt_qos(ocm_wr_qos_port1),
.prt_data(ocm_wr_data_port1),
.prt_addr(ocm_wr_addr_port1),
.prt_bytes(ocm_wr_bytes_port1),
.prt_ack(ocm_wr_ack_port1)
);
processing_system7_bfm_v2_0_5_arb_rd osw_rd(
.rstn(rstn),
.sw_clk(sw_clk),
.qos1(ocm_rd_qos_osw0), // read QoS from the GP-side switch (fmsw)
.qos2(ocm_rd_qos_osw1), // read QoS from the HP-side switch (ssw)
.prt_req1(ocm_rd_req_osw0),
.prt_req2(ocm_rd_req_osw1),
.prt_data1(ocm_rd_data_osw0),
.prt_data2(ocm_rd_data_osw1),
.prt_addr1(ocm_rd_addr_osw0),
.prt_addr2(ocm_rd_addr_osw1),
.prt_bytes1(ocm_rd_bytes_osw0),
.prt_bytes2(ocm_rd_bytes_osw1),
.prt_dv1(ocm_rd_dv_osw0),
.prt_dv2(ocm_rd_dv_osw1),
.prt_req(ocm_rd_req_port1),
.prt_qos(ocm_rd_qos_port1),
.prt_data(ocm_rd_data_port1),
.prt_addr(ocm_rd_addr_port1),
.prt_bytes(ocm_rd_bytes_port1),
.prt_dv(ocm_rd_dv_port1)
);
endmodule
|
module processing_system7_bfm_v2_0_5_gen_reset(
por_rst_n,
sys_rst_n,
rst_out_n,
m_axi_gp0_clk,
m_axi_gp1_clk,
s_axi_gp0_clk,
s_axi_gp1_clk,
s_axi_hp0_clk,
s_axi_hp1_clk,
s_axi_hp2_clk,
s_axi_hp3_clk,
s_axi_acp_clk,
m_axi_gp0_rstn,
m_axi_gp1_rstn,
s_axi_gp0_rstn,
s_axi_gp1_rstn,
s_axi_hp0_rstn,
s_axi_hp1_rstn,
s_axi_hp2_rstn,
s_axi_hp3_rstn,
s_axi_acp_rstn,
fclk_reset3_n,
fclk_reset2_n,
fclk_reset1_n,
fclk_reset0_n,
fpga_acp_reset_n,
fpga_gp_m0_reset_n,
fpga_gp_m1_reset_n,
fpga_gp_s0_reset_n,
fpga_gp_s1_reset_n,
fpga_hp_s0_reset_n,
fpga_hp_s1_reset_n,
fpga_hp_s2_reset_n,
fpga_hp_s3_reset_n
);
input por_rst_n;
input sys_rst_n;
input m_axi_gp0_clk;
input m_axi_gp1_clk;
input s_axi_gp0_clk;
input s_axi_gp1_clk;
input s_axi_hp0_clk;
input s_axi_hp1_clk;
input s_axi_hp2_clk;
input s_axi_hp3_clk;
input s_axi_acp_clk;
output reg m_axi_gp0_rstn;
output reg m_axi_gp1_rstn;
output reg s_axi_gp0_rstn;
output reg s_axi_gp1_rstn;
output reg s_axi_hp0_rstn;
output reg s_axi_hp1_rstn;
output reg s_axi_hp2_rstn;
output reg s_axi_hp3_rstn;
output reg s_axi_acp_rstn;
output rst_out_n;
output fclk_reset3_n;
output fclk_reset2_n;
output fclk_reset1_n;
output fclk_reset0_n;
output fpga_acp_reset_n;
output fpga_gp_m0_reset_n;
output fpga_gp_m1_reset_n;
output fpga_gp_s0_reset_n;
output fpga_gp_s1_reset_n;
output fpga_hp_s0_reset_n;
output fpga_hp_s1_reset_n;
output fpga_hp_s2_reset_n;
output fpga_hp_s3_reset_n;
reg [31:0] fabric_rst_n;
reg r_m_axi_gp0_rstn;
reg r_m_axi_gp1_rstn;
reg r_s_axi_gp0_rstn;
reg r_s_axi_gp1_rstn;
reg r_s_axi_hp0_rstn;
reg r_s_axi_hp1_rstn;
reg r_s_axi_hp2_rstn;
reg r_s_axi_hp3_rstn;
reg r_s_axi_acp_rstn;
assign rst_out_n = por_rst_n & sys_rst_n;
assign fclk_reset0_n = !fabric_rst_n[0];
assign fclk_reset1_n = !fabric_rst_n[1];
assign fclk_reset2_n = !fabric_rst_n[2];
assign fclk_reset3_n = !fabric_rst_n[3];
assign fpga_acp_reset_n = !fabric_rst_n[24];
assign fpga_hp_s3_reset_n = !fabric_rst_n[23];
assign fpga_hp_s2_reset_n = !fabric_rst_n[22];
assign fpga_hp_s1_reset_n = !fabric_rst_n[21];
assign fpga_hp_s0_reset_n = !fabric_rst_n[20];
assign fpga_gp_s1_reset_n = !fabric_rst_n[17];
assign fpga_gp_s0_reset_n = !fabric_rst_n[16];
assign fpga_gp_m1_reset_n = !fabric_rst_n[13];
assign fpga_gp_m0_reset_n = !fabric_rst_n[12];
task fpga_soft_reset;
input[31:0] reset_ctrl;
begin
fabric_rst_n[0] = reset_ctrl[0];
fabric_rst_n[1] = reset_ctrl[1];
fabric_rst_n[2] = reset_ctrl[2];
fabric_rst_n[3] = reset_ctrl[3];
fabric_rst_n[12] = reset_ctrl[12];
fabric_rst_n[13] = reset_ctrl[13];
fabric_rst_n[16] = reset_ctrl[16];
fabric_rst_n[17] = reset_ctrl[17];
fabric_rst_n[20] = reset_ctrl[20];
fabric_rst_n[21] = reset_ctrl[21];
fabric_rst_n[22] = reset_ctrl[22];
fabric_rst_n[23] = reset_ctrl[23];
fabric_rst_n[24] = reset_ctrl[24];
end
endtask
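// Illustrative testbench use (hypothetical instance name gen_rst):
//   gen_rst.fpga_soft_reset(32'h0000_0000); // clear all mapped bits: every fabric reset_n deasserts
//   gen_rst.fpga_soft_reset(32'h0000_000F); // set bits [3:0]: assert fclk_reset3_n..fclk_reset0_n
// The assigns above invert fabric_rst_n, so a 1 in a mapped bit asserts the
// corresponding active-low reset output: bits [3:0] drive the FCLK resets,
// bits 12-13/16-17 the GP master/slave port resets, and bits 20-24 the HP
// and ACP port resets. The always block below reloads the power-on default
// 32'h01f3_300f (all mapped resets asserted) whenever PS_PORB or PS_SRSTB falls.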
always@(negedge por_rst_n or negedge sys_rst_n) fabric_rst_n = 32'h01f3_300f;
always@(posedge m_axi_gp0_clk or negedge (por_rst_n & sys_rst_n))
begin
if (!(por_rst_n & sys_rst_n))
m_axi_gp0_rstn = 1'b0;
else
m_axi_gp0_rstn = 1'b1;
end
always@(posedge m_axi_gp1_clk or negedge (por_rst_n & sys_rst_n))
begin
if (!(por_rst_n & sys_rst_n))
m_axi_gp1_rstn = 1'b0;
else
m_axi_gp1_rstn = 1'b1;
end
always@(posedge s_axi_gp0_clk or negedge (por_rst_n & sys_rst_n))
begin
if (!(por_rst_n & sys_rst_n))
s_axi_gp0_rstn = 1'b0;
else
s_axi_gp0_rstn = 1'b1;
end
always@(posedge s_axi_gp1_clk or negedge (por_rst_n & sys_rst_n))
begin
if (!(por_rst_n & sys_rst_n))
s_axi_gp1_rstn = 1'b0;
else
s_axi_gp1_rstn = 1'b1;
end
always@(posedge s_axi_hp0_clk or negedge (por_rst_n & sys_rst_n))
begin
if (!(por_rst_n & sys_rst_n))
s_axi_hp0_rstn = 1'b0;
else
s_axi_hp0_rstn = 1'b1;
end
always@(posedge s_axi_hp1_clk or negedge (por_rst_n & sys_rst_n))
begin
if (!(por_rst_n & sys_rst_n))
s_axi_hp1_rstn = 1'b0;
else
s_axi_hp1_rstn = 1'b1;
end
always@(posedge s_axi_hp2_clk or negedge (por_rst_n & sys_rst_n))
begin
if (!(por_rst_n & sys_rst_n))
s_axi_hp2_rstn = 1'b0;
else
s_axi_hp2_rstn = 1'b1;
end
always@(posedge s_axi_hp3_clk or negedge (por_rst_n & sys_rst_n))
begin
if (!(por_rst_n & sys_rst_n))
s_axi_hp3_rstn = 1'b0;
else
s_axi_hp3_rstn = 1'b1;
end
always@(posedge s_axi_acp_clk or negedge (por_rst_n & sys_rst_n))
begin
if (!(por_rst_n & sys_rst_n))
s_axi_acp_rstn = 1'b0;
else
s_axi_acp_rstn = 1'b1;
end
always@(*) begin
// Use case equality so an X/Z on either reset input is actually detected.
if (((por_rst_n !== 1'b0) && (por_rst_n !== 1'b1)) || ((sys_rst_n !== 1'b0) && (sys_rst_n !== 1'b1))) begin
$display(" Error:processing_system7_bfm_v2_0_5_gen_reset. PS_PORB and PS_SRSTB must be driven to a known state");
$finish();
end
end
endmodule
|