// ----------------------------------------------------------------------
// Copyright (c) 2016, The Regents of the University of California All
// rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//
//     * Neither the name of The Regents of the University of California
//       nor the names of its contributors may be used to endorse or
//       promote products derived from this software without specific
//       prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE
// UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
// TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
// ----------------------------------------------------------------------
//----------------------------------------------------------------------------
// Filename:          rx_port_requester.v
// Version:           1.00.a
// Verilog Standard:  Verilog-2001
// Description:       Issues read requests to the tx_engine for the rx_port
//                    and sg_list_requester modules in the rx_port. Expects
//                    those modules to update their address and length values
//                    after every request issued. Also expects them to update
//                    their space available values within 6 cycles of a change
//                    to the RX_LEN.
// Author:            Matt Jacobsen
// History:           @mattj: Version 2.0
//-----------------------------------------------------------------------------
`define S_RXPORTREQ_RX_TX 2'b00
`define S_RXPORTREQ_TX_RX 2'b01
`define S_RXPORTREQ_ISSUE 2'b10

`timescale 1ns/1ns
module rx_port_requester_mux (
	input RST,
	input CLK,
	input SG_RX_REQ,             // Scatter gather RX read request
	input [9:0] SG_RX_LEN,       // Scatter gather RX read request length
	input [63:0] SG_RX_ADDR,     // Scatter gather RX read request address
	output SG_RX_REQ_PROC,       // Scatter gather RX read request processing
	input SG_TX_REQ,             // Scatter gather TX read request
	input [9:0] SG_TX_LEN,       // Scatter gather TX read request length
	input [63:0] SG_TX_ADDR,     // Scatter gather TX read request address
	output SG_TX_REQ_PROC,       // Scatter gather TX read request processing
	input MAIN_REQ,              // Main read request
	input [9:0] MAIN_LEN,        // Main read request length
	input [63:0] MAIN_ADDR,      // Main read request address
	output MAIN_REQ_PROC,        // Main read request processing
	output RX_REQ,               // Read request
	input RX_REQ_ACK,            // Read request accepted
	output [1:0] RX_REQ_TAG,     // Read request data tag
	output [63:0] RX_REQ_ADDR,   // Read request address
	output [9:0] RX_REQ_LEN,     // Read request length
	output REQ_ACK               // Request accepted
);

reg rRxReqAck=0, _rRxReqAck=0;
(* syn_encoding = "user" *)
(* fsm_encoding = "user" *)
reg [1:0] rState=`S_RXPORTREQ_RX_TX, _rState=`S_RXPORTREQ_RX_TX;
reg [9:0] rLen=0, _rLen=0;
reg [63:0] rAddr=64'd0, _rAddr=64'd0;
reg rSgRxAck=0, _rSgRxAck=0;
reg rSgTxAck=0, _rSgTxAck=0;
reg rMainAck=0, _rMainAck=0;
reg rAck=0, _rAck=0;

assign SG_RX_REQ_PROC = rSgRxAck;
assign SG_TX_REQ_PROC = rSgTxAck;
assign MAIN_REQ_PROC = rMainAck;

assign RX_REQ = rState[1]; // S_RXPORTREQ_ISSUE
assign RX_REQ_TAG = {rSgTxAck, rSgRxAck};
assign RX_REQ_ADDR = rAddr;
assign RX_REQ_LEN = rLen;

assign REQ_ACK = rAck;

// Buffer signals that come from outside the rx_port.
always @ (posedge CLK) begin
	rRxReqAck <= #1 (RST ? 1'd0 : _rRxReqAck);
end

always @ (*) begin
	_rRxReqAck = RX_REQ_ACK;
end

// Handle issuing read requests. Scatter gather requests are processed
// with higher priority than the main channel.
always @ (posedge CLK) begin
	rState <= #1 (RST ? `S_RXPORTREQ_RX_TX : _rState);
	rLen <= #1 _rLen;
	rAddr <= #1 _rAddr;
	rSgRxAck <= #1 _rSgRxAck;
	rSgTxAck <= #1 _rSgTxAck;
	rMainAck <= #1 _rMainAck;
	rAck <= #1 _rAck;
end

always @ (*) begin
	_rState = rState;
	_rLen = rLen;
	_rAddr = rAddr;
	_rSgRxAck = rSgRxAck;
	_rSgTxAck = rSgTxAck;
	_rMainAck = rMainAck;
	_rAck = rAck;
	case (rState)

	`S_RXPORTREQ_RX_TX: begin // Wait for a new read request
		if (SG_RX_REQ) begin
			_rLen = SG_RX_LEN;
			_rAddr = SG_RX_ADDR;
			_rSgRxAck = 1;
			_rAck = 1;
			_rState = `S_RXPORTREQ_ISSUE;
		end
		else if (SG_TX_REQ) begin
			_rLen = SG_TX_LEN;
			_rAddr = SG_TX_ADDR;
			_rSgTxAck = 1;
			_rAck = 1;
			_rState = `S_RXPORTREQ_ISSUE;
		end
		else if (MAIN_REQ) begin
			_rLen = MAIN_LEN;
			_rAddr = MAIN_ADDR;
			_rMainAck = 1;
			_rAck = 1;
			_rState = `S_RXPORTREQ_ISSUE;
		end
		else begin
			_rState = `S_RXPORTREQ_TX_RX;
		end
	end

	`S_RXPORTREQ_TX_RX: begin // Wait for a new read request
		if (SG_TX_REQ) begin
			_rLen = SG_TX_LEN;
			_rAddr = SG_TX_ADDR;
			_rSgTxAck = 1;
			_rAck = 1;
			_rState = `S_RXPORTREQ_ISSUE;
		end
		else if (SG_RX_REQ) begin
			_rLen = SG_RX_LEN;
			_rAddr = SG_RX_ADDR;
			_rSgRxAck = 1;
			_rAck = 1;
			_rState = `S_RXPORTREQ_ISSUE;
		end
		else if (MAIN_REQ) begin
			_rLen = MAIN_LEN;
			_rAddr = MAIN_ADDR;
			_rMainAck = 1;
			_rAck = 1;
			_rState = `S_RXPORTREQ_ISSUE;
		end
		else begin
			_rState = `S_RXPORTREQ_RX_TX;
		end
	end

	`S_RXPORTREQ_ISSUE: begin // Issue the request
		_rAck = 0;
		if (rRxReqAck) begin
			_rSgRxAck = 0;
			_rSgTxAck = 0;
			_rMainAck = 0;
			if (rSgRxAck)
				_rState = `S_RXPORTREQ_TX_RX;
			else
				_rState = `S_RXPORTREQ_RX_TX;
		end
	end

	default: begin
		_rState = `S_RXPORTREQ_RX_TX;
	end

	endcase
end

endmodule
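// ----------------------------------------------------------------------
// Minimal simulation-only sketch (not part of the original sources) of the
// request/acknowledge handshake implemented above. The testbench name and
// stimulus values are illustrative, and RX_REQ_ACK is simply tied back to
// RX_REQ as a stand-in for the acknowledge normally returned by the
// tx_engine.
// ----------------------------------------------------------------------
//synthesis translate_off
module example_rx_port_requester_mux_tb;

reg         clk = 0, rst = 1;
reg         main_req = 0;
wire        rx_req, req_ack, main_req_proc;
wire [1:0]  rx_req_tag;
wire [63:0] rx_req_addr;
wire [9:0]  rx_req_len;

always #5 clk = ~clk; // free-running simulation clock

rx_port_requester_mux dut (
	.RST(rst), .CLK(clk),
	.SG_RX_REQ(1'b0), .SG_RX_LEN(10'd0), .SG_RX_ADDR(64'd0), .SG_RX_REQ_PROC(),
	.SG_TX_REQ(1'b0), .SG_TX_LEN(10'd0), .SG_TX_ADDR(64'd0), .SG_TX_REQ_PROC(),
	.MAIN_REQ(main_req), .MAIN_LEN(10'd32), .MAIN_ADDR(64'h1000),
	.MAIN_REQ_PROC(main_req_proc),
	.RX_REQ(rx_req), .RX_REQ_ACK(rx_req), // immediate acknowledge (assumption)
	.RX_REQ_TAG(rx_req_tag), .RX_REQ_ADDR(rx_req_addr), .RX_REQ_LEN(rx_req_len),
	.REQ_ACK(req_ack)
);

// Print each accepted request once (REQ_ACK pulses for one cycle per request).
always @ (posedge clk) begin
	if (req_ack)
		$display("%t request accepted: tag=%b addr=%h len=%d",
		         $time, rx_req_tag, rx_req_addr, rx_req_len);
end

initial begin
	repeat (4) @(posedge clk);
	rst = 0;
	@(posedge clk) main_req = 1;       // request 32 words at address 64'h1000
	wait (main_req_proc) main_req = 0; // drop the request once it is being processed
	repeat (8) @(posedge clk);
	$finish;
end

endmodule
//synthesis translate_on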
//*****************************************************************************
// (c) Copyright 2009 - 2013 Xilinx, Inc. All rights reserved.
//
// This file contains confidential and proprietary information
// of Xilinx, Inc. and is protected under U.S. and
// international copyright and other intellectual property
// laws.
//
// DISCLAIMER
// This disclaimer is not a license and does not grant any
// rights to the materials distributed herewith. Except as
// otherwise provided in a valid license issued to you by
// Xilinx, and to the maximum extent permitted by applicable
// law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
// WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
// AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
// BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
// INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
// (2) Xilinx shall not be liable (whether in contract or tort,
// including negligence, or under any other theory of
// liability) for any loss or damage of any kind or nature
// related to, arising under or in connection with these
// materials, including for any direct, or any indirect,
// special, incidental, or consequential loss or damage
// (including loss of data, profits, goodwill, or any type of
// loss or damage suffered as a result of any action brought
// by a third party) even if such damage or loss was
// reasonably foreseeable or Xilinx had been advised of the
// possibility of the same.
//
// CRITICAL APPLICATIONS
// Xilinx products are not designed or intended to be fail-
// safe, or for use in any application requiring fail-safe
// performance, such as life-support or safety devices or
// systems, Class III medical devices, nuclear facilities,
// applications related to the deployment of airbags, or any
// other applications that could lead to death, personal
// injury, or severe property or environmental damage
// (individually and collectively, "Critical
// Applications"). Customer assumes the sole risk and
// liability of any use of Xilinx products in Critical
// Applications, subject only to applicable laws and
// regulations governing limitations on product liability.
//
// THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
// PART OF THIS FILE AT ALL TIMES.
//
//*****************************************************************************
//   ____  ____
//  /   /\/   /
// /___/  \  /    Vendor: Xilinx
// \   \   \/     Version: %version
//  \   \         Application: MIG
//  /   /         Filename: ddr_phy_init.v
// /___/   /\     Date Last Modified: $Date: 2011/06/02 08:35:09 $
// \   \  /  \    Date Created:
//  \___\/\___\
//
//Device: 7 Series
//Design Name: DDR3 SDRAM
//Purpose:
//   Memory initialization and overall master state control during
//   initialization and calibration. Specifically, the following functions
//   are performed:
//     1. Memory initialization (initial AR, mode register programming, etc.)
//     2. Initiating write leveling
//     3. Generate training pattern writes for read leveling. Generate
//        memory readback for read leveling.
//   This module has an interface for providing control/address and write
//   data to the PHY Control Block during initialization/calibration.
//   Once initialization and calibration are complete, control is passed
//   to the MC.
// //Reference: //Revision History: // //***************************************************************************** /****************************************************************************** **$Id: ddr_phy_init.v,v 1.1 2011/06/02 08:35:09 mishra Exp $ **$Date: 2011/06/02 08:35:09 $ **$Author: mishra $ **$Revision: 1.1 $ **$Source: /devl/xcs/repo/env/Databases/ip/src2/O/mig_7series_v1_3/data/dlib/7series/ddr3_sdram/verilog/rtl/phy/ddr_phy_init.v,v $ ******************************************************************************/ `timescale 1ps/1ps module mig_7series_v1_9_ddr_phy_init # ( parameter TCQ = 100, parameter nCK_PER_CLK = 4, // # of memory clocks per CLK parameter CLK_PERIOD = 3000, // Logic (internal) clk period (in ps) parameter USE_ODT_PORT = 0, // 0 - No ODT output from FPGA // 1 - ODT output from FPGA parameter PRBS_WIDTH = 8, // PRBS sequence = 2^PRBS_WIDTH parameter BANK_WIDTH = 2, parameter CA_MIRROR = "OFF", // C/A mirror opt for DDR3 dual rank parameter COL_WIDTH = 10, parameter nCS_PER_RANK = 1, // # of CS bits per rank e.g. for // component I/F with CS_WIDTH=1, // nCS_PER_RANK=# of components parameter DQ_WIDTH = 64, parameter DQS_WIDTH = 8, parameter DQS_CNT_WIDTH = 3, // = ceil(log2(DQS_WIDTH)) parameter ROW_WIDTH = 14, parameter CS_WIDTH = 1, parameter RANKS = 1, // # of memory ranks in the interface parameter CKE_WIDTH = 1, // # of cke outputs parameter DRAM_TYPE = "DDR3", parameter REG_CTRL = "ON", parameter ADDR_CMD_MODE= "1T", // calibration Address parameter CALIB_ROW_ADD = 16'h0000,// Calibration row address parameter CALIB_COL_ADD = 12'h000, // Calibration column address parameter CALIB_BA_ADD = 3'h0, // Calibration bank address // DRAM mode settings parameter AL = "0", // Additive Latency option parameter BURST_MODE = "8", // Burst length parameter BURST_TYPE = "SEQ", // Burst type // parameter nAL = 0, // Additive latency (in clk cyc) parameter nCL = 5, // Read CAS latency (in clk cyc) parameter nCWL = 5, // Write CAS latency (in clk cyc) parameter tRFC = 110000, // Refresh-to-command delay (in ps) parameter OUTPUT_DRV = "HIGH", // DRAM reduced output drive option parameter RTT_NOM = "60", // Nominal ODT termination value parameter RTT_WR = "60", // Write ODT termination value parameter WRLVL = "ON", // Enable write leveling // parameter PHASE_DETECT = "ON", // Enable read phase detector parameter DDR2_DQSN_ENABLE = "YES", // Enable differential DQS for DDR2 parameter nSLOTS = 1, // Number of DIMM SLOTs in the system parameter SIM_INIT_OPTION = "NONE", // "NONE", "SKIP_PU_DLY", "SKIP_INIT" parameter SIM_CAL_OPTION = "NONE", // "NONE", "FAST_CAL", "SKIP_CAL" parameter CKE_ODT_AUX = "FALSE", parameter PRE_REV3ES = "OFF", // Enable TG error detection during calibration parameter TEST_AL = "0" // Internal use for ICM verification ) ( input clk, input rst, input [2*8*nCK_PER_CLK-1:0] prbs_o, input delay_incdec_done, input ck_addr_cmd_delay_done, input pi_phase_locked_all, input pi_dqs_found_done, input dqsfound_retry, input dqs_found_prech_req, output reg pi_phaselock_start, output pi_phase_locked_err, output pi_calib_done, input phy_if_empty, // Read/write calibration interface input wrlvl_done, input wrlvl_rank_done, input wrlvl_byte_done, input wrlvl_byte_redo, input wrlvl_final, output reg wrlvl_final_if_rst, input oclkdelay_calib_done, input oclk_prech_req, input oclk_calib_resume, output reg oclkdelay_calib_start, input done_dqs_tap_inc, input [5:0] rd_data_offset_0, input [5:0] rd_data_offset_1, input [5:0] rd_data_offset_2, input [6*RANKS-1:0] 
rd_data_offset_ranks_0, input [6*RANKS-1:0] rd_data_offset_ranks_1, input [6*RANKS-1:0] rd_data_offset_ranks_2, input pi_dqs_found_rank_done, input wrcal_done, input wrcal_prech_req, input wrcal_read_req, input wrcal_act_req, input temp_wrcal_done, input [7:0] slot_0_present, input [7:0] slot_1_present, output reg wl_sm_start, output reg wr_lvl_start, output reg wrcal_start, output reg wrcal_rd_wait, output reg wrcal_sanity_chk, output reg tg_timer_done, output reg no_rst_tg_mc, input rdlvl_stg1_done, input rdlvl_stg1_rank_done, output reg rdlvl_stg1_start, output reg pi_dqs_found_start, output reg detect_pi_found_dqs, // rdlvl stage 1 precharge requested after each DQS input rdlvl_prech_req, input rdlvl_last_byte_done, input wrcal_resume, input wrcal_sanity_chk_done, // MPR read leveling input mpr_rdlvl_done, input mpr_rnk_done, input mpr_last_byte_done, output reg mpr_rdlvl_start, output reg mpr_end_if_reset, // PRBS Read Leveling input prbs_rdlvl_done, input prbs_last_byte_done, input prbs_rdlvl_prech_req, output reg prbs_rdlvl_start, output reg prbs_gen_clk_en, // Signals shared btw multiple calibration stages output reg prech_done, // Data select / status output reg init_calib_complete, // Signal to mask memory model error for Invalid latching edge output reg calib_writes, // PHY address/control // 2 commands to PHY Control Block per div 2 clock in 2:1 mode // 4 commands to PHY Control Block per div 4 clock in 4:1 mode output reg [nCK_PER_CLK*ROW_WIDTH-1:0] phy_address, output reg [nCK_PER_CLK*BANK_WIDTH-1:0]phy_bank, output reg [nCK_PER_CLK-1:0] phy_ras_n, output reg [nCK_PER_CLK-1:0] phy_cas_n, output reg [nCK_PER_CLK-1:0] phy_we_n, output reg phy_reset_n, output [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] phy_cs_n, // Hard PHY Interface signals input phy_ctl_ready, input phy_ctl_full, input phy_cmd_full, input phy_data_full, output reg calib_ctl_wren, output reg calib_cmd_wren, output reg [1:0] calib_seq, output reg write_calib, output reg read_calib, // PHY_Ctl_Wd output reg [2:0] calib_cmd, // calib_aux_out used for CKE and ODT output reg [3:0] calib_aux_out, output reg [1:0] calib_odt , output reg [nCK_PER_CLK-1:0] calib_cke , output [1:0] calib_rank_cnt, output reg [1:0] calib_cas_slot, output reg [5:0] calib_data_offset_0, output reg [5:0] calib_data_offset_1, output reg [5:0] calib_data_offset_2, // PHY OUT_FIFO output reg calib_wrdata_en, output reg [2*nCK_PER_CLK*DQ_WIDTH-1:0] phy_wrdata, // PHY Read output phy_rddata_en, output phy_rddata_valid, output [255:0] dbg_phy_init ); //***************************************************************************** // Assertions to be added //***************************************************************************** // The phy_ctl_full signal must never be asserted in synchronous mode of // operation either 4:1 or 2:1 // // The RANKS parameter must never be set to '0' by the user // valid values: 1 to 4 // //***************************************************************************** //*************************************************************************** // Number of Read level stage 1 writes limited to a SDRAM row // The address of Read Level stage 1 reads must also be limited // to a single SDRAM row // (2^COL_WIDTH)/BURST_MODE = (2^10)/8 = 128 localparam NUM_STG1_WR_RD = (BURST_MODE == "8") ? 4 : (BURST_MODE == "4") ? 8 : 4; localparam ADDR_INC = (BURST_MODE == "8") ? 8 : (BURST_MODE == "4") ? 
4 : 8; // In a 2 slot dual rank per system RTT_NOM values // for Rank2 and Rank3 default to 40 ohms localparam RTT_NOM2 = "40"; localparam RTT_NOM3 = "40"; localparam RTT_NOM_int = (USE_ODT_PORT == 1) ? RTT_NOM : RTT_WR; // Specifically for use with half-frequency controller (nCK_PER_CLK=2) // = 1 if burst length = 4, = 0 if burst length = 8. Determines how // often row command needs to be issued during read-leveling // For DDR3 the burst length is fixed during calibration localparam BURST4_FLAG = (DRAM_TYPE == "DDR3")? 1'b0 : (BURST_MODE == "8") ? 1'b0 : ((BURST_MODE == "4") ? 1'b1 : 1'b0); //*************************************************************************** // Counter values used to determine bus timing // NOTE on all counter terminal counts - these can/should be one less than // the actual delay to take into account extra clock cycle delay in // generating the corresponding "done" signal //*************************************************************************** localparam CLK_MEM_PERIOD = CLK_PERIOD / nCK_PER_CLK; // Calculate initial delay required in number of CLK clock cycles // to delay initially. The counter is clocked by [CLK/1024] - which // is approximately division by 1000 - note that the formulas below will // result in more than the minimum wait time because of this approximation. // NOTE: For DDR3 JEDEC specifies to delay reset // by 200us, and CKE by an additional 500us after power-up // For DDR2 CKE is delayed by 200us after power up. localparam DDR3_RESET_DELAY_NS = 200000; localparam DDR3_CKE_DELAY_NS = 500000 + DDR3_RESET_DELAY_NS; localparam DDR2_CKE_DELAY_NS = 200000; localparam PWRON_RESET_DELAY_CNT = ((DDR3_RESET_DELAY_NS+CLK_PERIOD-1)/CLK_PERIOD); localparam PWRON_CKE_DELAY_CNT = (DRAM_TYPE == "DDR3") ? (((DDR3_CKE_DELAY_NS+CLK_PERIOD-1)/CLK_PERIOD)) : (((DDR2_CKE_DELAY_NS+CLK_PERIOD-1)/CLK_PERIOD)); // FOR DDR2 -1 taken out. With -1 not getting 200us. The equation // needs to be reworked. localparam DDR2_INIT_PRE_DELAY_PS = 400000; localparam DDR2_INIT_PRE_CNT = ((DDR2_INIT_PRE_DELAY_PS+CLK_PERIOD-1)/CLK_PERIOD)-1; // Calculate tXPR time: reset from CKE HIGH to valid command after power-up // tXPR = (max(5nCK, tRFC(min)+10ns). Add a few (blah, messy) more clock // cycles because this counter actually starts up before CKE is asserted // to memory. localparam TXPR_DELAY_CNT = (5*CLK_MEM_PERIOD > tRFC+10000) ? (((5+nCK_PER_CLK-1)/nCK_PER_CLK)-1)+11 : (((tRFC+10000+CLK_PERIOD-1)/CLK_PERIOD)-1)+11; // tDLLK/tZQINIT time = 512*tCK = 256*tCLKDIV localparam TDLLK_TZQINIT_DELAY_CNT = 255; // TWR values in ns. Both DDR2 and DDR3 have the same value. // 15000ns/tCK localparam TWR_CYC = ((15000) % CLK_MEM_PERIOD) ? (15000/CLK_MEM_PERIOD) + 1 : 15000/CLK_MEM_PERIOD; // time to wait between consecutive commands in PHY_INIT - this is a // generic number, and must be large enough to account for worst case // timing parameter (tRFC - refresh-to-active) across all memory speed // grades and operating frequencies. Expressed in clk // (Divided by 4 or Divided by 2) clock cycles. localparam CNTNEXT_CMD = 7'b1111111; // Counter values to keep track of which MR register to load during init // Set value of INIT_CNT_MR_DONE to equal value of counter for last mode // register configured during initialization. 
// NOTE: Reserve more bits for DDR2 - more MR accesses for DDR2 init localparam INIT_CNT_MR2 = 2'b00; localparam INIT_CNT_MR3 = 2'b01; localparam INIT_CNT_MR1 = 2'b10; localparam INIT_CNT_MR0 = 2'b11; localparam INIT_CNT_MR_DONE = 2'b11; // Register chip programmable values for DDR3 // The register chip for the registered DIMM needs to be programmed // before the initialization of the registered DIMM. // Address for the control word is in : DBA2, DA2, DA1, DA0 // Data for the control word is in: DBA1 DBA0, DA4, DA3 // The values will be stored in the local param in the following format // {DBA[2:0], DA[4:0]} // RC0 is global features control word. Address == 000 localparam REG_RC0 = 8'b00000000; // RC1 Clock driver enable control word. Enables or disables the four // output clocks in the register chip. For single rank and dual rank // two clocks will be enabled and for quad rank all the four clocks // will be enabled. Address == 000. Data = 0110 for single and dual rank. // = 0000 for quad rank localparam REG_RC1 = (RANKS <= 2) ? 8'b00110001 : 8'b00000001; // RC2 timing control word. Set in 1T timing mode // Address = 010. Data = 0000 localparam REG_RC2 = 8'b00000010; // RC3 timing control word. Setting the data to 0000 localparam REG_RC3 = 8'b00000011; // RC4 timing control work. Setting the data to 0000 localparam REG_RC4 = 8'b00000100; // RC5 timing control work. Setting the data to 0000 localparam REG_RC5 = 8'b00000101; // For non-zero AL values localparam nAL = (AL == "CL-1") ? nCL - 1 : 0; // Adding the register dimm latency to write latency localparam CWL_M = (REG_CTRL == "ON") ? nCWL + nAL + 1 : nCWL + nAL; // Count value to generate pi_phase_locked_err signal localparam PHASELOCKED_TIMEOUT = (SIM_CAL_OPTION == "NONE") ? 16383 : 1000; // Timeout interval for detecting error with Traffic Generator localparam [13:0] TG_TIMER_TIMEOUT = (SIM_CAL_OPTION == "NONE") ? 
14'h3FFF : 14'h0001; // Master state machine encoding localparam INIT_IDLE = 6'b000000; //0 localparam INIT_WAIT_CKE_EXIT = 6'b000001; //1 localparam INIT_LOAD_MR = 6'b000010; //2 localparam INIT_LOAD_MR_WAIT = 6'b000011; //3 localparam INIT_ZQCL = 6'b000100; //4 localparam INIT_WAIT_DLLK_ZQINIT = 6'b000101; //5 localparam INIT_WRLVL_START = 6'b000110; //6 localparam INIT_WRLVL_WAIT = 6'b000111; //7 localparam INIT_WRLVL_LOAD_MR = 6'b001000; //8 localparam INIT_WRLVL_LOAD_MR_WAIT = 6'b001001; //9 localparam INIT_WRLVL_LOAD_MR2 = 6'b001010; //A localparam INIT_WRLVL_LOAD_MR2_WAIT = 6'b001011; //B localparam INIT_RDLVL_ACT = 6'b001100; //C localparam INIT_RDLVL_ACT_WAIT = 6'b001101; //D localparam INIT_RDLVL_STG1_WRITE = 6'b001110; //E localparam INIT_RDLVL_STG1_WRITE_READ = 6'b001111; //F localparam INIT_RDLVL_STG1_READ = 6'b010000; //10 localparam INIT_RDLVL_STG2_READ = 6'b010001; //11 localparam INIT_RDLVL_STG2_READ_WAIT = 6'b010010; //12 localparam INIT_PRECHARGE_PREWAIT = 6'b010011; //13 localparam INIT_PRECHARGE = 6'b010100; //14 localparam INIT_PRECHARGE_WAIT = 6'b010101; //15 localparam INIT_DONE = 6'b010110; //16 localparam INIT_DDR2_PRECHARGE = 6'b010111; //17 localparam INIT_DDR2_PRECHARGE_WAIT = 6'b011000; //18 localparam INIT_REFRESH = 6'b011001; //19 localparam INIT_REFRESH_WAIT = 6'b011010; //1A localparam INIT_REG_WRITE = 6'b011011; //1B localparam INIT_REG_WRITE_WAIT = 6'b011100; //1C localparam INIT_DDR2_MULTI_RANK = 6'b011101; //1D localparam INIT_DDR2_MULTI_RANK_WAIT = 6'b011110; //1E localparam INIT_WRCAL_ACT = 6'b011111; //1F localparam INIT_WRCAL_ACT_WAIT = 6'b100000; //20 localparam INIT_WRCAL_WRITE = 6'b100001; //21 localparam INIT_WRCAL_WRITE_READ = 6'b100010; //22 localparam INIT_WRCAL_READ = 6'b100011; //23 localparam INIT_WRCAL_READ_WAIT = 6'b100100; //24 localparam INIT_WRCAL_MULT_READS = 6'b100101; //25 localparam INIT_PI_PHASELOCK_READS = 6'b100110; //26 localparam INIT_MPR_RDEN = 6'b100111; //27 localparam INIT_MPR_WAIT = 6'b101000; //28 localparam INIT_MPR_READ = 6'b101001; //29 localparam INIT_MPR_DISABLE_PREWAIT = 6'b101010; //2A localparam INIT_MPR_DISABLE = 6'b101011; //2B localparam INIT_MPR_DISABLE_WAIT = 6'b101100; //2C localparam INIT_OCLKDELAY_ACT = 6'b101101; //2D localparam INIT_OCLKDELAY_ACT_WAIT = 6'b101110; //2E localparam INIT_OCLKDELAY_WRITE = 6'b101111; //2F localparam INIT_OCLKDELAY_WRITE_WAIT = 6'b110000; //30 localparam INIT_OCLKDELAY_READ = 6'b110001; //31 localparam INIT_OCLKDELAY_READ_WAIT = 6'b110010; //32 localparam INIT_REFRESH_RNK2_WAIT = 6'b110011; //33 integer i, j, k, l, m, n, p, q; reg pi_dqs_found_all_r; (* ASYNC_REG = "TRUE" *) reg pi_phase_locked_all_r1; (* ASYNC_REG = "TRUE" *) reg pi_phase_locked_all_r2; (* ASYNC_REG = "TRUE" *) reg pi_phase_locked_all_r3; (* ASYNC_REG = "TRUE" *) reg pi_phase_locked_all_r4; reg pi_calib_rank_done_r; reg [13:0] pi_phaselock_timer; reg stg1_wr_done; reg rnk_ref_cnt; reg pi_dqs_found_done_r1; reg pi_dqs_found_rank_done_r; reg read_calib_int; reg read_calib_r; reg pi_calib_done_r; reg pi_calib_done_r1; reg burst_addr_r; reg [1:0] chip_cnt_r; reg [6:0] cnt_cmd_r; reg cnt_cmd_done_r; reg cnt_cmd_done_m7_r; reg [7:0] cnt_dllk_zqinit_r; reg cnt_dllk_zqinit_done_r; reg cnt_init_af_done_r; reg [1:0] cnt_init_af_r; reg [1:0] cnt_init_data_r; reg [1:0] cnt_init_mr_r; reg cnt_init_mr_done_r; reg cnt_init_pre_wait_done_r; reg [7:0] cnt_init_pre_wait_r; reg [9:0] cnt_pwron_ce_r; reg cnt_pwron_cke_done_r; reg cnt_pwron_cke_done_r1; reg [8:0] cnt_pwron_r; reg cnt_pwron_reset_done_r; reg cnt_txpr_done_r; 
reg [7:0] cnt_txpr_r; reg ddr2_pre_flag_r; reg ddr2_refresh_flag_r; reg ddr3_lm_done_r; reg [4:0] enable_wrlvl_cnt; reg init_complete_r; reg init_complete_r1; reg init_complete_r2; (* keep = "true" *) reg init_complete_r_timing; (* keep = "true" *) reg init_complete_r1_timing; reg [5:0] init_next_state; reg [5:0] init_state_r; reg [5:0] init_state_r1; wire [15:0] load_mr0; wire [15:0] load_mr1; wire [15:0] load_mr2; wire [15:0] load_mr3; reg mem_init_done_r; reg [1:0] mr2_r [0:3]; reg [2:0] mr1_r [0:3]; reg new_burst_r; reg [15:0] wrcal_start_dly_r; wire wrcal_start_pre; reg wrcal_resume_r; // Only one ODT signal per rank in PHY Control Block reg [nCK_PER_CLK-1:0] phy_tmp_odt_r; reg [nCK_PER_CLK-1:0] phy_tmp_odt_r1; reg [CS_WIDTH*nCS_PER_RANK-1:0] phy_tmp_cs1_r; reg [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] phy_int_cs_n; wire prech_done_pre; reg [15:0] prech_done_dly_r; reg prech_pending_r; reg prech_req_posedge_r; reg prech_req_r; reg pwron_ce_r; reg first_rdlvl_pat_r; reg first_wrcal_pat_r; reg phy_wrdata_en; reg phy_wrdata_en_r1; reg [1:0] wrdata_pat_cnt; reg [1:0] wrcal_pat_cnt; reg [ROW_WIDTH-1:0] address_w; reg [BANK_WIDTH-1:0] bank_w; reg rdlvl_stg1_done_r1; reg rdlvl_stg1_start_int; reg [15:0] rdlvl_start_dly0_r; reg rdlvl_start_pre; reg rdlvl_last_byte_done_r; wire rdlvl_rd; wire rdlvl_wr; reg rdlvl_wr_r; wire rdlvl_wr_rd; reg [2:0] reg_ctrl_cnt_r; reg [1:0] tmp_mr2_r [0:3]; reg [2:0] tmp_mr1_r [0:3]; reg wrlvl_done_r; reg wrlvl_done_r1; reg wrlvl_rank_done_r1; reg wrlvl_rank_done_r2; reg wrlvl_rank_done_r3; reg wrlvl_rank_done_r4; reg wrlvl_rank_done_r5; reg wrlvl_rank_done_r6; reg wrlvl_rank_done_r7; reg [2:0] wrlvl_rank_cntr; reg wrlvl_odt_ctl; reg wrlvl_odt; reg wrlvl_active; reg wrlvl_active_r1; reg [2:0] num_reads; reg temp_wrcal_done_r; reg temp_lmr_done; reg extend_cal_pat; reg [13:0] tg_timer; reg tg_timer_go; reg cnt_wrcal_rd; reg [3:0] cnt_wait; reg [7:0] wrcal_reads; reg [8:0] stg1_wr_rd_cnt; reg phy_data_full_r; reg wr_level_dqs_asrt; reg wr_level_dqs_asrt_r1; reg [1:0] dqs_asrt_cnt; reg [3:0] num_refresh; wire oclkdelay_calib_start_pre; reg [15:0] oclkdelay_start_dly_r; reg [3:0] oclk_wr_cnt; reg [3:0] wrcal_wr_cnt; reg wrlvl_final_r; reg prbs_rdlvl_done_r1; reg prbs_last_byte_done_r; reg phy_if_empty_r; reg wrcal_final_chk; //*************************************************************************** // Debug //*************************************************************************** //synthesis translate_off always @(posedge mem_init_done_r) begin if (!rst) $display ("PHY_INIT: Memory Initialization completed at %t", $time); end always @(posedge wrlvl_done) begin if (!rst && (WRLVL == "ON")) $display ("PHY_INIT: Write Leveling completed at %t", $time); end always @(posedge rdlvl_stg1_done) begin if (!rst) $display ("PHY_INIT: Read Leveling Stage 1 completed at %t", $time); end always @(posedge mpr_rdlvl_done) begin if (!rst) $display ("PHY_INIT: MPR Read Leveling completed at %t", $time); end always @(posedge oclkdelay_calib_done) begin if (!rst) $display ("PHY_INIT: OCLKDELAY calibration completed at %t", $time); end always @(posedge pi_calib_done_r1) begin if (!rst) $display ("PHY_INIT: Phaser_In Phase Locked at %t", $time); end always @(posedge pi_dqs_found_done) begin if (!rst) $display ("PHY_INIT: Phaser_In DQSFOUND completed at %t", $time); end always @(posedge wrcal_done) begin if (!rst && (WRLVL == "ON")) $display ("PHY_INIT: Write Calibration completed at %t", $time); end //synthesis translate_on assign dbg_phy_init[5:0] = init_state_r; 
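  // dbg_phy_init[5:0] exposes the master state machine state (init_state_r)
  // using the INIT_* encodings listed above; for example, a captured value of
  // 6'h16 (INIT_DONE) indicates that the initialization/calibration sequence
  // has completed.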
//*************************************************************************** // DQS count to be sent to hard PHY during Phaser_IN Phase Locking stage //*************************************************************************** // assign pi_phaselock_calib_cnt = dqs_cnt_r; assign pi_calib_done = pi_calib_done_r1; always @(posedge clk) begin if (rst) wrcal_final_chk <= #TCQ 1'b0; else if ((init_next_state == INIT_WRCAL_ACT) && wrcal_done && (DRAM_TYPE == "DDR3")) wrcal_final_chk <= #TCQ 1'b1; end always @(posedge clk) begin rdlvl_stg1_done_r1 <= #TCQ rdlvl_stg1_done; prbs_rdlvl_done_r1 <= #TCQ prbs_rdlvl_done; wrcal_resume_r <= #TCQ wrcal_resume; wrcal_sanity_chk <= #TCQ wrcal_final_chk; end always @(posedge clk) begin if (rst) mpr_end_if_reset <= #TCQ 1'b0; else if (mpr_last_byte_done && (num_refresh != 'd0)) mpr_end_if_reset <= #TCQ 1'b1; else mpr_end_if_reset <= #TCQ 1'b0; end // Siganl to mask memory model error for Invalid latching edge always @(posedge clk) if (rst) calib_writes <= #TCQ 1'b0; else if ((init_state_r == INIT_OCLKDELAY_WRITE) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_WRITE_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_WRITE_READ)) calib_writes <= #TCQ 1'b1; else calib_writes <= #TCQ 1'b0; always @(posedge clk) if (rst) wrcal_rd_wait <= #TCQ 1'b0; else if (init_state_r == INIT_WRCAL_READ_WAIT) wrcal_rd_wait <= #TCQ 1'b1; else wrcal_rd_wait <= #TCQ 1'b0; //*************************************************************************** // Signal PHY completion when calibration is finished // Signal assertion is delayed by four clock cycles to account for the // multi cycle path constraint to (phy_init_data_sel) signal. //*************************************************************************** always @(posedge clk) if (rst) begin init_complete_r <= #TCQ 1'b0; init_complete_r_timing <= #TCQ 1'b0; init_complete_r1 <= #TCQ 1'b0; init_complete_r1_timing <= #TCQ 1'b0; init_complete_r2 <= #TCQ 1'b0; init_calib_complete <= #TCQ 1'b0; end else begin if (init_state_r == INIT_DONE) begin init_complete_r <= #TCQ 1'b1; init_complete_r_timing <= #TCQ 1'b1; end init_complete_r1 <= #TCQ init_complete_r; init_complete_r1_timing <= #TCQ init_complete_r_timing; init_complete_r2 <= #TCQ init_complete_r1; init_calib_complete <= #TCQ init_complete_r2; end //*************************************************************************** // Instantiate FF for the phy_init_data_sel signal. A multi cycle path // constraint will be assigned to this signal. 
This signal will only be // used within the PHY //*************************************************************************** // FDRSE u_ff_phy_init_data_sel // ( // .Q (phy_init_data_sel), // .C (clk), // .CE (1'b1), // .D (init_complete_r), // .R (1'b0), // .S (1'b0) // ) /* synthesis syn_preserve=1 */ // /* synthesis syn_replicate = 0 */; //*************************************************************************** // Mode register programming //*************************************************************************** //***************************************************************** // DDR3 Load mode reg0 // Mode Register (MR0): // [15:13] - unused - 000 // [12] - Precharge Power-down DLL usage - 0 (DLL frozen, slow-exit), // 1 (DLL maintained) // [11:9] - write recovery for Auto Precharge (tWR/tCK = 6) // [8] - DLL reset - 0 or 1 // [7] - Test Mode - 0 (normal) // [6:4],[2] - CAS latency - CAS_LAT // [3] - Burst Type - BURST_TYPE // [1:0] - Burst Length - BURST_LEN // DDR2 Load mode register // Mode Register (MR): // [15:14] - unused - 00 // [13] - reserved - 0 // [12] - Power-down mode - 0 (normal) // [11:9] - write recovery - write recovery for Auto Precharge // (tWR/tCK = 6) // [8] - DLL reset - 0 or 1 // [7] - Test Mode - 0 (normal) // [6:4] - CAS latency - CAS_LAT // [3] - Burst Type - BURST_TYPE // [2:0] - Burst Length - BURST_LEN //***************************************************************** generate if(DRAM_TYPE == "DDR3") begin: gen_load_mr0_DDR3 assign load_mr0[1:0] = (BURST_MODE == "8") ? 2'b00 : (BURST_MODE == "OTF") ? 2'b01 : (BURST_MODE == "4") ? 2'b10 : 2'b11; assign load_mr0[2] = (nCL >= 12) ? 1'b1 : 1'b0; // LSb of CAS latency assign load_mr0[3] = (BURST_TYPE == "SEQ") ? 1'b0 : 1'b1; assign load_mr0[6:4] = ((nCL == 5) || (nCL == 13)) ? 3'b001 : ((nCL == 6) || (nCL == 14)) ? 3'b010 : (nCL == 7) ? 3'b011 : (nCL == 8) ? 3'b100 : (nCL == 9) ? 3'b101 : (nCL == 10) ? 3'b110 : (nCL == 11) ? 3'b111 : (nCL == 12) ? 3'b000 : 3'b111; assign load_mr0[7] = 1'b0; assign load_mr0[8] = 1'b1; // Reset DLL (init only) assign load_mr0[11:9] = (TWR_CYC == 5) ? 3'b001 : (TWR_CYC == 6) ? 3'b010 : (TWR_CYC == 7) ? 3'b011 : (TWR_CYC == 8) ? 3'b100 : (TWR_CYC == 9) ? 3'b101 : (TWR_CYC == 10) ? 3'b101 : (TWR_CYC == 11) ? 3'b110 : (TWR_CYC == 12) ? 3'b110 : (TWR_CYC == 13) ? 3'b111 : (TWR_CYC == 14) ? 3'b111 : (TWR_CYC == 15) ? 3'b000 : (TWR_CYC == 16) ? 3'b000 : 3'b010; assign load_mr0[12] = 1'b0; // Precharge Power-Down DLL 'slow-exit' assign load_mr0[15:13] = 3'b000; end else if (DRAM_TYPE == "DDR2") begin: gen_load_mr0_DDR2 // block: gen assign load_mr0[2:0] = (BURST_MODE == "8") ? 3'b011 : (BURST_MODE == "4") ? 3'b010 : 3'b111; assign load_mr0[3] = (BURST_TYPE == "SEQ") ? 1'b0 : 1'b1; assign load_mr0[6:4] = (nCL == 3) ? 3'b011 : (nCL == 4) ? 3'b100 : (nCL == 5) ? 3'b101 : (nCL == 6) ? 3'b110 : 3'b111; assign load_mr0[7] = 1'b0; assign load_mr0[8] = 1'b1; // Reset DLL (init only) assign load_mr0[11:9] = (TWR_CYC == 2) ? 3'b001 : (TWR_CYC == 3) ? 3'b010 : (TWR_CYC == 4) ? 3'b011 : (TWR_CYC == 5) ? 3'b100 : (TWR_CYC == 6) ? 
3'b101 : 3'b010; assign load_mr0[15:12]= 4'b0000; // Reserved end endgenerate //***************************************************************** // DDR3 Load mode reg1 // Mode Register (MR1): // [15:13] - unused - 00 // [12] - output enable - 0 (enabled for DQ, DQS, DQS#) // [11] - TDQS enable - 0 (TDQS disabled and DM enabled) // [10] - reserved - 0 (must be '0') // [9] - RTT[2] - 0 // [8] - reserved - 0 (must be '0') // [7] - write leveling - 0 (disabled), 1 (enabled) // [6] - RTT[1] - RTT[1:0] = 0(no ODT), 1(75), 2(150), 3(50) // [5] - Output driver impedance[1] - 0 (RZQ/6 and RZQ/7) // [4:3] - Additive CAS - ADDITIVE_CAS // [2] - RTT[0] // [1] - Output driver impedance[0] - 0(RZQ/6), or 1 (RZQ/7) // [0] - DLL enable - 0 (normal) // DDR2 ext mode register // Extended Mode Register (MR): // [15:14] - unused - 00 // [13] - reserved - 0 // [12] - output enable - 0 (enabled) // [11] - RDQS enable - 0 (disabled) // [10] - DQS# enable - 0 (enabled) // [9:7] - OCD Program - 111 or 000 (first 111, then 000 during init) // [6] - RTT[1] - RTT[1:0] = 0(no ODT), 1(75), 2(150), 3(50) // [5:3] - Additive CAS - ADDITIVE_CAS // [2] - RTT[0] // [1] - Output drive - REDUCE_DRV (= 0(full), = 1 (reduced) // [0] - DLL enable - 0 (normal) //***************************************************************** generate if(DRAM_TYPE == "DDR3") begin: gen_load_mr1_DDR3 assign load_mr1[0] = 1'b0; // DLL enabled during Imitialization assign load_mr1[1] = (OUTPUT_DRV == "LOW") ? 1'b0 : 1'b1; assign load_mr1[2] = ((RTT_NOM_int == "30") || (RTT_NOM_int == "40") || (RTT_NOM_int == "60")) ? 1'b1 : 1'b0; assign load_mr1[4:3] = (AL == "0") ? 2'b00 : (AL == "CL-1") ? 2'b01 : (AL == "CL-2") ? 2'b10 : 2'b11; assign load_mr1[5] = 1'b0; assign load_mr1[6] = ((RTT_NOM_int == "40") || (RTT_NOM_int == "120")) ? 1'b1 : 1'b0; assign load_mr1[7] = 1'b0; // Enable write lvl after init sequence assign load_mr1[8] = 1'b0; assign load_mr1[9] = ((RTT_NOM_int == "20") || (RTT_NOM_int == "30")) ? 1'b1 : 1'b0; assign load_mr1[10] = 1'b0; assign load_mr1[15:11] = 5'b00000; end else if (DRAM_TYPE == "DDR2") begin: gen_load_mr1_DDR2 assign load_mr1[0] = 1'b0; // DLL enabled during Imitialization assign load_mr1[1] = (OUTPUT_DRV == "LOW") ? 1'b1 : 1'b0; assign load_mr1[2] = ((RTT_NOM_int == "75") || (RTT_NOM_int == "50")) ? 1'b1 : 1'b0; assign load_mr1[5:3] = (AL == "0") ? 3'b000 : (AL == "1") ? 3'b001 : (AL == "2") ? 3'b010 : (AL == "3") ? 3'b011 : (AL == "4") ? 3'b100 : 3'b111; assign load_mr1[6] = ((RTT_NOM_int == "50") || (RTT_NOM_int == "150")) ? 1'b1 : 1'b0; assign load_mr1[9:7] = 3'b000; assign load_mr1[10] = (DDR2_DQSN_ENABLE == "YES") ? 1'b0 : 1'b1; assign load_mr1[15:11] = 5'b00000; end endgenerate //***************************************************************** // DDR3 Load mode reg2 // Mode Register (MR2): // [15:11] - unused - 00 // [10:9] - RTT_WR - 00 (Dynamic ODT off) // [8] - reserved - 0 (must be '0') // [7] - self-refresh temperature range - // 0 (normal), 1 (extended) // [6] - Auto Self-Refresh - 0 (manual), 1(auto) // [5:3] - CAS Write Latency (CWL) - // 000 (5 for 400 MHz device), // 001 (6 for 400 MHz to 533 MHz devices), // 010 (7 for 533 MHz to 667 MHz devices), // 011 (8 for 667 MHz to 800 MHz) // [2:0] - Partial Array Self-Refresh (Optional) - // 000 (full array) // Not used for DDR2 //***************************************************************** generate if(DRAM_TYPE == "DDR3") begin: gen_load_mr2_DDR3 assign load_mr2[2:0] = 3'b000; assign load_mr2[5:3] = (nCWL == 5) ? 3'b000 : (nCWL == 6) ? 
3'b001 : (nCWL == 7) ? 3'b010 : (nCWL == 8) ? 3'b011 : (nCWL == 9) ? 3'b100 : (nCWL == 10) ? 3'b101 : (nCWL == 11) ? 3'b110 : 3'b111; assign load_mr2[6] = 1'b0; assign load_mr2[7] = 1'b0; assign load_mr2[8] = 1'b0; // Dynamic ODT disabled assign load_mr2[10:9] = 2'b00; assign load_mr2[15:11] = 5'b00000; end else begin: gen_load_mr2_DDR2 assign load_mr2[15:0] = 16'd0; end endgenerate //***************************************************************** // DDR3 Load mode reg3 // Mode Register (MR3): // [15:3] - unused - All zeros // [2] - MPR Operation - 0(normal operation), 1(data flow from MPR) // [1:0] - MPR location - 00 (Predefined pattern) //***************************************************************** assign load_mr3[1:0] = 2'b00; assign load_mr3[2] = 1'b0; assign load_mr3[15:3] = 13'b0000000000000; // For multi-rank systems the rank being accessed during writes in // Read Leveling must be sent to phy_write for the bitslip logic assign calib_rank_cnt = chip_cnt_r; //*************************************************************************** // Logic to begin initial calibration, and to handle precharge requests // during read-leveling (to avoid tRAS violations if individual read // levelling calibration stages take more than max{tRAS) to complete). //*************************************************************************** // Assert when readback for each stage of read-leveling begins. However, // note this indicates only when the read command is issued and when // Phaser_IN has phase aligned FREQ_REF clock to read DQS. It does not // indicate when the read data is present on the bus (when this happens // after the read command is issued depends on CAS LATENCY) - there will // need to be some delay before valid data is present on the bus. // assign rdlvl_start_pre = (init_state_r == INIT_PI_PHASELOCK_READS); // Assert when read back for oclkdelay calibration begins assign oclkdelay_calib_start_pre = (init_state_r == INIT_OCLKDELAY_READ); // Assert when read back for write calibration begins assign wrcal_start_pre = (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS); // Common precharge signal done signal - pulses only when there has been // a precharge issued as a result of a PRECH_REQ pulse. Note also a common // PRECH_DONE signal is used for all blocks assign prech_done_pre = (((init_state_r == INIT_RDLVL_STG1_READ) || ((rdlvl_last_byte_done_r || prbs_last_byte_done_r) && (init_state_r == INIT_RDLVL_ACT_WAIT) && cnt_cmd_done_r) || (dqs_found_prech_req && (init_state_r == INIT_RDLVL_ACT_WAIT)) || (init_state_r == INIT_MPR_RDEN) || ((init_state_r == INIT_WRCAL_ACT_WAIT) && cnt_cmd_done_r) || ((init_state_r == INIT_OCLKDELAY_ACT_WAIT) && cnt_cmd_done_r) || (wrlvl_final && (init_state_r == INIT_REFRESH_WAIT) && cnt_cmd_done_r && ~oclkdelay_calib_done)) && prech_pending_r && !prech_req_posedge_r); always @(posedge clk) if (rst) pi_phaselock_start <= #TCQ 1'b0; else if (init_state_r == INIT_PI_PHASELOCK_READS) pi_phaselock_start <= #TCQ 1'b1; // Delay start of each calibration by 16 clock cycles to ensure that when // calibration logic begins, read data is already appearing on the bus. // Each circuit should synthesize using an SRL16. Assume that reset is // long enough to clear contents of SRL16. 
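  // The *_dly_r registers below are 16-bit shift registers: each cycle the
  // corresponding "pre" flag is shifted in at bit 0 ({reg[14:0], pre}), so
  // bit N holds that flag delayed by N+1 cycles. prech_done taps bit [15]
  // (16 cycles); rdlvl_stg1_start taps bit [14] and is registered once more,
  // again giving the 16-cycle delay described above; wrcal_start and
  // oclkdelay_calib_start tap bit [5] for a shorter delay.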
always @(posedge clk) begin rdlvl_last_byte_done_r <= #TCQ rdlvl_last_byte_done; prbs_last_byte_done_r <= #TCQ prbs_last_byte_done; rdlvl_start_dly0_r <= #TCQ {rdlvl_start_dly0_r[14:0], rdlvl_start_pre}; wrcal_start_dly_r <= #TCQ {wrcal_start_dly_r[14:0], wrcal_start_pre}; oclkdelay_start_dly_r <= #TCQ {oclkdelay_start_dly_r[14:0], oclkdelay_calib_start_pre}; prech_done_dly_r <= #TCQ {prech_done_dly_r[14:0], prech_done_pre}; end always @(posedge clk) prech_done <= #TCQ prech_done_dly_r[15]; always @(posedge clk) if (rst) mpr_rdlvl_start <= #TCQ 1'b0; else if (pi_dqs_found_done && (init_state_r == INIT_MPR_READ)) mpr_rdlvl_start <= #TCQ 1'b1; always @(posedge clk) phy_if_empty_r <= #TCQ phy_if_empty; always @(posedge clk) if (rst || (phy_if_empty_r && prbs_rdlvl_prech_req) || ((stg1_wr_rd_cnt == 'd1) && ~stg1_wr_done) || prbs_rdlvl_done) prbs_gen_clk_en <= #TCQ 1'b0; else if ((~phy_if_empty_r && rdlvl_stg1_done_r1 && ~prbs_rdlvl_done) || ((init_state_r == INIT_RDLVL_ACT_WAIT) && rdlvl_stg1_done_r1 && (cnt_cmd_r == 'd0))) prbs_gen_clk_en <= #TCQ 1'b1; generate if (RANKS < 2) begin always @(posedge clk) if (rst) begin rdlvl_stg1_start <= #TCQ 1'b0; rdlvl_stg1_start_int <= #TCQ 1'b0; rdlvl_start_pre <= #TCQ 1'b0; prbs_rdlvl_start <= #TCQ 1'b0; end else begin if (pi_dqs_found_done && cnt_cmd_done_r && (init_state_r == INIT_RDLVL_ACT_WAIT)) rdlvl_stg1_start_int <= #TCQ 1'b1; if (pi_dqs_found_done && (init_state_r == INIT_RDLVL_STG1_READ))begin rdlvl_start_pre <= #TCQ 1'b1; rdlvl_stg1_start <= #TCQ rdlvl_start_dly0_r[14]; end if (pi_dqs_found_done && rdlvl_stg1_done && (init_state_r == INIT_RDLVL_STG1_READ) && (WRLVL == "ON")) begin prbs_rdlvl_start <= #TCQ 1'b1; end end end else begin always @(posedge clk) if (rst || rdlvl_stg1_rank_done) begin rdlvl_stg1_start <= #TCQ 1'b0; rdlvl_stg1_start_int <= #TCQ 1'b0; rdlvl_start_pre <= #TCQ 1'b0; prbs_rdlvl_start <= #TCQ 1'b0; end else begin if (pi_dqs_found_done && cnt_cmd_done_r && (init_state_r == INIT_RDLVL_ACT_WAIT)) rdlvl_stg1_start_int <= #TCQ 1'b1; if (pi_dqs_found_done && (init_state_r == INIT_RDLVL_STG1_READ))begin rdlvl_start_pre <= #TCQ 1'b1; rdlvl_stg1_start <= #TCQ rdlvl_start_dly0_r[14]; end if (pi_dqs_found_done && rdlvl_stg1_done && (init_state_r == INIT_RDLVL_STG1_READ) && (WRLVL == "ON")) begin prbs_rdlvl_start <= #TCQ 1'b1; end end end endgenerate always @(posedge clk) begin if (rst || dqsfound_retry || wrlvl_byte_redo) begin pi_dqs_found_start <= #TCQ 1'b0; wrcal_start <= #TCQ 1'b0; end else begin if (!pi_dqs_found_done && init_state_r == INIT_RDLVL_STG2_READ) pi_dqs_found_start <= #TCQ 1'b1; if (wrcal_start_dly_r[5]) wrcal_start <= #TCQ 1'b1; end end // else: !if(rst) always @(posedge clk) if (rst) oclkdelay_calib_start <= #TCQ 1'b0; else if (oclkdelay_start_dly_r[5]) oclkdelay_calib_start <= #TCQ 1'b1; always @(posedge clk) if (rst) pi_dqs_found_done_r1 <= #TCQ 1'b0; else pi_dqs_found_done_r1 <= #TCQ pi_dqs_found_done; always @(posedge clk) wrlvl_final_r <= #TCQ wrlvl_final; // Reset IN_FIFO after final write leveling to make sure the FIFO // pointers are initialized always @(posedge clk) if (rst || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_REFRESH)) wrlvl_final_if_rst <= #TCQ 1'b0; else if (wrlvl_done_r && //(wrlvl_final_r && wrlvl_done_r && (init_state_r == INIT_WRLVL_LOAD_MR2)) wrlvl_final_if_rst <= #TCQ 1'b1; // Constantly enable DQS while write leveling is enabled in the memory // This is more to get rid of warnings in simulation, can later change // this code to only enable WRLVL_ACTIVE when WRLVL_START is 
asserted always @(posedge clk) if (rst || ((init_state_r1 != INIT_WRLVL_START) && (init_state_r == INIT_WRLVL_START))) wrlvl_odt_ctl <= #TCQ 1'b0; else if (wrlvl_rank_done && ~wrlvl_rank_done_r1) wrlvl_odt_ctl <= #TCQ 1'b1; generate if (nCK_PER_CLK == 4) begin: en_cnt_div4 always @ (posedge clk) if (rst) enable_wrlvl_cnt <= #TCQ 5'd0; else if ((init_state_r == INIT_WRLVL_START) || (wrlvl_odt && (enable_wrlvl_cnt == 5'd0))) enable_wrlvl_cnt <= #TCQ 5'd12; else if ((enable_wrlvl_cnt > 5'd0) && ~(phy_ctl_full || phy_cmd_full)) enable_wrlvl_cnt <= #TCQ enable_wrlvl_cnt - 1; // ODT stays asserted as long as write_calib // signal is asserted always @(posedge clk) if (rst || wrlvl_odt_ctl) wrlvl_odt <= #TCQ 1'b0; else if (enable_wrlvl_cnt == 5'd1) wrlvl_odt <= #TCQ 1'b1; end else begin: en_cnt_div2 always @ (posedge clk) if (rst) enable_wrlvl_cnt <= #TCQ 5'd0; else if ((init_state_r == INIT_WRLVL_START) || (wrlvl_odt && (enable_wrlvl_cnt == 5'd0))) enable_wrlvl_cnt <= #TCQ 5'd21; else if ((enable_wrlvl_cnt > 5'd0) && ~(phy_ctl_full || phy_cmd_full)) enable_wrlvl_cnt <= #TCQ enable_wrlvl_cnt - 1; // ODT stays asserted as long as write_calib // signal is asserted always @(posedge clk) if (rst || wrlvl_odt_ctl) wrlvl_odt <= #TCQ 1'b0; else if (enable_wrlvl_cnt == 5'd1) wrlvl_odt <= #TCQ 1'b1; end endgenerate always @(posedge clk) if (rst || wrlvl_rank_done || done_dqs_tap_inc) wrlvl_active <= #TCQ 1'b0; else if ((enable_wrlvl_cnt == 5'd1) && wrlvl_odt && !wrlvl_active) wrlvl_active <= #TCQ 1'b1; // signal used to assert DQS for write leveling. // the DQS will be asserted once every 16 clock cycles. always @(posedge clk)begin if(rst || (enable_wrlvl_cnt != 5'd1)) begin wr_level_dqs_asrt <= #TCQ 1'd0; end else if ((enable_wrlvl_cnt == 5'd1) && (wrlvl_active_r1)) begin wr_level_dqs_asrt <= #TCQ 1'd1; end end always @ (posedge clk) begin if (rst || (wrlvl_done_r && ~wrlvl_done_r1)) dqs_asrt_cnt <= #TCQ 2'd0; else if (wr_level_dqs_asrt && dqs_asrt_cnt != 2'd3) dqs_asrt_cnt <= #TCQ (dqs_asrt_cnt + 1); end always @ (posedge clk) begin if (rst || ~wrlvl_active) wr_lvl_start <= #TCQ 1'd0; else if (dqs_asrt_cnt == 2'd3) wr_lvl_start <= #TCQ 1'd1; end always @(posedge clk) begin if (rst) wl_sm_start <= #TCQ 1'b0; else wl_sm_start <= #TCQ wr_level_dqs_asrt_r1; end always @(posedge clk) begin wrlvl_active_r1 <= #TCQ wrlvl_active; wr_level_dqs_asrt_r1 <= #TCQ wr_level_dqs_asrt; wrlvl_done_r <= #TCQ wrlvl_done; wrlvl_done_r1 <= #TCQ wrlvl_done_r; wrlvl_rank_done_r1 <= #TCQ wrlvl_rank_done; wrlvl_rank_done_r2 <= #TCQ wrlvl_rank_done_r1; wrlvl_rank_done_r3 <= #TCQ wrlvl_rank_done_r2; wrlvl_rank_done_r4 <= #TCQ wrlvl_rank_done_r3; wrlvl_rank_done_r5 <= #TCQ wrlvl_rank_done_r4; wrlvl_rank_done_r6 <= #TCQ wrlvl_rank_done_r5; wrlvl_rank_done_r7 <= #TCQ wrlvl_rank_done_r6; end always @ (posedge clk) begin //if (rst) wrlvl_rank_cntr <= #TCQ 3'd0; //else if (wrlvl_rank_done) // wrlvl_rank_cntr <= #TCQ wrlvl_rank_cntr + 1'b1; end //***************************************************************** // Precharge request logic - those calibration logic blocks // that require greater than tRAS(max) to finish must break up // their calibration into smaller units of time, with precharges // issued in between. 
This is done using the XXX_PRECH_REQ and // PRECH_DONE handshaking between PHY_INIT and those blocks //***************************************************************** // Shared request from multiple sources assign prech_req = oclk_prech_req | rdlvl_prech_req | wrcal_prech_req | prbs_rdlvl_prech_req | (dqs_found_prech_req & (init_state_r == INIT_RDLVL_STG2_READ_WAIT)); // Handshaking logic to force precharge during read leveling, and to // notify read leveling logic when precharge has been initiated and // it's okay to proceed with leveling again always @(posedge clk) if (rst) begin prech_req_r <= #TCQ 1'b0; prech_req_posedge_r <= #TCQ 1'b0; prech_pending_r <= #TCQ 1'b0; end else begin prech_req_r <= #TCQ prech_req; prech_req_posedge_r <= #TCQ prech_req & ~prech_req_r; if (prech_req_posedge_r) prech_pending_r <= #TCQ 1'b1; // Clear after we've finished with the precharge and have // returned to issuing read leveling calibration reads else if (prech_done_pre) prech_pending_r <= #TCQ 1'b0; end //*************************************************************************** // Various timing counters //*************************************************************************** //***************************************************************** // Generic delay for various states that require it (e.g. for turnaround // between read and write). Make this a sufficiently large number of clock // cycles to cover all possible frequencies and memory components) // Requirements for this counter: // 1. Greater than tMRD // 2. tRFC (refresh-active) for DDR2 // 3. (list the other requirements, slacker...) //***************************************************************** always @(posedge clk) begin case (init_state_r) INIT_LOAD_MR_WAIT, INIT_WRLVL_LOAD_MR_WAIT, INIT_WRLVL_LOAD_MR2_WAIT, INIT_MPR_WAIT, INIT_MPR_DISABLE_PREWAIT, INIT_MPR_DISABLE_WAIT, INIT_OCLKDELAY_ACT_WAIT, INIT_OCLKDELAY_WRITE_WAIT, INIT_RDLVL_ACT_WAIT, INIT_RDLVL_STG1_WRITE_READ, INIT_RDLVL_STG2_READ_WAIT, INIT_WRCAL_ACT_WAIT, INIT_WRCAL_WRITE_READ, INIT_WRCAL_READ_WAIT, INIT_PRECHARGE_PREWAIT, INIT_PRECHARGE_WAIT, INIT_DDR2_PRECHARGE_WAIT, INIT_REG_WRITE_WAIT, INIT_REFRESH_WAIT, INIT_REFRESH_RNK2_WAIT: begin if (phy_ctl_full || phy_cmd_full) cnt_cmd_r <= #TCQ cnt_cmd_r; else cnt_cmd_r <= #TCQ cnt_cmd_r + 1; end INIT_WRLVL_WAIT: cnt_cmd_r <= #TCQ 'b0; default: cnt_cmd_r <= #TCQ 'b0; endcase end // pulse when count reaches terminal count always @(posedge clk) cnt_cmd_done_r <= #TCQ (cnt_cmd_r == CNTNEXT_CMD); // For ODT deassertion - hold throughout post read/write wait stage, but // deassert before next command. The post read/write stage is very long, so // we simply address the longest case here plus some margin. 
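  // With CNTNEXT_CMD = 7'b1111111 (127), cnt_cmd_done_r pulses when cnt_cmd_r
  // reaches 127, while cnt_cmd_done_m7_r below pulses at 120, i.e. seven
  // cycles earlier; that is the margin used to drop ODT before the next
  // command is issued.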
always @(posedge clk) cnt_cmd_done_m7_r <= #TCQ (cnt_cmd_r == (CNTNEXT_CMD - 7)); //************************************************************************ // Added to support PO fine delay inc when TG errors always @(posedge clk) begin case (init_state_r) INIT_WRCAL_READ_WAIT: begin if (phy_ctl_full || phy_cmd_full) cnt_wait <= #TCQ cnt_wait; else cnt_wait <= #TCQ cnt_wait + 1; end default: cnt_wait <= #TCQ 'b0; endcase end always @(posedge clk) cnt_wrcal_rd <= #TCQ (cnt_wait == 'd4); always @(posedge clk) begin if (rst || ~temp_wrcal_done) temp_lmr_done <= #TCQ 1'b0; else if (temp_wrcal_done && (init_state_r == INIT_LOAD_MR)) temp_lmr_done <= #TCQ 1'b1; end always @(posedge clk) temp_wrcal_done_r <= #TCQ temp_wrcal_done; always @(posedge clk) if (rst) begin tg_timer_go <= #TCQ 1'b0; end else if ((PRE_REV3ES == "ON") && temp_wrcal_done && temp_lmr_done && (init_state_r == INIT_WRCAL_READ_WAIT)) begin tg_timer_go <= #TCQ 1'b1; end else begin tg_timer_go <= #TCQ 1'b0; end always @(posedge clk) begin if (rst || (temp_wrcal_done && ~temp_wrcal_done_r) || (init_state_r == INIT_PRECHARGE_PREWAIT)) tg_timer <= #TCQ 'd0; else if ((pi_phaselock_timer == PHASELOCKED_TIMEOUT) && tg_timer_go && (tg_timer != TG_TIMER_TIMEOUT)) tg_timer <= #TCQ tg_timer + 1; end always @(posedge clk) begin if (rst) tg_timer_done <= #TCQ 1'b0; else if (tg_timer == TG_TIMER_TIMEOUT) tg_timer_done <= #TCQ 1'b1; else tg_timer_done <= #TCQ 1'b0; end always @(posedge clk) begin if (rst) no_rst_tg_mc <= #TCQ 1'b0; else if ((init_state_r == INIT_WRCAL_ACT) && wrcal_read_req) no_rst_tg_mc <= #TCQ 1'b1; else no_rst_tg_mc <= #TCQ 1'b0; end //************************************************************************ always @(posedge clk) begin if (rst) detect_pi_found_dqs <= #TCQ 1'b0; else if ((cnt_cmd_r == 7'b0111111) && (init_state_r == INIT_RDLVL_STG2_READ_WAIT)) detect_pi_found_dqs <= #TCQ 1'b1; else detect_pi_found_dqs <= #TCQ 1'b0; end //***************************************************************** // Initial delay after power-on for RESET, CKE // NOTE: Could reduce power consumption by turning off these counters // after initial power-up (at expense of more logic) // NOTE: Likely can combine multiple counters into single counter //***************************************************************** // Create divided by 1024 version of clock always @(posedge clk) if (rst) begin cnt_pwron_ce_r <= #TCQ 10'h000; pwron_ce_r <= #TCQ 1'b0; end else begin cnt_pwron_ce_r <= #TCQ cnt_pwron_ce_r + 1; pwron_ce_r <= #TCQ (cnt_pwron_ce_r == 10'h3FF); end // "Main" power-on counter - ticks every CLKDIV/1024 cycles always @(posedge clk) if (rst) cnt_pwron_r <= #TCQ 'b0; else if (pwron_ce_r) cnt_pwron_r <= #TCQ cnt_pwron_r + 1; always @(posedge clk) if (rst || ~phy_ctl_ready) begin cnt_pwron_reset_done_r <= #TCQ 1'b0; cnt_pwron_cke_done_r <= #TCQ 1'b0; end else begin // skip power-up count for simulation purposes only if ((SIM_INIT_OPTION == "SKIP_PU_DLY") || (SIM_INIT_OPTION == "SKIP_INIT")) begin cnt_pwron_reset_done_r <= #TCQ 1'b1; cnt_pwron_cke_done_r <= #TCQ 1'b1; end else begin // otherwise, create latched version of done signal for RESET, CKE if (DRAM_TYPE == "DDR3") begin if (!cnt_pwron_reset_done_r) cnt_pwron_reset_done_r <= #TCQ (cnt_pwron_r == PWRON_RESET_DELAY_CNT); if (!cnt_pwron_cke_done_r) cnt_pwron_cke_done_r <= #TCQ (cnt_pwron_r == PWRON_CKE_DELAY_CNT); end else begin // DDR2 cnt_pwron_reset_done_r <= #TCQ 1'b1; // not needed if (!cnt_pwron_cke_done_r) cnt_pwron_cke_done_r <= #TCQ (cnt_pwron_r == PWRON_CKE_DELAY_CNT); end 
end end // else: !if(rst || ~phy_ctl_ready) always @(posedge clk) cnt_pwron_cke_done_r1 <= #TCQ cnt_pwron_cke_done_r; // Keep RESET asserted and CKE deasserted until after power-on delay always @(posedge clk or posedge rst) begin if (rst) phy_reset_n <= #TCQ 1'b0; else phy_reset_n <= #TCQ cnt_pwron_reset_done_r; // phy_cke <= #TCQ {CKE_WIDTH{cnt_pwron_cke_done_r}}; end //***************************************************************** // Counter for tXPR (pronouned "Tax-Payer") - wait time after // CKE deassertion before first MRS command can be asserted //***************************************************************** always @(posedge clk) if (!cnt_pwron_cke_done_r) begin cnt_txpr_r <= #TCQ 'b0; cnt_txpr_done_r <= #TCQ 1'b0; end else begin cnt_txpr_r <= #TCQ cnt_txpr_r + 1; if (!cnt_txpr_done_r) cnt_txpr_done_r <= #TCQ (cnt_txpr_r == TXPR_DELAY_CNT); end //***************************************************************** // Counter for the initial 400ns wait for issuing precharge all // command after CKE assertion. Only for DDR2. //***************************************************************** always @(posedge clk) if (!cnt_pwron_cke_done_r) begin cnt_init_pre_wait_r <= #TCQ 'b0; cnt_init_pre_wait_done_r <= #TCQ 1'b0; end else begin cnt_init_pre_wait_r <= #TCQ cnt_init_pre_wait_r + 1; if (!cnt_init_pre_wait_done_r) cnt_init_pre_wait_done_r <= #TCQ (cnt_init_pre_wait_r >= DDR2_INIT_PRE_CNT); end //***************************************************************** // Wait for both DLL to lock (tDLLK) and ZQ calibration to finish // (tZQINIT). Both take the same amount of time (512*tCK) //***************************************************************** always @(posedge clk) if (init_state_r == INIT_ZQCL) begin cnt_dllk_zqinit_r <= #TCQ 'b0; cnt_dllk_zqinit_done_r <= #TCQ 1'b0; end else if (~(phy_ctl_full || phy_cmd_full)) begin cnt_dllk_zqinit_r <= #TCQ cnt_dllk_zqinit_r + 1; if (!cnt_dllk_zqinit_done_r) cnt_dllk_zqinit_done_r <= #TCQ (cnt_dllk_zqinit_r == TDLLK_TZQINIT_DELAY_CNT); end //***************************************************************** // Keep track of which MRS counter needs to be programmed during // memory initialization // The counter and the done signal are reset an additional time // for DDR2. The same signals are used for the additional DDR2 // initialization sequence. 
//***************************************************************** always @(posedge clk) if ((init_state_r == INIT_IDLE)|| ((init_state_r == INIT_REFRESH) && (~mem_init_done_r))) begin cnt_init_mr_r <= #TCQ 'b0; cnt_init_mr_done_r <= #TCQ 1'b0; end else if (init_state_r == INIT_LOAD_MR) begin cnt_init_mr_r <= #TCQ cnt_init_mr_r + 1; cnt_init_mr_done_r <= #TCQ (cnt_init_mr_r == INIT_CNT_MR_DONE); end //***************************************************************** // Flag to tell if the first precharge for DDR2 init sequence is // done //***************************************************************** always @(posedge clk) if (init_state_r == INIT_IDLE) ddr2_pre_flag_r<= #TCQ 'b0; else if (init_state_r == INIT_LOAD_MR) ddr2_pre_flag_r<= #TCQ 1'b1; // reset the flag for multi rank case else if ((ddr2_refresh_flag_r) && (init_state_r == INIT_LOAD_MR_WAIT)&& (cnt_cmd_done_r) && (cnt_init_mr_done_r)) ddr2_pre_flag_r <= #TCQ 'b0; //***************************************************************** // Flag to tell if the refresh stat for DDR2 init sequence is // reached //***************************************************************** always @(posedge clk) if (init_state_r == INIT_IDLE) ddr2_refresh_flag_r<= #TCQ 'b0; else if ((init_state_r == INIT_REFRESH) && (~mem_init_done_r)) // reset the flag for multi rank case ddr2_refresh_flag_r<= #TCQ 1'b1; else if ((ddr2_refresh_flag_r) && (init_state_r == INIT_LOAD_MR_WAIT)&& (cnt_cmd_done_r) && (cnt_init_mr_done_r)) ddr2_refresh_flag_r <= #TCQ 'b0; //***************************************************************** // Keep track of the number of auto refreshes for DDR2 // initialization. The spec asks for a minimum of two refreshes. // Four refreshes are performed here. The two extra refreshes is to // account for the 200 clock cycle wait between step h and l. // Without the two extra refreshes we would have to have a // wait state. 
//***************************************************************** always @(posedge clk) if (init_state_r == INIT_IDLE) begin cnt_init_af_r <= #TCQ 'b0; cnt_init_af_done_r <= #TCQ 1'b0; end else if ((init_state_r == INIT_REFRESH) && (~mem_init_done_r))begin cnt_init_af_r <= #TCQ cnt_init_af_r + 1; cnt_init_af_done_r <= #TCQ (cnt_init_af_r == 2'b11); end //***************************************************************** // Keep track of the register control word programming for // DDR3 RDIMM //***************************************************************** always @(posedge clk) if (init_state_r == INIT_IDLE) reg_ctrl_cnt_r <= #TCQ 'b0; else if (init_state_r == INIT_REG_WRITE) reg_ctrl_cnt_r <= #TCQ reg_ctrl_cnt_r + 1; generate if (RANKS < 2) begin: one_rank always @(posedge clk) if ((init_state_r == INIT_IDLE) || rdlvl_last_byte_done) stg1_wr_done <= #TCQ 1'b0; else if (init_state_r == INIT_RDLVL_STG1_WRITE_READ) stg1_wr_done <= #TCQ 1'b1; end else begin: two_ranks always @(posedge clk) if ((init_state_r == INIT_IDLE) || rdlvl_last_byte_done || (rdlvl_stg1_rank_done )) stg1_wr_done <= #TCQ 1'b0; else if (init_state_r == INIT_RDLVL_STG1_WRITE_READ) stg1_wr_done <= #TCQ 1'b1; end endgenerate always @(posedge clk) if (rst) rnk_ref_cnt <= #TCQ 1'b0; else if (stg1_wr_done && (init_state_r == INIT_REFRESH_WAIT) && cnt_cmd_done_r) rnk_ref_cnt <= #TCQ ~rnk_ref_cnt; always @(posedge clk) if (rst || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_RDLVL_ACT)) num_refresh <= #TCQ 'd0; else if ((init_state_r == INIT_REFRESH) && (~pi_dqs_found_done || ((DRAM_TYPE == "DDR3") && ~oclkdelay_calib_done) || (rdlvl_stg1_done && ~prbs_rdlvl_done) || ((CLK_PERIOD/nCK_PER_CLK <= 2500) && wrcal_done && ~rdlvl_stg1_done) || ((CLK_PERIOD/nCK_PER_CLK > 2500) && wrlvl_done_r1 && ~rdlvl_stg1_done))) num_refresh <= #TCQ num_refresh + 1; //*************************************************************************** // Initialization state machine //*************************************************************************** //***************************************************************** // Next-state logic //***************************************************************** always @(posedge clk) if (rst)begin init_state_r <= #TCQ INIT_IDLE; init_state_r1 <= #TCQ INIT_IDLE; end else begin init_state_r <= #TCQ init_next_state; init_state_r1 <= #TCQ init_state_r; end always @(burst_addr_r or chip_cnt_r or cnt_cmd_done_r or cnt_dllk_zqinit_done_r or cnt_init_af_done_r or cnt_init_mr_done_r or phy_ctl_ready or phy_ctl_full or stg1_wr_done or rdlvl_last_byte_done or phy_cmd_full or num_reads or rnk_ref_cnt or mpr_last_byte_done or oclk_wr_cnt or mpr_rdlvl_done or mpr_rnk_done or num_refresh or oclkdelay_calib_done or oclk_prech_req or oclk_calib_resume or wrlvl_byte_redo or wrlvl_byte_done or wrlvl_final or wrlvl_final_r or cnt_init_pre_wait_done_r or cnt_pwron_cke_done_r or delay_incdec_done or wrcal_wr_cnt or ck_addr_cmd_delay_done or wrcal_read_req or wrcal_reads or cnt_wrcal_rd or wrcal_act_req or temp_wrcal_done or temp_lmr_done or cnt_txpr_done_r or ddr2_pre_flag_r or ddr2_refresh_flag_r or ddr3_lm_done_r or init_state_r or mem_init_done_r or dqsfound_retry or dqs_found_prech_req or prech_req_posedge_r or prech_req_r or wrcal_done or wrcal_resume_r or rdlvl_stg1_done or rdlvl_stg1_done_r1 or rdlvl_stg1_rank_done or rdlvl_stg1_start_int or prbs_rdlvl_done or prbs_last_byte_done or prbs_rdlvl_done_r1 or stg1_wr_rd_cnt or rdlvl_prech_req or wrcal_prech_req or 
read_calib_int or read_calib_r or pi_calib_done_r1 or pi_phase_locked_all_r3 or pi_phase_locked_all_r4 or pi_dqs_found_done or pi_dqs_found_rank_done or pi_dqs_found_start or reg_ctrl_cnt_r or wrlvl_done_r1 or wrlvl_rank_done_r7 or wrcal_final_chk or wrcal_sanity_chk_done) begin init_next_state = init_state_r; (* full_case, parallel_case *) case (init_state_r) //******************************************************* // DRAM initialization //******************************************************* // Initial state - wait for: // 1. Power-on delays to pass // 2. PHY Control Block to assert phy_ctl_ready // 3. PHY Control FIFO must not be FULL // 4. Read path initialization to finish INIT_IDLE: if (cnt_pwron_cke_done_r && phy_ctl_ready && ck_addr_cmd_delay_done && delay_incdec_done && ~(phy_ctl_full || phy_cmd_full) ) begin // If skipping memory initialization (simulation only) if (SIM_INIT_OPTION == "SKIP_INIT") //if (WRLVL == "ON") // Proceed to write leveling // init_next_state = INIT_WRLVL_START; //else //if (SIM_CAL_OPTION != "SKIP_CAL") // Proceed to Phaser_In phase lock init_next_state = INIT_RDLVL_ACT; // else // Skip read leveling //init_next_state = INIT_DONE; else init_next_state = INIT_WAIT_CKE_EXIT; end // Wait minimum of Reset CKE exit time (tXPR = max(tXS, INIT_WAIT_CKE_EXIT: if ((cnt_txpr_done_r) && (DRAM_TYPE == "DDR3") && ~(phy_ctl_full || phy_cmd_full)) begin if((REG_CTRL == "ON") && ((nCS_PER_RANK > 1) || (RANKS > 1))) //register write for reg dimm. Some register chips // have the register chip in a pre-programmed state // in that case the nCS_PER_RANK == 1 && RANKS == 1 init_next_state = INIT_REG_WRITE; else // Load mode register - this state is repeated multiple times init_next_state = INIT_LOAD_MR; end else if ((cnt_init_pre_wait_done_r) && (DRAM_TYPE == "DDR2") && ~(phy_ctl_full || phy_cmd_full)) // DDR2 start with a precharge all command init_next_state = INIT_DDR2_PRECHARGE; INIT_REG_WRITE: init_next_state = INIT_REG_WRITE_WAIT; INIT_REG_WRITE_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin if(reg_ctrl_cnt_r == 3'd5) init_next_state = INIT_LOAD_MR; else init_next_state = INIT_REG_WRITE; end INIT_LOAD_MR: init_next_state = INIT_LOAD_MR_WAIT; // After loading MR, wait at least tMRD INIT_LOAD_MR_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin // If finished loading all mode registers, proceed to next step if (prbs_rdlvl_done && pi_dqs_found_done && rdlvl_stg1_done) // for ddr3 when the correct burst length is writtern at end init_next_state = INIT_PRECHARGE; else if (~wrcal_done && temp_lmr_done) init_next_state = INIT_PRECHARGE_PREWAIT; else if (cnt_init_mr_done_r)begin if(DRAM_TYPE == "DDR3") init_next_state = INIT_ZQCL; else begin //DDR2 if(ddr2_refresh_flag_r)begin // memory initialization per rank for multi-rank case if (!mem_init_done_r && (chip_cnt_r <= RANKS-1)) init_next_state = INIT_DDR2_MULTI_RANK; else init_next_state = INIT_RDLVL_ACT; // ddr2 initialization done.load mode state after refresh end else init_next_state = INIT_DDR2_PRECHARGE; end end else init_next_state = INIT_LOAD_MR; end // DDR2 multi rank transition state INIT_DDR2_MULTI_RANK: init_next_state = INIT_DDR2_MULTI_RANK_WAIT; INIT_DDR2_MULTI_RANK_WAIT: init_next_state = INIT_DDR2_PRECHARGE; // Initial ZQ calibration INIT_ZQCL: init_next_state = INIT_WAIT_DLLK_ZQINIT; // Wait until both DLL have locked, and ZQ calibration done INIT_WAIT_DLLK_ZQINIT: if (cnt_dllk_zqinit_done_r && ~(phy_ctl_full || phy_cmd_full)) // memory initialization per rank for multi-rank 
case if (!mem_init_done_r && (chip_cnt_r <= RANKS-1)) init_next_state = INIT_LOAD_MR; //else if (WRLVL == "ON") // init_next_state = INIT_WRLVL_START; else // skip write-leveling (e.g. for DDR2 interface) init_next_state = INIT_RDLVL_ACT; // Initial precharge for DDR2 INIT_DDR2_PRECHARGE: init_next_state = INIT_DDR2_PRECHARGE_WAIT; INIT_DDR2_PRECHARGE_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin if (ddr2_pre_flag_r) init_next_state = INIT_REFRESH; else // from precharge state initially go to load mode init_next_state = INIT_LOAD_MR; end INIT_REFRESH: if ((RANKS == 2) && (chip_cnt_r == RANKS - 1)) init_next_state = INIT_REFRESH_RNK2_WAIT; else init_next_state = INIT_REFRESH_WAIT; INIT_REFRESH_RNK2_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_PRECHARGE; INIT_REFRESH_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full))begin if(cnt_init_af_done_r && (~mem_init_done_r)) // go to lm state as part of DDR2 init sequence init_next_state = INIT_LOAD_MR; else if (pi_dqs_found_done && ~wrlvl_done_r1 && ~wrlvl_final && ~wrlvl_byte_redo && (WRLVL == "ON")) init_next_state = INIT_WRLVL_START; else if (~pi_dqs_found_done || (rdlvl_stg1_done && ~prbs_rdlvl_done) || ((CLK_PERIOD/nCK_PER_CLK <= 2500) && wrcal_done && ~rdlvl_stg1_done) || ((CLK_PERIOD/nCK_PER_CLK > 2500) && wrlvl_done_r1 && ~rdlvl_stg1_done)) begin if (num_refresh == 'd8) init_next_state = INIT_RDLVL_ACT; else init_next_state = INIT_REFRESH; end else if ((~wrcal_done && wrlvl_byte_redo)&& (DRAM_TYPE == "DDR3") && (CLK_PERIOD/nCK_PER_CLK > 2500)) init_next_state = INIT_WRLVL_LOAD_MR2; else if (((prbs_rdlvl_done && rdlvl_stg1_done && pi_dqs_found_done) && (WRLVL == "ON")) && mem_init_done_r && (CLK_PERIOD/nCK_PER_CLK > 2500)) init_next_state = INIT_WRCAL_ACT; else if (pi_dqs_found_done && (DRAM_TYPE == "DDR3") && ~(mpr_last_byte_done || mpr_rdlvl_done)) begin if (num_refresh == 'd8) init_next_state = INIT_MPR_RDEN; else init_next_state = INIT_REFRESH; end else if (((~oclkdelay_calib_done && wrlvl_final) || (~wrcal_done && wrlvl_byte_redo)) && (DRAM_TYPE == "DDR3")) init_next_state = INIT_WRLVL_LOAD_MR2; else if (~oclkdelay_calib_done && (mpr_last_byte_done || mpr_rdlvl_done) && (DRAM_TYPE == "DDR3")) begin if (num_refresh == 'd8) init_next_state = INIT_OCLKDELAY_ACT; else init_next_state = INIT_REFRESH; end else if ((~wrcal_done && (WRLVL == "ON") && (CLK_PERIOD/nCK_PER_CLK <= 2500)) && pi_dqs_found_done) init_next_state = INIT_WRCAL_ACT; else if (mem_init_done_r) begin if (RANKS < 2) init_next_state = INIT_RDLVL_ACT; else if (stg1_wr_done && ~rnk_ref_cnt && ~rdlvl_stg1_done) init_next_state = INIT_PRECHARGE; else init_next_state = INIT_RDLVL_ACT; end else // to DDR2 init state as part of DDR2 init sequence init_next_state = INIT_REFRESH; end //****************************************************** // Write Leveling //******************************************************* // Enable write leveling in MR1 and start write leveling // for current rank INIT_WRLVL_START: init_next_state = INIT_WRLVL_WAIT; // Wait for both MR load and write leveling to complete // (write leveling should take much longer than MR load..) 
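// Overall write-leveling flow: INIT_WRLVL_START enables write leveling in
// MR1 for the current rank, INIT_WRLVL_WAIT waits for wrlvl_rank_done_r7,
// INIT_WRLVL_LOAD_MR disables write leveling in MR1 again, and
// INIT_WRLVL_LOAD_MR2 programs the ODT settings in MR2. The sequence
// repeats from INIT_WRLVL_START until wrlvl_done_r1 is set.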
INIT_WRLVL_WAIT: if (wrlvl_rank_done_r7 && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_WRLVL_LOAD_MR; // Disable write leveling in MR1 for current rank INIT_WRLVL_LOAD_MR: init_next_state = INIT_WRLVL_LOAD_MR_WAIT; INIT_WRLVL_LOAD_MR_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_WRLVL_LOAD_MR2; // Load MR2 to set ODT: Dynamic ODT for single rank case // And ODTs for multi-rank case as well INIT_WRLVL_LOAD_MR2: init_next_state = INIT_WRLVL_LOAD_MR2_WAIT; // Wait tMRD before proceeding INIT_WRLVL_LOAD_MR2_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin //if (wrlvl_byte_done) // init_next_state = INIT_PRECHARGE_PREWAIT; // else if ((RANKS == 2) && wrlvl_rank_done_r2) // init_next_state = INIT_WRLVL_LOAD_MR2_WAIT; if (~wrlvl_done_r1) init_next_state = INIT_WRLVL_START; else if (SIM_CAL_OPTION == "SKIP_CAL") // If skip rdlvl, then we're done init_next_state = INIT_DONE; else // Otherwise, proceed to read leveling //init_next_state = INIT_RDLVL_ACT; init_next_state = INIT_PRECHARGE_PREWAIT; end //******************************************************* // Read Leveling //******************************************************* // single row activate. All subsequent read leveling writes and // read will take place in this row INIT_RDLVL_ACT: init_next_state = INIT_RDLVL_ACT_WAIT; // hang out for awhile before issuing subsequent column commands // it's also possible to reach this state at various points // during read leveling - determine what the current stage is INIT_RDLVL_ACT_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin // Just finished an activate. Now either write, read, or precharge // depending on where we are in the training sequence if (!pi_calib_done_r1) init_next_state = INIT_PI_PHASELOCK_READS; else if (!pi_dqs_found_done) // (!pi_dqs_found_start || pi_dqs_found_rank_done)) init_next_state = INIT_RDLVL_STG2_READ; else if (~wrcal_done && (WRLVL == "ON") && (CLK_PERIOD/nCK_PER_CLK <= 2500)) init_next_state = INIT_WRCAL_ACT_WAIT; else if ((!rdlvl_stg1_done && ~stg1_wr_done && ~rdlvl_last_byte_done) || (!prbs_rdlvl_done && ~stg1_wr_done && ~prbs_last_byte_done)) begin // Added to avoid rdlvl_stg1 write data pattern at the start of PRBS rdlvl if (!prbs_rdlvl_done && ~stg1_wr_done && rdlvl_last_byte_done) init_next_state = INIT_RDLVL_ACT_WAIT; else init_next_state = INIT_RDLVL_STG1_WRITE; end else if ((!rdlvl_stg1_done && rdlvl_stg1_start_int) || !prbs_rdlvl_done) begin if (rdlvl_last_byte_done || prbs_last_byte_done) // Added to avoid extra reads at the end of read leveling init_next_state = INIT_RDLVL_ACT_WAIT; else // Case 2: If in stage 1, and just precharged after training // previous byte, then continue reading init_next_state = INIT_RDLVL_STG1_READ; end else if ((prbs_rdlvl_done && rdlvl_stg1_done && (RANKS == 1)) && (WRLVL == "ON") && (CLK_PERIOD/nCK_PER_CLK > 2500)) init_next_state = INIT_WRCAL_ACT_WAIT; else // Otherwise, if we're finished with calibration, then precharge // the row - silly, because we just opened it - possible to take // this out by adding logic to avoid the ACT in first place. 
// Make sure that cnt_cmd_done will handle tRAS(min)
      init_next_state = INIT_PRECHARGE_PREWAIT;
    end

  //**************************************************
  // Back-to-back reads for Phaser_IN Phase locking
  // DQS to FREQ_REF clock
  //**************************************************
  INIT_PI_PHASELOCK_READS:
    if (pi_phase_locked_all_r3 && ~pi_phase_locked_all_r4)
      init_next_state = INIT_PRECHARGE_PREWAIT;

  //*********************************************
  // Stage 1 read-leveling (write and continuous read)
  //*********************************************
  // Write training pattern for stage 1
  // PRBS pattern of TBD length
  INIT_RDLVL_STG1_WRITE:
    // 4:1 DDR3 BL8 will require all 8 words in 1 DIV4 clock cycle
    // 2:1 DDR2/DDR3 BL8 will require 2 DIV2 clock cycles for 8 words
    // 2:1 DDR2 BL4 will require 1 DIV2 clock cycle for 4 words
    // An entire row worth of writes is issued before proceeding to reads.
    // The number of writes is (2^column width)/burst length to accommodate
    // the PRBS pattern for window detection.
    if (stg1_wr_rd_cnt == 9'd1)
      init_next_state = INIT_RDLVL_STG1_WRITE_READ;

  // Write-read turnaround
  INIT_RDLVL_STG1_WRITE_READ:
    if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full))
      init_next_state = INIT_RDLVL_STG1_READ;

  // Continuous read, interruptible by a precharge request from the
  // calibration logic. Also precharges when stage 1 is complete.
  // No precharges occur when reads are provided to the Phaser_IN for
  // phase locking FREQ_REF to read DQS, since data integrity is not
  // important.
  INIT_RDLVL_STG1_READ:
    if (rdlvl_stg1_rank_done || (rdlvl_stg1_done && ~rdlvl_stg1_done_r1) ||
        prech_req_posedge_r || (prbs_rdlvl_done && ~prbs_rdlvl_done_r1))
      init_next_state = INIT_PRECHARGE_PREWAIT;

  //*********************************************
  // DQSFOUND calibration (set of 4 reads with gaps)
  //*********************************************
  // Read of training data. Note that Stage 2 is not a constant read;
  // instead there is a large gap between each set of back-to-back reads
  INIT_RDLVL_STG2_READ:
    // 4 read commands issued back-to-back
    if (num_reads == 'b1)
      init_next_state = INIT_RDLVL_STG2_READ_WAIT;

  // Wait before issuing the next set of reads. If a precharge request
  // comes in then handle it - this can occur after stage 2 calibration is
  // completed for a DQS group
  INIT_RDLVL_STG2_READ_WAIT:
    if (~(phy_ctl_full || phy_cmd_full)) begin
      if (pi_dqs_found_rank_done || pi_dqs_found_done || prech_req_posedge_r)
        init_next_state = INIT_PRECHARGE_PREWAIT;
      else if (cnt_cmd_done_r)
        init_next_state = INIT_RDLVL_STG2_READ;
    end

  //******************************************************************
  // MPR Read Leveling for DDR3 OCLK_DELAYED calibration
  //******************************************************************
  // Issue Load Mode Register 3 command with A[2]=1, A[1:0]=2'b00
  // to enable Multi Purpose Register (MPR) Read
  INIT_MPR_RDEN:
    init_next_state = INIT_MPR_WAIT;

  // Wait tMRD, tMOD
  INIT_MPR_WAIT:
    if (cnt_cmd_done_r) begin
      init_next_state = INIT_MPR_READ;
    end

  // Issue back-to-back read commands to read from MPR with
  // Address bus 0x0000 for BL=8. DQ[0] will output the pre-defined
  // MPR pattern of 01010101 (Rise0 = 1'b0, Fall0 = 1'b1 ...)
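  // MPR flow in brief: INIT_MPR_RDEN issues the MR3 load with A[2]=1,
  // INIT_MPR_WAIT covers tMRD/tMOD, INIT_MPR_READ keeps reading until
  // mpr_rdlvl_done/mpr_rnk_done or a precharge request, and
  // INIT_MPR_DISABLE then reloads MR3 with A[2]=0 to exit MPR mode.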
INIT_MPR_READ: if (mpr_rdlvl_done || mpr_rnk_done || rdlvl_prech_req) init_next_state = INIT_MPR_DISABLE_PREWAIT; INIT_MPR_DISABLE_PREWAIT: if (cnt_cmd_done_r) init_next_state = INIT_MPR_DISABLE; // Issue Load Mode Register 3 command with A[2]=0 to disable // MPR read INIT_MPR_DISABLE: init_next_state = INIT_MPR_DISABLE_WAIT; INIT_MPR_DISABLE_WAIT: init_next_state = INIT_PRECHARGE_PREWAIT; //*********************************************************************** // OCLKDELAY Calibration //*********************************************************************** // This calibration requires single write followed by single read to // determine the Phaser_Out stage 3 delay required to center write DQS // in write DQ valid window. // Single Row Activate command before issuing Write command INIT_OCLKDELAY_ACT: init_next_state = INIT_OCLKDELAY_ACT_WAIT; INIT_OCLKDELAY_ACT_WAIT: if (cnt_cmd_done_r && ~oclk_prech_req) init_next_state = INIT_OCLKDELAY_WRITE; else if (oclkdelay_calib_done || prech_req_posedge_r) init_next_state = INIT_PRECHARGE_PREWAIT; INIT_OCLKDELAY_WRITE: if (oclk_wr_cnt == 4'd1) init_next_state = INIT_OCLKDELAY_WRITE_WAIT; INIT_OCLKDELAY_WRITE_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_OCLKDELAY_READ; INIT_OCLKDELAY_READ: init_next_state = INIT_OCLKDELAY_READ_WAIT; INIT_OCLKDELAY_READ_WAIT: if (~(phy_ctl_full || phy_cmd_full)) begin if (oclk_calib_resume) init_next_state = INIT_OCLKDELAY_WRITE; else if (oclkdelay_calib_done || prech_req_posedge_r || wrlvl_final) init_next_state = INIT_PRECHARGE_PREWAIT; end //********************************************* // Write calibration //********************************************* // single row activate INIT_WRCAL_ACT: init_next_state = INIT_WRCAL_ACT_WAIT; // hang out for awhile before issuing subsequent column command INIT_WRCAL_ACT_WAIT: if (cnt_cmd_done_r && ~wrcal_prech_req) init_next_state = INIT_WRCAL_WRITE; else if (wrcal_done || prech_req_posedge_r) init_next_state = INIT_PRECHARGE_PREWAIT; // Write training pattern for write calibration INIT_WRCAL_WRITE: // Once we've issued enough commands for 8 words - proceed to reads //if (burst_addr_r == 1'b1) if (wrcal_wr_cnt == 4'd1) init_next_state = INIT_WRCAL_WRITE_READ; // Write-read turnaround INIT_WRCAL_WRITE_READ: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_WRCAL_READ; else if (dqsfound_retry) init_next_state = INIT_RDLVL_STG2_READ_WAIT; INIT_WRCAL_READ: if (burst_addr_r == 1'b1) init_next_state = INIT_WRCAL_READ_WAIT; INIT_WRCAL_READ_WAIT: if (~(phy_ctl_full || phy_cmd_full)) begin if (wrcal_resume_r) begin if (wrcal_final_chk) init_next_state = INIT_WRCAL_READ; else init_next_state = INIT_WRCAL_WRITE; end else if (wrcal_done || prech_req_posedge_r || wrcal_act_req || // Added to support PO fine delay inc when TG errors wrlvl_byte_redo || (temp_wrcal_done && ~temp_lmr_done)) init_next_state = INIT_PRECHARGE_PREWAIT; else if (dqsfound_retry) init_next_state = INIT_RDLVL_STG2_READ_WAIT; else if (wrcal_read_req && cnt_wrcal_rd) init_next_state = INIT_WRCAL_MULT_READS; end INIT_WRCAL_MULT_READS: // multiple read commands issued back-to-back if (wrcal_reads == 'b1) init_next_state = INIT_WRCAL_READ_WAIT; //********************************************* // Handling of precharge during and in between read-level stages //********************************************* // Make sure we aren't violating any timing specs by precharging // immediately INIT_PRECHARGE_PREWAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || 
phy_cmd_full)) init_next_state = INIT_PRECHARGE; // Initiate precharge INIT_PRECHARGE: init_next_state = INIT_PRECHARGE_WAIT; INIT_PRECHARGE_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin if ((wrcal_sanity_chk_done && (DRAM_TYPE == "DDR3")) || (rdlvl_stg1_done && prbs_rdlvl_done && pi_dqs_found_done && (DRAM_TYPE == "DDR2"))) init_next_state = INIT_DONE; else if ((wrcal_done || (WRLVL == "OFF")) && rdlvl_stg1_done && prbs_rdlvl_done && pi_dqs_found_done && ((ddr3_lm_done_r) || (DRAM_TYPE == "DDR2"))) // If read leveling and phase detection calibration complete, // and programing the correct burst length then we're finished init_next_state = INIT_WRCAL_ACT; else if ((wrcal_done || (WRLVL == "OFF") || (~wrcal_done && temp_wrcal_done && ~temp_lmr_done)) && (rdlvl_stg1_done || (~wrcal_done && temp_wrcal_done && ~temp_lmr_done)) && prbs_rdlvl_done && rdlvl_stg1_done && pi_dqs_found_done) begin // after all calibration program the correct burst length init_next_state = INIT_LOAD_MR; // Added to support PO fine delay inc when TG errors end else if (~wrcal_done && temp_wrcal_done && temp_lmr_done) init_next_state = INIT_WRCAL_READ_WAIT; else if (rdlvl_stg1_done && pi_dqs_found_done && (WRLVL == "ON")) // If read leveling finished, proceed to write calibration init_next_state = INIT_REFRESH; else // Otherwise, open row for read-leveling purposes init_next_state = INIT_REFRESH; end //******************************************************* // Initialization/Calibration done. Take a long rest, relax //******************************************************* INIT_DONE: init_next_state = INIT_DONE; endcase end //***************************************************************** // Initialization done signal - asserted before leveling starts //***************************************************************** always @(posedge clk) if (rst) mem_init_done_r <= #TCQ 1'b0; else if ((!cnt_dllk_zqinit_done_r && (cnt_dllk_zqinit_r == TDLLK_TZQINIT_DELAY_CNT) && (chip_cnt_r == RANKS-1) && (DRAM_TYPE == "DDR3")) || ( (init_state_r == INIT_LOAD_MR_WAIT) && (ddr2_refresh_flag_r) && (chip_cnt_r == RANKS-1) && (cnt_init_mr_done_r) && (DRAM_TYPE == "DDR2"))) mem_init_done_r <= #TCQ 1'b1; //***************************************************************** // Write Calibration signal to PHY Control Block - asserted before // Write Leveling starts //***************************************************************** //generate //if (RANKS < 2) begin: ranks_one always @(posedge clk) begin if (rst || (done_dqs_tap_inc && (init_state_r == INIT_WRLVL_LOAD_MR2))) write_calib <= #TCQ 1'b0; else if (wrlvl_active_r1) write_calib <= #TCQ 1'b1; end //end else begin: ranks_two // always @(posedge clk) begin // if (rst || // ((init_state_r1 == INIT_WRLVL_LOAD_MR_WAIT) && // ((wrlvl_rank_done_r2 && (chip_cnt_r == RANKS-1)) || // (SIM_CAL_OPTION == "FAST_CAL")))) // write_calib <= #TCQ 1'b0; // else if (wrlvl_active_r1) // write_calib <= #TCQ 1'b1; // end //end //endgenerate //***************************************************************** // Read Calibration signal to PHY Control Block - asserted after // Write Leveling during PHASER_IN phase locking stage. 
// Must be de-asserted before Read Leveling //***************************************************************** always @(posedge clk) begin if (rst || pi_calib_done_r1) read_calib_int <= #TCQ 1'b0; else if (~pi_calib_done_r1 && (init_state_r == INIT_RDLVL_ACT_WAIT) && (cnt_cmd_r == CNTNEXT_CMD)) read_calib_int <= #TCQ 1'b1; end always @(posedge clk) read_calib_r <= #TCQ read_calib_int; always @(posedge clk) begin if (rst || pi_calib_done_r1) read_calib <= #TCQ 1'b0; else if (~pi_calib_done_r1 && (init_state_r == INIT_PI_PHASELOCK_READS)) read_calib <= #TCQ 1'b1; end always @(posedge clk) if (rst) pi_calib_done_r <= #TCQ 1'b0; else if (pi_calib_rank_done_r)// && (chip_cnt_r == RANKS-1)) pi_calib_done_r <= #TCQ 1'b1; always @(posedge clk) if (rst) pi_calib_rank_done_r <= #TCQ 1'b0; else if (pi_phase_locked_all_r3 && ~pi_phase_locked_all_r4) pi_calib_rank_done_r <= #TCQ 1'b1; else pi_calib_rank_done_r <= #TCQ 1'b0; always @(posedge clk) begin if (rst || ((PRE_REV3ES == "ON") && temp_wrcal_done && ~temp_wrcal_done_r)) pi_phaselock_timer <= #TCQ 'd0; else if (((init_state_r == INIT_PI_PHASELOCK_READS) && (pi_phaselock_timer != PHASELOCKED_TIMEOUT)) || tg_timer_go) pi_phaselock_timer <= #TCQ pi_phaselock_timer + 1; else pi_phaselock_timer <= #TCQ pi_phaselock_timer; end assign pi_phase_locked_err = (pi_phaselock_timer == PHASELOCKED_TIMEOUT) ? 1'b1 : 1'b0; //***************************************************************** // DDR3 final burst length programming done. For DDR3 during // calibration the burst length is fixed to BL8. After calibration // the correct burst length is programmed. //***************************************************************** always @(posedge clk) if (rst) ddr3_lm_done_r <= #TCQ 1'b0; else if ((init_state_r == INIT_LOAD_MR_WAIT) && (chip_cnt_r == RANKS-1) && wrcal_done) ddr3_lm_done_r <= #TCQ 1'b1; always @(posedge clk) begin pi_dqs_found_rank_done_r <= #TCQ pi_dqs_found_rank_done; pi_phase_locked_all_r1 <= #TCQ pi_phase_locked_all; pi_phase_locked_all_r2 <= #TCQ pi_phase_locked_all_r1; pi_phase_locked_all_r3 <= #TCQ pi_phase_locked_all_r2; pi_phase_locked_all_r4 <= #TCQ pi_phase_locked_all_r3; pi_dqs_found_all_r <= #TCQ pi_dqs_found_done; pi_calib_done_r1 <= #TCQ pi_calib_done_r; end //*************************************************************************** // Logic for deep memory (multi-rank) configurations //*************************************************************************** // For DDR3 asserted when generate if (RANKS < 2) begin: single_rank always @(posedge clk) chip_cnt_r <= #TCQ 2'b00; end else begin: dual_rank always @(posedge clk) if (rst || // Set chip_cnt_r to 2'b00 after both Ranks are read leveled (rdlvl_stg1_done && prbs_rdlvl_done && ~wrcal_done) || // Set chip_cnt_r to 2'b00 after both Ranks are write leveled (wrlvl_done_r && (init_state_r==INIT_WRLVL_LOAD_MR2_WAIT)))begin chip_cnt_r <= #TCQ 2'b00; end else if ((((init_state_r == INIT_WAIT_DLLK_ZQINIT) && (cnt_dllk_zqinit_r == TDLLK_TZQINIT_DELAY_CNT)) && (DRAM_TYPE == "DDR3")) || ((init_state_r==INIT_REFRESH_RNK2_WAIT) && (cnt_cmd_r=='d36)) || //mpr_rnk_done || //(rdlvl_stg1_rank_done && ~rdlvl_last_byte_done) || //(stg1_wr_done && (init_state_r == INIT_REFRESH) && //~(rnk_ref_cnt && rdlvl_last_byte_done)) || // Increment chip_cnt_r to issue Refresh to second rank (~pi_dqs_found_all_r && (init_state_r==INIT_PRECHARGE_PREWAIT) && (cnt_cmd_r=='d36)) || // Increment chip_cnt_r when DQSFOUND done for the Rank (pi_dqs_found_rank_done && ~pi_dqs_found_rank_done_r) || ((init_state_r == 
INIT_LOAD_MR_WAIT)&& cnt_cmd_done_r && wrcal_done) || ((init_state_r == INIT_DDR2_MULTI_RANK) && (DRAM_TYPE == "DDR2"))) begin if ((~mem_init_done_r || ~rdlvl_stg1_done || ~pi_dqs_found_done || // condition to increment chip_cnt during // final burst length programming for DDR3 ~pi_calib_done_r || wrcal_done) //~mpr_rdlvl_done || && (chip_cnt_r != RANKS-1)) chip_cnt_r <= #TCQ chip_cnt_r + 1; else chip_cnt_r <= #TCQ 2'b00; end end endgenerate generate if ((REG_CTRL == "ON") && (RANKS == 1)) begin: DDR3_RDIMM_1rank always @(posedge clk) begin if (rst) phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; else if (init_state_r == INIT_REG_WRITE) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if(!(CWL_M%2)) begin phy_int_cs_n[0%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[1%nCK_PER_CLK] <= #TCQ 1'b0; end else begin phy_int_cs_n[2%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[3%nCK_PER_CLK] <= #TCQ 1'b0; end end else if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) //even CWL phy_int_cs_n[0] <= #TCQ 1'b0; else // odd CWL phy_int_cs_n[1*nCS_PER_RANK] <= #TCQ 1'b0; end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; end end else if (RANKS == 1) begin: DDR3_1rank always @(posedge clk) begin if (rst) phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; else if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) begin //even CWL for (n = 0; n < nCS_PER_RANK; n = n + 1) begin phy_int_cs_n[n] <= #TCQ 1'b0; end end else begin //odd CWL for (p = nCS_PER_RANK; p < 2*nCS_PER_RANK; p = p + 1) begin phy_int_cs_n[p] <= #TCQ 1'b0; end end end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; end end else if ((REG_CTRL == "ON") && (RANKS == 2)) begin: DDR3_2rank always @(posedge clk) begin if (rst) phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; else if (init_state_r == INIT_REG_WRITE) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if(!(CWL_M%2)) begin phy_int_cs_n[0%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[1%nCK_PER_CLK] <= #TCQ 1'b0; end else begin phy_int_cs_n[2%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[3%nCK_PER_CLK] <= #TCQ 1'b0; end end else begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; case (chip_cnt_r) 2'b00:begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == 
INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) //even CWL phy_int_cs_n[0] <= #TCQ 1'b0; else // odd CWL phy_int_cs_n[1*CS_WIDTH*nCS_PER_RANK] <= #TCQ 1'b0; end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; //for (n = 0; n < nCS_PER_RANK*nCK_PER_CLK*2; n = n + (nCS_PER_RANK*2)) begin // // phy_int_cs_n[n+:nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; //end end 2'b01:begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) //even CWL phy_int_cs_n[1] <= #TCQ 1'b0; else // odd CWL phy_int_cs_n[1+1*CS_WIDTH*nCS_PER_RANK] <= #TCQ 1'b0; end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; //for (p = nCS_PER_RANK; p < nCS_PER_RANK*nCK_PER_CLK*2; p = p + (nCS_PER_RANK*2)) begin // // phy_int_cs_n[p+:nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; //end end endcase end end end else if (RANKS == 2) begin: DDR3_2rank always @(posedge clk) begin if (rst) phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; else if (init_state_r == INIT_REG_WRITE) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if(!(CWL_M%2)) begin phy_int_cs_n[0%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[1%nCK_PER_CLK] <= #TCQ 1'b0; end else begin phy_int_cs_n[2%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[3%nCK_PER_CLK] <= #TCQ 1'b0; end end else begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; case (chip_cnt_r) 2'b00:begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) begin //even CWL for (n = 0; n < nCS_PER_RANK; n = n + 1) begin phy_int_cs_n[n] <= #TCQ 1'b0; end end else begin // odd CWL for (p = CS_WIDTH*nCS_PER_RANK; p < (CS_WIDTH*nCS_PER_RANK + nCS_PER_RANK); p = p + 1) begin phy_int_cs_n[p] <= #TCQ 1'b0; end end end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; //for (n = 0; n < nCS_PER_RANK*nCK_PER_CLK*2; n = n + (nCS_PER_RANK*2)) begin // // phy_int_cs_n[n+:nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; //end end 2'b01:begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == 
INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) begin //even CWL for (q = nCS_PER_RANK; q < (2 * nCS_PER_RANK); q = q + 1) begin phy_int_cs_n[q] <= #TCQ 1'b0; end end else begin // odd CWL for (m = (nCS_PER_RANK*CS_WIDTH + nCS_PER_RANK); m < (nCS_PER_RANK*CS_WIDTH + 2*nCS_PER_RANK); m = m + 1) begin phy_int_cs_n[m] <= #TCQ 1'b0; end end end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; //for (p = nCS_PER_RANK; p < nCS_PER_RANK*nCK_PER_CLK*2; p = p + (nCS_PER_RANK*2)) begin // // phy_int_cs_n[p+:nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; //end end endcase end end // always @ (posedge clk) end // commented out for now. Need it for DDR2 2T timing /* end else begin: DDR2 always @(posedge clk) if (rst) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; end else begin if (init_state_r == INIT_REG_WRITE) begin // All ranks selected simultaneously phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b0}}; end else if ((wrlvl_odt) || (init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH)) begin phy_int_cs_n[0] <= #TCQ 1'b0; end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; end // else: !if(rst) end // block: DDR2 */ endgenerate assign phy_cs_n = phy_int_cs_n; //*************************************************************************** // Write/read burst logic for calibration //*************************************************************************** assign rdlvl_wr = (init_state_r == INIT_OCLKDELAY_WRITE) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE); assign rdlvl_rd = (init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_MPR_READ) || (init_state_r == INIT_WRCAL_MULT_READS); assign rdlvl_wr_rd = rdlvl_wr | rdlvl_rd; //*************************************************************************** // Address generation and logic to count # of writes/reads issued during // certain stages of calibration //*************************************************************************** // Column address generation logic: // Keep track of the current column address - since all bursts are in // increments of 8 only during calibration, we need to keep track of // addresses [COL_WIDTH-1:3], lower order address bits will always = 0 always @(posedge clk) if (rst || wrcal_done) burst_addr_r <= #TCQ 1'b0; else if ((init_state_r == INIT_WRCAL_ACT_WAIT) || (init_state_r == 
INIT_OCLKDELAY_ACT_WAIT) || (init_state_r == INIT_OCLKDELAY_WRITE) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_WRITE_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS) || (init_state_r == INIT_WRCAL_READ_WAIT)) burst_addr_r <= #TCQ 1'b1; else if (rdlvl_wr_rd && new_burst_r) burst_addr_r <= #TCQ ~burst_addr_r; else burst_addr_r <= #TCQ 1'b0; // Read Level Stage 1 requires writes to the entire row since // a PRBS pattern is being written. This counter keeps track // of the number of writes which depends on the column width // The (stg1_wr_rd_cnt==9'd0) condition was added so the col // address wraps around during stage1 reads always @(posedge clk) if (rst || ((init_state_r == INIT_RDLVL_STG1_WRITE_READ) && ~rdlvl_stg1_done)) stg1_wr_rd_cnt <= #TCQ NUM_STG1_WR_RD; else if (rdlvl_last_byte_done || (stg1_wr_rd_cnt == 9'd1) || (prbs_rdlvl_prech_req && (init_state_r == INIT_RDLVL_ACT_WAIT))) stg1_wr_rd_cnt <= #TCQ 'd128; else if (((init_state_r == INIT_RDLVL_STG1_WRITE) && new_burst_r && ~phy_data_full) ||((init_state_r == INIT_RDLVL_STG1_READ) && rdlvl_stg1_done)) stg1_wr_rd_cnt <= #TCQ stg1_wr_rd_cnt - 1; // OCLKDELAY calibration requires multiple writes because // write can be up to 2 cycles early since OCLKDELAY tap // can go down to 0 always @(posedge clk) if (rst || (init_state_r == INIT_OCLKDELAY_WRITE_WAIT) || (oclk_wr_cnt == 4'd0)) oclk_wr_cnt <= #TCQ NUM_STG1_WR_RD; else if ((init_state_r == INIT_OCLKDELAY_WRITE) && new_burst_r && ~phy_data_full) oclk_wr_cnt <= #TCQ oclk_wr_cnt - 1; // Write calibration requires multiple writes because // write can be up to 2 cycles early due to new write // leveling algorithm to avoid late writes always @(posedge clk) if (rst || (init_state_r == INIT_WRCAL_WRITE_READ) || (wrcal_wr_cnt == 4'd0)) wrcal_wr_cnt <= #TCQ NUM_STG1_WR_RD; else if ((init_state_r == INIT_WRCAL_WRITE) && new_burst_r && ~phy_data_full) wrcal_wr_cnt <= #TCQ wrcal_wr_cnt - 1; generate if(nCK_PER_CLK == 4) begin:back_to_back_reads_4_1 // 4 back-to-back reads with gaps for // read data_offset calibration (rdlvl stage 2) always @(posedge clk) if (rst || (init_state_r == INIT_RDLVL_STG2_READ_WAIT)) num_reads <= #TCQ 3'b000; else if ((num_reads > 3'b000) && ~(phy_ctl_full || phy_cmd_full)) num_reads <= #TCQ num_reads - 1; else if ((init_state_r == INIT_RDLVL_STG2_READ) || phy_ctl_full || phy_cmd_full && new_burst_r) num_reads <= #TCQ 3'b011; end else if(nCK_PER_CLK == 2) begin: back_to_back_reads_2_1 // 4 back-to-back reads with gaps for // read data_offset calibration (rdlvl stage 2) always @(posedge clk) if (rst || (init_state_r == INIT_RDLVL_STG2_READ_WAIT)) num_reads <= #TCQ 3'b000; else if ((num_reads > 3'b000) && ~(phy_ctl_full || phy_cmd_full)) num_reads <= #TCQ num_reads - 1; else if ((init_state_r == INIT_RDLVL_STG2_READ) || phy_ctl_full || phy_cmd_full && new_burst_r) num_reads <= #TCQ 3'b111; end endgenerate // back-to-back reads during write calibration always @(posedge clk) if (rst ||(init_state_r == INIT_WRCAL_READ_WAIT)) wrcal_reads <= #TCQ 2'b00; else if ((wrcal_reads > 2'b00) && ~(phy_ctl_full || phy_cmd_full)) wrcal_reads <= #TCQ wrcal_reads - 1; else if ((init_state_r == INIT_WRCAL_MULT_READS) || phy_ctl_full || phy_cmd_full && new_burst_r) wrcal_reads <= #TCQ 'd255; // determine how often to issue row command during read leveling writes // and reads always @(posedge clk) if (rdlvl_wr_rd) begin // 2:1 mode - every other command issued is a data command // 4:1 mode - 
every command issued is a data command if (nCK_PER_CLK == 2) begin if (!phy_ctl_full) new_burst_r <= #TCQ ~new_burst_r; end else new_burst_r <= #TCQ 1'b1; end else new_burst_r <= #TCQ 1'b1; // indicate when a write is occurring. PHY_WRDATA_EN must be asserted // simultaneous with the corresponding command/address for CWL = 5,6 always @(posedge clk) begin rdlvl_wr_r <= #TCQ rdlvl_wr; calib_wrdata_en <= #TCQ phy_wrdata_en; end always @(posedge clk) begin if (rst || wrcal_done) extend_cal_pat <= #TCQ 1'b0; else if (temp_lmr_done && (PRE_REV3ES == "ON")) extend_cal_pat <= #TCQ 1'b1; end generate if ((nCK_PER_CLK == 4) || (BURST_MODE == "4")) begin: wrdqen_div4 // Write data enable asserted for one DIV4 clock cycle // Only BL8 supported with DIV4. DDR2 BL4 will use DIV2. always @(rst or phy_data_full or init_state_r) begin if (~phy_data_full && ((init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE) || (init_state_r == INIT_WRCAL_WRITE))) phy_wrdata_en = 1'b1; else phy_wrdata_en = 1'b0; end end else begin: wrdqen_div2 // block: wrdqen_div4 always @(rdlvl_wr or phy_ctl_full or new_burst_r or phy_wrdata_en_r1 or phy_data_full) if((rdlvl_wr & ~phy_ctl_full & new_burst_r & ~phy_data_full) | phy_wrdata_en_r1) phy_wrdata_en = 1'b1; else phy_wrdata_en = 1'b0; always @(posedge clk) phy_wrdata_en_r1 <= #TCQ rdlvl_wr & ~phy_ctl_full & new_burst_r & ~phy_data_full; always @(posedge clk) begin if (!phy_wrdata_en & first_rdlvl_pat_r) wrdata_pat_cnt <= #TCQ 2'b00; else if (wrdata_pat_cnt == 2'b11) wrdata_pat_cnt <= #TCQ 2'b10; else wrdata_pat_cnt <= #TCQ wrdata_pat_cnt + 1; end always @(posedge clk) begin if (!phy_wrdata_en & first_wrcal_pat_r) wrcal_pat_cnt <= #TCQ 2'b00; else if (extend_cal_pat && (wrcal_pat_cnt == 2'b01)) wrcal_pat_cnt <= #TCQ 2'b00; else if (wrcal_pat_cnt == 2'b11) wrcal_pat_cnt <= #TCQ 2'b10; else wrcal_pat_cnt <= #TCQ wrcal_pat_cnt + 1; end end endgenerate // indicate when a write is occurring. PHY_RDDATA_EN must be asserted // simultaneous with the corresponding command/address. PHY_RDDATA_EN // is used during read-leveling to determine read latency assign phy_rddata_en = ~phy_if_empty; // Read data valid generation for MC and User Interface after calibration is // complete assign phy_rddata_valid = init_complete_r1_timing ? phy_rddata_en : 1'b0; //*************************************************************************** // Generate training data written at start of each read-leveling stage // For every stage of read leveling, 8 words are written into memory // The format is as follows (shown as {rise,fall}): // Stage 1: 0xF, 0x0, 0xF, 0x0, 0xF, 0x0, 0xF, 0x0 // Stage 2: 0xF, 0x0, 0xA, 0x5, 0x5, 0xA, 0x9, 0x6 //*************************************************************************** always @(posedge clk) if ((init_state_r == INIT_IDLE) || (init_state_r == INIT_RDLVL_STG1_WRITE)) cnt_init_data_r <= #TCQ 2'b00; else if (phy_wrdata_en) cnt_init_data_r <= #TCQ cnt_init_data_r + 1; else if (init_state_r == INIT_WRCAL_WRITE) cnt_init_data_r <= #TCQ 2'b10; // write different sequence for very // first write to memory only. 
Used to help us differentiate // if the writes are "early" or "on-time" during read leveling always @(posedge clk) if (rst || rdlvl_stg1_rank_done) first_rdlvl_pat_r <= #TCQ 1'b1; else if (phy_wrdata_en && (init_state_r == INIT_RDLVL_STG1_WRITE)) first_rdlvl_pat_r <= #TCQ 1'b0; always @(posedge clk) if (rst || wrcal_resume || (init_state_r == INIT_WRCAL_ACT_WAIT)) first_wrcal_pat_r <= #TCQ 1'b1; else if (phy_wrdata_en && (init_state_r == INIT_WRCAL_WRITE)) first_wrcal_pat_r <= #TCQ 1'b0; generate if ((CLK_PERIOD/nCK_PER_CLK > 2500) && (nCK_PER_CLK == 2)) begin: wrdq_div2_2to1_rdlvl_first always @(posedge clk) if (~oclkdelay_calib_done) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hF}}, {DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}}, {DQ_WIDTH/4{4'h0}}}; else if (!rdlvl_stg1_done) begin // The 16 words for stage 1 write data in 2:1 mode is written // over 4 consecutive controller clock cycles. Note that write // data follows phy_wrdata_en by one clock cycle case (wrdata_pat_cnt) 2'b00: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h3}}, {DQ_WIDTH/4{4'h9}}}; end 2'b01: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hC}}}; end 2'b10: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h1}}, {DQ_WIDTH/4{4'hB}}}; end 2'b11: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hC}}}; end endcase end else if (!prbs_rdlvl_done && ~phy_data_full) begin // prbs_o is 8-bits wide hence {DQ_WIDTH/8{prbs_o}} results in // prbs_o being concatenated 8 times resulting in DQ_WIDTH phy_wrdata <= #TCQ {{DQ_WIDTH/8{prbs_o[4*8-1:3*8]}}, {DQ_WIDTH/8{prbs_o[3*8-1:2*8]}}, {DQ_WIDTH/8{prbs_o[2*8-1:8]}}, {DQ_WIDTH/8{prbs_o[8-1:0]}}}; end else if (!wrcal_done) begin case (wrcal_pat_cnt) 2'b00: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}}}; end 2'b01: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h5}}}; end 2'b10: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h1}}, {DQ_WIDTH/4{4'hB}}}; end 2'b11: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h8}}, {DQ_WIDTH/4{4'hD}}, {DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h4}}}; end endcase end end else if ((CLK_PERIOD/nCK_PER_CLK > 2500) && (nCK_PER_CLK == 4)) begin: wrdq_div2_4to1_rdlvl_first always @(posedge clk) if (~oclkdelay_calib_done) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}}; else if (!rdlvl_stg1_done && ~phy_data_full) // write different sequence for very // first write to memory only. 
Used to help us differentiate // if the writes are "early" or "on-time" during read leveling if (first_rdlvl_pat_r) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}},{DQ_WIDTH/4{4'hC}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h3}},{DQ_WIDTH/4{4'h9}}}; else // For all others, change the first two words written in order // to differentiate the "early write" and "on-time write" // readback patterns during read leveling phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}},{DQ_WIDTH/4{4'hC}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h1}},{DQ_WIDTH/4{4'hB}}}; else if (!prbs_rdlvl_done && ~phy_data_full) // prbs_o is 8-bits wide hence {DQ_WIDTH/8{prbs_o}} results in // prbs_o being concatenated 8 times resulting in DQ_WIDTH phy_wrdata <= #TCQ {{DQ_WIDTH/8{prbs_o[8*8-1:7*8]}},{DQ_WIDTH/8{prbs_o[7*8-1:6*8]}}, {DQ_WIDTH/8{prbs_o[6*8-1:5*8]}},{DQ_WIDTH/8{prbs_o[5*8-1:4*8]}}, {DQ_WIDTH/8{prbs_o[4*8-1:3*8]}},{DQ_WIDTH/8{prbs_o[3*8-1:2*8]}}, {DQ_WIDTH/8{prbs_o[2*8-1:8]}},{DQ_WIDTH/8{prbs_o[8-1:0]}}}; else if (!wrcal_done) if (first_wrcal_pat_r) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}},{DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}},{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'h5}},{DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}},{DQ_WIDTH/4{4'hF}}}; else phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h8}},{DQ_WIDTH/4{4'hD}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h1}},{DQ_WIDTH/4{4'hB}}}; end else if (nCK_PER_CLK == 4) begin: wrdq_div1_4to1_wrcal_first always @(posedge clk) if ((~oclkdelay_calib_done) && (DRAM_TYPE == "DDR3")) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}}; else if ((!wrcal_done)&& (DRAM_TYPE == "DDR3")) begin if (extend_cal_pat) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}},{DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}},{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'h5}},{DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}},{DQ_WIDTH/4{4'hF}}}; else if (first_wrcal_pat_r) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}},{DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}},{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'h5}},{DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}},{DQ_WIDTH/4{4'hF}}}; else phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h8}},{DQ_WIDTH/4{4'hD}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h1}},{DQ_WIDTH/4{4'hB}}}; end else if (!rdlvl_stg1_done && ~phy_data_full) begin // write different sequence for very // first write to memory only. 
Used to help us differentiate // if the writes are "early" or "on-time" during read leveling if (first_rdlvl_pat_r) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}},{DQ_WIDTH/4{4'hC}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h3}},{DQ_WIDTH/4{4'h9}}}; else // For all others, change the first two words written in order // to differentiate the "early write" and "on-time write" // readback patterns during read leveling phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}},{DQ_WIDTH/4{4'hC}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h1}},{DQ_WIDTH/4{4'hB}}}; end else if (!prbs_rdlvl_done && ~phy_data_full) // prbs_o is 8-bits wide hence {DQ_WIDTH/8{prbs_o}} results in // prbs_o being concatenated 8 times resulting in DQ_WIDTH phy_wrdata <= #TCQ {{DQ_WIDTH/8{prbs_o[8*8-1:7*8]}},{DQ_WIDTH/8{prbs_o[7*8-1:6*8]}}, {DQ_WIDTH/8{prbs_o[6*8-1:5*8]}},{DQ_WIDTH/8{prbs_o[5*8-1:4*8]}}, {DQ_WIDTH/8{prbs_o[4*8-1:3*8]}},{DQ_WIDTH/8{prbs_o[3*8-1:2*8]}}, {DQ_WIDTH/8{prbs_o[2*8-1:8]}},{DQ_WIDTH/8{prbs_o[8-1:0]}}}; end else begin: wrdq_div1_2to1_wrcal_first always @(posedge clk) if ((~oclkdelay_calib_done)&& (DRAM_TYPE == "DDR3")) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hF}}, {DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}}, {DQ_WIDTH/4{4'h0}}}; else if ((!wrcal_done) && (DRAM_TYPE == "DDR3"))begin case (wrcal_pat_cnt) 2'b00: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}}}; end 2'b01: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h5}}}; end 2'b10: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h1}}, {DQ_WIDTH/4{4'hB}}}; end 2'b11: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h8}}, {DQ_WIDTH/4{4'hD}}, {DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h4}}}; end endcase end else if (!rdlvl_stg1_done) begin // The 16 words for stage 1 write data in 2:1 mode is written // over 4 consecutive controller clock cycles. 
Note that write // data follows phy_wrdata_en by one clock cycle case (wrdata_pat_cnt) 2'b00: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h3}}, {DQ_WIDTH/4{4'h9}}}; end 2'b01: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hC}}}; end 2'b10: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h1}}, {DQ_WIDTH/4{4'hB}}}; end 2'b11: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hC}}}; end endcase end else if (!prbs_rdlvl_done && ~phy_data_full) begin // prbs_o is 8-bits wide hence {DQ_WIDTH/8{prbs_o}} results in // prbs_o being concatenated 8 times resulting in DQ_WIDTH phy_wrdata <= #TCQ {{DQ_WIDTH/8{prbs_o[4*8-1:3*8]}}, {DQ_WIDTH/8{prbs_o[3*8-1:2*8]}}, {DQ_WIDTH/8{prbs_o[2*8-1:8]}}, {DQ_WIDTH/8{prbs_o[8-1:0]}}}; end end endgenerate //*************************************************************************** // Memory control/address //*************************************************************************** // Phases [2] and [3] are always deasserted for 4:1 mode generate if (nCK_PER_CLK == 4) begin: gen_div4_ca_tieoff always @(posedge clk) begin phy_ras_n[3:2] <= #TCQ 3'b11; phy_cas_n[3:2] <= #TCQ 3'b11; phy_we_n[3:2] <= #TCQ 3'b11; end end endgenerate // Assert RAS when: (1) Loading MRS, (2) Activating Row, (3) Precharging // (4) auto refresh generate if (!(CWL_M % 2)) begin: even_cwl always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH))begin phy_ras_n[0] <= #TCQ 1'b0; phy_ras_n[1] <= #TCQ 1'b1; end else begin phy_ras_n[0] <= #TCQ 1'b1; phy_ras_n[1] <= #TCQ 1'b1; end end // Assert CAS when: (1) Loading MRS, (2) Issuing Read/Write command // (3) auto refresh always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r))begin phy_cas_n[0] <= #TCQ 1'b0; phy_cas_n[1] <= #TCQ 1'b1; end else begin phy_cas_n[0] <= #TCQ 1'b1; phy_cas_n[1] <= #TCQ 1'b1; end end // Assert WE when: (1) Loading MRS, (2) Issuing Write command (only // occur during read leveling), (3) Issuing ZQ Long Calib command, // (4) Precharge always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE)|| (rdlvl_wr && new_burst_r))begin phy_we_n[0] <= #TCQ 1'b0; phy_we_n[1] <= #TCQ 1'b1; end else begin phy_we_n[0] <= #TCQ 1'b1; phy_we_n[1] <= #TCQ 1'b1; end end end else begin: odd_cwl always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || 
(init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH))begin phy_ras_n[0] <= #TCQ 1'b1; phy_ras_n[1] <= #TCQ 1'b0; end else begin phy_ras_n[0] <= #TCQ 1'b1; phy_ras_n[1] <= #TCQ 1'b1; end end // Assert CAS when: (1) Loading MRS, (2) Issuing Read/Write command // (3) auto refresh always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r))begin phy_cas_n[0] <= #TCQ 1'b1; phy_cas_n[1] <= #TCQ 1'b0; end else begin phy_cas_n[0] <= #TCQ 1'b1; phy_cas_n[1] <= #TCQ 1'b1; end end // Assert WE when: (1) Loading MRS, (2) Issuing Write command (only // occur during read leveling), (3) Issuing ZQ Long Calib command, // (4) Precharge always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE)|| (rdlvl_wr && new_burst_r))begin phy_we_n[0] <= #TCQ 1'b1; phy_we_n[1] <= #TCQ 1'b0; end else begin phy_we_n[0] <= #TCQ 1'b1; phy_we_n[1] <= #TCQ 1'b1; end end end endgenerate // Assign calib_cmd for the command field in PHY_Ctl_Word always @(posedge clk) begin if (wr_level_dqs_asrt) begin // Request to toggle DQS during write leveling calib_cmd <= #TCQ 3'b001; if (CWL_M % 2) begin // odd write latency calib_data_offset_0 <= #TCQ CWL_M + 3; calib_data_offset_1 <= #TCQ CWL_M + 3; calib_data_offset_2 <= #TCQ CWL_M + 3; calib_cas_slot <= #TCQ 2'b01; end else begin // even write latency calib_data_offset_0 <= #TCQ CWL_M + 2; calib_data_offset_1 <= #TCQ CWL_M + 2; calib_data_offset_2 <= #TCQ CWL_M + 2; calib_cas_slot <= #TCQ 2'b00; end end else if (rdlvl_wr && new_burst_r) begin // Write Command calib_cmd <= #TCQ 3'b001; if (CWL_M % 2) begin // odd write latency calib_data_offset_0 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 3 : CWL_M - 1; calib_data_offset_1 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 3 : CWL_M - 1; calib_data_offset_2 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 3 : CWL_M - 1; calib_cas_slot <= #TCQ 2'b01; end else begin // even write latency calib_data_offset_0 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 2 : CWL_M - 2 ; calib_data_offset_1 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 2 : CWL_M - 2 ; calib_data_offset_2 <= #TCQ (nCK_PER_CLK == 4) ? 
CWL_M + 2 : CWL_M - 2 ; calib_cas_slot <= #TCQ 2'b00; end end else if (rdlvl_rd && new_burst_r) begin // Read Command calib_cmd <= #TCQ 3'b011; if (CWL_M % 2) calib_cas_slot <= #TCQ 2'b01; else calib_cas_slot <= #TCQ 2'b00; if (~pi_calib_done_r1) begin calib_data_offset_0 <= #TCQ 6'd0; calib_data_offset_1 <= #TCQ 6'd0; calib_data_offset_2 <= #TCQ 6'd0; end else if (~pi_dqs_found_done_r1) begin calib_data_offset_0 <= #TCQ rd_data_offset_0; calib_data_offset_1 <= #TCQ rd_data_offset_1; calib_data_offset_2 <= #TCQ rd_data_offset_2; end else begin calib_data_offset_0 <= #TCQ rd_data_offset_ranks_0[6*chip_cnt_r+:6]; calib_data_offset_1 <= #TCQ rd_data_offset_ranks_1[6*chip_cnt_r+:6]; calib_data_offset_2 <= #TCQ rd_data_offset_ranks_2[6*chip_cnt_r+:6]; end end else begin // Non-Data Commands like NOP, MRS, ZQ Long Cal, Precharge, // Active, Refresh calib_cmd <= #TCQ 3'b100; calib_data_offset_0 <= #TCQ 6'd0; calib_data_offset_1 <= #TCQ 6'd0; calib_data_offset_2 <= #TCQ 6'd0; if (CWL_M % 2) calib_cas_slot <= #TCQ 2'b01; else calib_cas_slot <= #TCQ 2'b00; end end // Write Enable to PHY_Control FIFO always asserted // No danger of this FIFO being Full with 4:1 sync clock ratio // This is also the write enable to the command OUT_FIFO always @(posedge clk) begin if (rst) begin calib_ctl_wren <= #TCQ 1'b0; calib_cmd_wren <= #TCQ 1'b0; calib_seq <= #TCQ 2'b00; end else if (cnt_pwron_cke_done_r && phy_ctl_ready && ~(phy_ctl_full || phy_cmd_full )) begin calib_ctl_wren <= #TCQ 1'b1; calib_cmd_wren <= #TCQ 1'b1; calib_seq <= #TCQ calib_seq + 1; end else begin calib_ctl_wren <= #TCQ 1'b0; calib_cmd_wren <= #TCQ 1'b0; calib_seq <= #TCQ calib_seq; end end generate genvar rnk_i; for (rnk_i = 0; rnk_i < 4; rnk_i = rnk_i + 1) begin: gen_rnk always @(posedge clk) begin if (rst) begin mr2_r[rnk_i] <= #TCQ 2'b00; mr1_r[rnk_i] <= #TCQ 3'b000; end else begin mr2_r[rnk_i] <= #TCQ tmp_mr2_r[rnk_i]; mr1_r[rnk_i] <= #TCQ tmp_mr1_r[rnk_i]; end end end endgenerate // ODT assignment based on slot config and slot present // For single slot systems slot_1_present input will be ignored // Assuming component interfaces to be single slot systems generate if (nSLOTS == 1) begin: gen_single_slot_odt always @(posedge clk) begin if (rst) begin tmp_mr2_r[1] <= #TCQ 2'b00; tmp_mr2_r[2] <= #TCQ 2'b00; tmp_mr2_r[3] <= #TCQ 2'b00; tmp_mr1_r[1] <= #TCQ 3'b000; tmp_mr1_r[2] <= #TCQ 3'b000; tmp_mr1_r[3] <= #TCQ 3'b000; phy_tmp_cs1_r <= #TCQ {CS_WIDTH*nCS_PER_RANK{1'b1}}; phy_tmp_odt_r <= #TCQ 4'b0000; phy_tmp_odt_r1 <= #TCQ phy_tmp_odt_r; end else begin case ({slot_0_present[0],slot_0_present[1], slot_0_present[2],slot_0_present[3]}) // Single slot configuration with quad rank // Assuming same behavior as single slot dual rank for now // DDR2 does not have quad rank parts 4'b1111: begin if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 RTT_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 
2'b01 : 2'b10; //Rank0 RTT_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end phy_tmp_odt_r <= #TCQ 4'b0001; // Chip Select assignments phy_tmp_cs1_r[((chip_cnt_r*nCS_PER_RANK) ) +: nCS_PER_RANK] <= #TCQ 'b0; end // Single slot configuration with single rank 4'b1000: begin phy_tmp_odt_r <= #TCQ 4'b0001; if ((REG_CTRL == "ON") && (nCS_PER_RANK > 1)) begin phy_tmp_cs1_r[chip_cnt_r] <= #TCQ 1'b0; end else begin phy_tmp_cs1_r <= #TCQ {CS_WIDTH*nCS_PER_RANK{1'b0}}; end if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && ((cnt_init_mr_r == 2'd0) || (USE_ODT_PORT == 1)))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 RTT_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 RTT_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end // Single slot configuration with dual rank 4'b1100: begin phy_tmp_odt_r <= #TCQ 4'b0001; // Chip Select assignments phy_tmp_cs1_r[((chip_cnt_r*nCS_PER_RANK) ) +: nCS_PER_RANK] <= #TCQ 'b0; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end default: begin phy_tmp_odt_r <= #TCQ 4'b0001; phy_tmp_cs1_r <= #TCQ {CS_WIDTH*nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done)) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end endcase end end end else if (nSLOTS == 2) begin: gen_dual_slot_odt always @ (posedge clk) begin if (rst) begin tmp_mr2_r[1] <= #TCQ 2'b00; tmp_mr2_r[2] <= #TCQ 2'b00; tmp_mr2_r[3] <= #TCQ 2'b00; tmp_mr1_r[1] <= #TCQ 3'b000; tmp_mr1_r[2] <= #TCQ 3'b000; tmp_mr1_r[3] <= #TCQ 3'b000; phy_tmp_odt_r <= #TCQ 4'b0000; phy_tmp_cs1_r <= #TCQ {CS_WIDTH*nCS_PER_RANK{1'b1}}; phy_tmp_odt_r1 <= #TCQ phy_tmp_odt_r; end else begin case ({slot_0_present[0],slot_0_present[1], slot_1_present[0],slot_1_present[1]}) // Two slot configuration, one slot present, single rank 4'b10_00: begin if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // odt turned on only during write phy_tmp_odt_r <= #TCQ 4'b0001; end phy_tmp_cs1_r <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done)) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 
2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end 4'b00_10: begin //Rank1 ODT enabled if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // odt turned on only during write phy_tmp_odt_r <= #TCQ 4'b0001; end phy_tmp_cs1_r <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done)) begin //Rank1 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank1 Rtt_NOM defaults to 120 ohms tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end // Two slot configuration, one slot present, dual rank 4'b00_11: begin if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // odt turned on only during write phy_tmp_odt_r <= #TCQ 4'b0001; end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end 4'b11_00: begin if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // odt turned on only during write phy_tmp_odt_r <= #TCQ 4'b0001; end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank1 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank1 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 
2'b01 : 2'b10; //Rank1 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end // Two slot configuration, one rank per slot 4'b10_10: begin if(DRAM_TYPE == "DDR2")begin if(chip_cnt_r == 2'b00)begin phy_tmp_odt_r <= #TCQ 4'b0010; //bit0 for rank0 end else begin phy_tmp_odt_r <= #TCQ 4'b0001; //bit0 for rank0 end end else begin if(init_state_r == INIT_WRLVL_WAIT) phy_tmp_odt_r <= #TCQ 4'b0011; // rank 0/1 odt0 else if((init_next_state == INIT_RDLVL_STG1_WRITE) || (init_next_state == INIT_WRCAL_WRITE) || (init_next_state == INIT_OCLKDELAY_WRITE)) phy_tmp_odt_r <= #TCQ 4'b0011; // bit0 for rank0/1 (write) else if ((init_next_state == INIT_PI_PHASELOCK_READS) || (init_next_state == INIT_MPR_READ) || (init_next_state == INIT_RDLVL_STG1_READ) || (init_next_state == INIT_RDLVL_STG2_READ) || (init_next_state == INIT_OCLKDELAY_READ) || (init_next_state == INIT_WRCAL_READ) || (init_next_state == INIT_WRCAL_MULT_READS)) phy_tmp_odt_r <= #TCQ 4'b0010; // bit0 for rank1 (rank 0 rd) end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_WR == "60") ? 3'b001 : (RTT_WR == "120") ? 3'b010 : 3'b000; //Rank1 Dynamic ODT disabled tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000; //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000; end end // Two Slots - One slot with dual rank and other with single rank 4'b10_11: begin //Rank3 Rtt_NOM tmp_mr1_r[2] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000; tmp_mr2_r[2] <= #TCQ 2'b00; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; //Rank1 Dynamic ODT disabled tmp_mr2_r[1] <= #TCQ 2'b00; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000; //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 
2'b01 : 2'b10; //Rank1 Rtt_NOM after write leveling completes tmp_mr1_r[1] <= #TCQ 3'b000; end //Slot1 Rank1 or Rank3 is being written if(DRAM_TYPE == "DDR2")begin if(chip_cnt_r == 2'b00)begin phy_tmp_odt_r <= #TCQ 4'b0010; end else begin phy_tmp_odt_r <= #TCQ 4'b0001; end end else begin if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin if (chip_cnt_r[0] == 1'b1) begin phy_tmp_odt_r <= #TCQ 4'b0011; //Slot0 Rank0 is being written end else begin phy_tmp_odt_r <= #TCQ 4'b0101; // ODT for ranks 0 and 2 aserted end end else if ((init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS))begin if (chip_cnt_r == 2'b00) begin phy_tmp_odt_r <= #TCQ 4'b0100; end else begin phy_tmp_odt_r <= #TCQ 4'b0001; end end end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; end // Two Slots - One slot with dual rank and other with single rank 4'b11_10: begin //Rank2 Rtt_NOM tmp_mr1_r[2] <= #TCQ (RTT_NOM2 == "60") ? 3'b001 : (RTT_NOM2 == "120") ? 3'b010 : (RTT_NOM2 == "20") ? 3'b100 : (RTT_NOM2 == "30") ? 3'b101 : (RTT_NOM2 == "40") ? 3'b011: 3'b000; tmp_mr2_r[2] <= #TCQ 2'b00; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; //Rank1 Dynamic ODT disabled tmp_mr2_r[1] <= #TCQ 2'b00; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011: 3'b000; //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end if(DRAM_TYPE == "DDR2")begin if(chip_cnt_r[1] == 1'b1)begin phy_tmp_odt_r <= #TCQ 4'b0001; end else begin phy_tmp_odt_r <= #TCQ 4'b0100; // rank 2 ODT asserted end end else begin if (// wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin if (chip_cnt_r[1] == 1'b1) begin phy_tmp_odt_r <= #TCQ 4'b0110; end else begin phy_tmp_odt_r <= #TCQ 4'b0101; end end else if ((init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS)) begin if (chip_cnt_r[1] == 1'b1) begin phy_tmp_odt_r[(1*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ 4'b0010; end else begin phy_tmp_odt_r <= #TCQ 4'b0100; end end end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; end // Two Slots - two ranks per slot 4'b11_11: begin //Rank2 Rtt_NOM tmp_mr1_r[2] <= #TCQ (RTT_NOM2 == "60") ? 3'b001 : (RTT_NOM2 == "120") ? 
              3'b010 : (RTT_NOM2 == "20") ? 3'b100 : (RTT_NOM2 == "30") ? 3'b101 : (RTT_NOM2 == "40") ? 3'b011 : 3'b000;
            //Rank3 Rtt_NOM
            tmp_mr1_r[3] <= #TCQ (RTT_NOM3 == "60") ? 3'b001 : (RTT_NOM3 == "120") ? 3'b010 : (RTT_NOM3 == "20") ? 3'b100 : (RTT_NOM3 == "30") ? 3'b101 : (RTT_NOM3 == "40") ? 3'b011 : 3'b000;
            tmp_mr2_r[2] <= #TCQ 2'b00;
            tmp_mr2_r[3] <= #TCQ 2'b00;
            if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin
              //Rank0 Dynamic ODT disabled
              tmp_mr2_r[0] <= #TCQ 2'b00;
              //Rank0 Rtt_NOM
              tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000;
              //Rank1 Dynamic ODT disabled
              tmp_mr2_r[1] <= #TCQ 2'b00;
              //Rank1 Rtt_NOM
              tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000;
            end else begin
              //Rank1 Dynamic ODT defaults to 120 ohms
              tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10;
              //Rank1 Rtt_NOM after write leveling completes
              tmp_mr1_r[1] <= #TCQ 3'b000;
              //Rank0 Dynamic ODT defaults to 120 ohms
              tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10;
              //Rank0 Rtt_NOM after write leveling completes
              tmp_mr1_r[0] <= #TCQ 3'b000;
            end
            if(DRAM_TYPE == "DDR2")begin
              if(chip_cnt_r[1] == 1'b1)begin
                phy_tmp_odt_r <= #TCQ 4'b0001;
              end else begin
                phy_tmp_odt_r <= #TCQ 4'b0100;
              end
            end else begin
              if (//wrlvl_odt ||
                  (init_state_r == INIT_RDLVL_STG1_WRITE) ||
                  (init_state_r == INIT_WRCAL_WRITE) ||
                  (init_state_r == INIT_OCLKDELAY_WRITE)) begin
                //Slot1 Rank1 or Rank3 is being written
                if (chip_cnt_r[0] == 1'b1) begin
                  phy_tmp_odt_r <= #TCQ 4'b0110;
                //Slot0 Rank0 or Rank2 is being written
                end else begin
                  phy_tmp_odt_r <= #TCQ 4'b1001;
                end
              end else if ((init_state_r == INIT_RDLVL_STG1_READ) ||
                           (init_state_r == INIT_PI_PHASELOCK_READS) ||
                           (init_state_r == INIT_RDLVL_STG2_READ) ||
                           (init_state_r == INIT_OCLKDELAY_READ) ||
                           (init_state_r == INIT_WRCAL_READ) ||
                           (init_state_r == INIT_WRCAL_MULT_READS))begin
                //Slot1 Rank1 or Rank3 is being read
                if (chip_cnt_r[0] == 1'b1) begin
                  phy_tmp_odt_r <= #TCQ 4'b0100;
                //Slot0 Rank0 or Rank2 is being read
                end else begin
                  phy_tmp_odt_r <= #TCQ 4'b1000;
                end
              end
            end
            // Chip Select assignments
            phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}};
          end
          default: begin
            phy_tmp_odt_r <= #TCQ 4'b1111;
            // Chip Select assignments
            phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}};
            if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done)) begin
              //Rank0 Dynamic ODT disabled
              tmp_mr2_r[0] <= #TCQ 2'b00;
              //Rank0 Rtt_NOM
              tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000;
              //Rank1 Dynamic ODT disabled
              tmp_mr2_r[1] <= #TCQ 2'b00;
              //Rank1 Rtt_NOM
              tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000;
            end else begin
              //Rank0 Dynamic ODT defaults to 120 ohms
              tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10;
              //Rank0 Rtt_NOM
              tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000;
              //Rank1 Dynamic ODT defaults to 120 ohms
              tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10;
              //Rank1 Rtt_NOM
              tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ?
3'b011 : 3'b000; end end endcase end end end endgenerate // PHY only supports two ranks. // calib_aux_out[0] is CKE for rank 0 and calib_aux_out[1] is ODT for rank 0 // calib_aux_out[2] is CKE for rank 1 and calib_aux_out[3] is ODT for rank 1 generate if(CKE_ODT_AUX == "FALSE") begin if ((nSLOTS == 1) && (RANKS < 2)) begin always @(posedge clk) if (rst) begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}} ; calib_odt <= 2'b00 ; end else begin if (cnt_pwron_cke_done_r /*&& ~cnt_pwron_cke_done_r1*/)begin calib_cke <= #TCQ {nCK_PER_CLK{1'b1}}; end else begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}}; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF"))/* || wrlvl_rank_done || wrlvl_rank_done_r1 || (wrlvl_done && !wrlvl_done_r)*/) && (DRAM_TYPE == "DDR3")) begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt ) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_WRITE_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_WRITE_READ) || (init_state_r == INIT_OCLKDELAY_WRITE)|| (init_state_r == INIT_OCLKDELAY_WRITE_WAIT))) begin // Quad rank in a single slot calib_odt[0] <= #TCQ phy_tmp_odt_r[0]; calib_odt[1] <= #TCQ phy_tmp_odt_r[1]; end else begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end end end else if ((nSLOTS == 1) && (RANKS <= 2)) begin always @(posedge clk) if (rst) begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}} ; calib_odt <= 2'b00 ; end else begin if (cnt_pwron_cke_done_r /*&& ~cnt_pwron_cke_done_r1*/)begin calib_cke <= #TCQ {nCK_PER_CLK{1'b1}}; end else begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}}; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF"))/* || wrlvl_rank_done_r2 || (wrlvl_done && !wrlvl_done_r)*/) && (DRAM_TYPE == "DDR3")) begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt)|| (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_WRITE_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_WRITE_READ) || (init_state_r == INIT_OCLKDELAY_WRITE)|| (init_state_r == INIT_OCLKDELAY_WRITE_WAIT))) begin // Dual rank in a single slot calib_odt[0] <= #TCQ phy_tmp_odt_r[0]; calib_odt[1] <= #TCQ phy_tmp_odt_r[1]; end else begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end end end else if ((nSLOTS == 2) && (RANKS == 2)) begin always @(posedge clk) if (rst)begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}} ; calib_odt <= 2'b00 ; end else begin if (cnt_pwron_cke_done_r /*&& ~cnt_pwron_cke_done_r1*/)begin calib_cke <= #TCQ {nCK_PER_CLK{1'b1}}; end else begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}}; end if (((DRAM_TYPE == "DDR2") && (RTT_NOM == "DISABLED")) || ((DRAM_TYPE == "DDR3") && (RTT_NOM == "DISABLED") && (RTT_WR == "OFF"))) begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end else if (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // Quad rank in a single slot if (nCK_PER_CLK == 2) begin calib_odt[0] <= #TCQ (!calib_odt[0]) ? phy_tmp_odt_r[0] : 1'b0; calib_odt[1] <= #TCQ (!calib_odt[1]) ? 
phy_tmp_odt_r[1] : 1'b0; end else begin calib_odt[0] <= #TCQ phy_tmp_odt_r[0]; calib_odt[1] <= #TCQ phy_tmp_odt_r[1]; end // Turn on for idle rank during read if dynamic ODT is enabled in DDR3 end else if(((DRAM_TYPE == "DDR3") && (RTT_WR != "OFF")) && ((init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_MPR_READ) || (init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS))) begin if (nCK_PER_CLK == 2) begin calib_odt[0] <= #TCQ (!calib_odt[0]) ? phy_tmp_odt_r[0] : 1'b0; calib_odt[1] <= #TCQ (!calib_odt[1]) ? phy_tmp_odt_r[1] : 1'b0; end else begin calib_odt[0] <= #TCQ phy_tmp_odt_r[0]; calib_odt[1] <= #TCQ phy_tmp_odt_r[1]; end // disable well before next command and before disabling write leveling end else if(cnt_cmd_done_m7_r || (init_state_r == INIT_WRLVL_WAIT && ~wrlvl_odt)) calib_odt <= #TCQ 2'b00; end end end else begin//USE AUX OUTPUT for routing CKE and ODT. if ((nSLOTS == 1) && (RANKS < 2)) begin always @(posedge clk) if (rst) begin calib_aux_out <= #TCQ 4'b0000; end else begin if (cnt_pwron_cke_done_r && ~cnt_pwron_cke_done_r1)begin calib_aux_out[0] <= #TCQ 1'b1; calib_aux_out[2] <= #TCQ 1'b1; end else begin calib_aux_out[0] <= #TCQ 1'b0; calib_aux_out[2] <= #TCQ 1'b0; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF")) || wrlvl_rank_done || wrlvl_rank_done_r1 || (wrlvl_done && !wrlvl_done_r)) && (DRAM_TYPE == "DDR3")) begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE))) begin // Quad rank in a single slot calib_aux_out[1] <= #TCQ phy_tmp_odt_r[0]; calib_aux_out[3] <= #TCQ phy_tmp_odt_r[1]; end else begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end end end else if ((nSLOTS == 1) && (RANKS <= 2)) begin always @(posedge clk) if (rst) begin calib_aux_out <= #TCQ 4'b0000; end else begin if (cnt_pwron_cke_done_r && ~cnt_pwron_cke_done_r1)begin calib_aux_out[0] <= #TCQ 1'b1; calib_aux_out[2] <= #TCQ 1'b1; end else begin calib_aux_out[0] <= #TCQ 1'b0; calib_aux_out[2] <= #TCQ 1'b0; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF")) || wrlvl_rank_done_r2 || (wrlvl_done && !wrlvl_done_r)) && (DRAM_TYPE == "DDR3")) begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE))) begin // Dual rank in a single slot calib_aux_out[1] <= #TCQ phy_tmp_odt_r[0]; calib_aux_out[3] <= #TCQ phy_tmp_odt_r[1]; end else begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end end end else if ((nSLOTS == 2) && (RANKS == 2)) begin always @(posedge clk) if (rst) calib_aux_out <= #TCQ 4'b0000; else begin if (cnt_pwron_cke_done_r && ~cnt_pwron_cke_done_r1)begin calib_aux_out[0] <= #TCQ 1'b1; calib_aux_out[2] <= #TCQ 1'b1; end else begin calib_aux_out[0] <= #TCQ 1'b0; calib_aux_out[2] <= #TCQ 1'b0; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF")) || wrlvl_rank_done_r2 || (wrlvl_done && !wrlvl_done_r)) && (DRAM_TYPE == "DDR3")) begin calib_aux_out[1] <= 
#TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE))) begin // Quad rank in a single slot if (nCK_PER_CLK == 2) begin calib_aux_out[1] <= #TCQ (!calib_aux_out[1]) ? phy_tmp_odt_r[0] : 1'b0; calib_aux_out[3] <= #TCQ (!calib_aux_out[3]) ? phy_tmp_odt_r[1] : 1'b0; end else begin calib_aux_out[1] <= #TCQ phy_tmp_odt_r[0]; calib_aux_out[3] <= #TCQ phy_tmp_odt_r[1]; end end else begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end end end end endgenerate //***************************************************************** // memory address during init //***************************************************************** always @(posedge clk) phy_data_full_r <= #TCQ phy_data_full; always @(burst_addr_r or cnt_init_mr_r or chip_cnt_r or wrcal_wr_cnt or ddr2_refresh_flag_r or init_state_r or load_mr0 or phy_data_full_r or load_mr1 or load_mr2 or load_mr3 or new_burst_r or phy_address or mr1_r[0][0] or mr1_r[0][1] or mr1_r[0][2] or mr1_r[1][0] or mr1_r[1][1] or mr1_r[1][2] or mr1_r[2][0] or mr1_r[2][1] or mr1_r[2][2] or mr1_r[3][0] or mr1_r[3][1] or mr1_r[3][2] or mr2_r[chip_cnt_r] or reg_ctrl_cnt_r or stg1_wr_rd_cnt or oclk_wr_cnt or rdlvl_stg1_done or prbs_rdlvl_done or pi_dqs_found_done or rdlvl_wr_rd)begin // Bus 0 for address/bank never used address_w = 'b0; bank_w = 'b0; if ((init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_DDR2_PRECHARGE)) begin // Set A10=1 for ZQ long calibration or Precharge All address_w = 'b0; address_w[10] = 1'b1; bank_w = 'b0; end else if (init_state_r == INIT_WRLVL_START) begin // Enable wrlvl in MR1 bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; address_w[2] = mr1_r[chip_cnt_r][0]; address_w[6] = mr1_r[chip_cnt_r][1]; address_w[9] = mr1_r[chip_cnt_r][2]; address_w[7] = 1'b1; end else if (init_state_r == INIT_WRLVL_LOAD_MR) begin // Finished with write leveling, disable wrlvl in MR1 // For single rank disable Rtt_Nom bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; address_w[2] = mr1_r[chip_cnt_r][0]; address_w[6] = mr1_r[chip_cnt_r][1]; address_w[9] = mr1_r[chip_cnt_r][2]; end else if (init_state_r == INIT_WRLVL_LOAD_MR2) begin // Set RTT_WR in MR2 after write leveling disabled bank_w[1:0] = 2'b10; address_w = load_mr2[ROW_WIDTH-1:0]; address_w[10:9] = mr2_r[chip_cnt_r]; end else if (init_state_r == INIT_MPR_READ) begin address_w = 'b0; bank_w = 'b0; end else if (init_state_r == INIT_MPR_RDEN) begin // Enable MPR read with LMR3 and A2=1 bank_w[BANK_WIDTH-1:0] = 'd3; address_w = {ROW_WIDTH{1'b0}}; address_w[2] = 1'b1; end else if (init_state_r == INIT_MPR_DISABLE) begin // Disable MPR read with LMR3 and A2=0 bank_w[BANK_WIDTH-1:0] = 'd3; address_w = {ROW_WIDTH{1'b0}}; end else if ((init_state_r == INIT_REG_WRITE)& (DRAM_TYPE == "DDR3"))begin // bank_w is assigned a 3 bit value. In some // DDR2 cases there will be only two bank bits. 
//Qualifying the condition with DDR3 bank_w = 'b0; address_w = 'b0; case (reg_ctrl_cnt_r) REG_RC0[2:0]: address_w[4:0] = REG_RC0[4:0]; REG_RC1[2:0]:begin address_w[4:0] = REG_RC1[4:0]; bank_w = REG_RC1[7:5]; end REG_RC2[2:0]: address_w[4:0] = REG_RC2[4:0]; REG_RC3[2:0]: address_w[4:0] = REG_RC3[4:0]; REG_RC4[2:0]: address_w[4:0] = REG_RC4[4:0]; REG_RC5[2:0]: address_w[4:0] = REG_RC5[4:0]; endcase end else if (init_state_r == INIT_LOAD_MR) begin // If loading mode register, look at cnt_init_mr to determine // which MR is currently being programmed address_w = 'b0; bank_w = 'b0; if(DRAM_TYPE == "DDR3")begin if(rdlvl_stg1_done && prbs_rdlvl_done && pi_dqs_found_done)begin // end of the calibration programming correct // burst length if (TEST_AL == "0") begin bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; address_w[8]= 1'b0; //Don't reset DLL end else begin // programming correct AL value bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; if (TEST_AL == "CL-1") address_w[4:3]= 2'b01; // AL="CL-1" else address_w[4:3]= 2'b10; // AL="CL-2" end end else begin case (cnt_init_mr_r) INIT_CNT_MR2: begin bank_w[1:0] = 2'b10; address_w = load_mr2[ROW_WIDTH-1:0]; address_w[10:9] = mr2_r[chip_cnt_r]; end INIT_CNT_MR3: begin bank_w[1:0] = 2'b11; address_w = load_mr3[ROW_WIDTH-1:0]; end INIT_CNT_MR1: begin bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; address_w[2] = mr1_r[chip_cnt_r][0]; address_w[6] = mr1_r[chip_cnt_r][1]; address_w[9] = mr1_r[chip_cnt_r][2]; end INIT_CNT_MR0: begin bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; // fixing it to BL8 for calibration address_w[1:0] = 2'b00; end default: begin bank_w = {BANK_WIDTH{1'bx}}; address_w = {ROW_WIDTH{1'bx}}; end endcase end end else begin // DDR2 case (cnt_init_mr_r) INIT_CNT_MR2: begin if(~ddr2_refresh_flag_r)begin bank_w[1:0] = 2'b10; address_w = load_mr2[ROW_WIDTH-1:0]; end else begin // second set of lm commands bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; address_w[8]= 1'b0; //MRS command without resetting DLL end end INIT_CNT_MR3: begin if(~ddr2_refresh_flag_r)begin bank_w[1:0] = 2'b11; address_w = load_mr3[ROW_WIDTH-1:0]; end else begin // second set of lm commands bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; address_w[8]= 1'b0; //MRS command without resetting DLL. Repeted again // because there is an extra state. end end INIT_CNT_MR1: begin bank_w[1:0] = 2'b01; if(~ddr2_refresh_flag_r)begin address_w = load_mr1[ROW_WIDTH-1:0]; end else begin // second set of lm commands address_w = load_mr1[ROW_WIDTH-1:0]; address_w[9:7] = 3'b111; //OCD default state end end INIT_CNT_MR0: begin if(~ddr2_refresh_flag_r)begin bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; end else begin // second set of lm commands bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; if((chip_cnt_r == 2'd1) || (chip_cnt_r == 2'd3))begin // always disable odt for rank 1 and rank 3 as per SPEC address_w[2] = 'b0; address_w[6] = 'b0; end //OCD exit end end default: begin bank_w = {BANK_WIDTH{1'bx}}; address_w = {ROW_WIDTH{1'bx}}; end endcase end end else if ((init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_READ)) begin // Writing and reading PRBS pattern for read leveling stage 1 // Need to support burst length 4 or 8. 
PRBS pattern will be // written to entire row and read back from the same row repeatedly bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w[ROW_WIDTH-1:COL_WIDTH] = {ROW_WIDTH-COL_WIDTH{1'b0}}; if (((stg1_wr_rd_cnt == NUM_STG1_WR_RD) && ~rdlvl_stg1_done) || (stg1_wr_rd_cnt == 'd128)) address_w[COL_WIDTH-1:0] = {COL_WIDTH{1'b0}}; else if (phy_data_full_r || (!new_burst_r)) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0]; else if ((stg1_wr_rd_cnt >= 9'd0) && new_burst_r && ~phy_data_full_r) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0] + ADDR_INC; end else if ((init_state_r == INIT_OCLKDELAY_WRITE) || (init_state_r == INIT_OCLKDELAY_READ)) begin bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w[ROW_WIDTH-1:COL_WIDTH] = {ROW_WIDTH-COL_WIDTH{1'b0}}; if (oclk_wr_cnt == NUM_STG1_WR_RD) address_w[COL_WIDTH-1:0] = {COL_WIDTH{1'b0}}; else if (phy_data_full_r || (!new_burst_r)) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0]; else if ((oclk_wr_cnt >= 4'd0) && new_burst_r && ~phy_data_full_r) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0] + ADDR_INC; end else if ((init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_READ)) begin bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w[ROW_WIDTH-1:COL_WIDTH] = {ROW_WIDTH-COL_WIDTH{1'b0}}; if (wrcal_wr_cnt == NUM_STG1_WR_RD) address_w[COL_WIDTH-1:0] = {COL_WIDTH{1'b0}}; else if (phy_data_full_r || (!new_burst_r)) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0]; else if ((wrcal_wr_cnt >= 4'd0) && new_burst_r && ~phy_data_full_r) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0] + ADDR_INC; end else if ((init_state_r == INIT_WRCAL_MULT_READS) || (init_state_r == INIT_RDLVL_STG2_READ)) begin // when writing or reading back training pattern for read leveling stage2 // need to support burst length of 4 or 8. This may mean issuing // multiple commands to cover the entire range of addresses accessed // during read leveling. // Hard coding A[12] to 1 so that it will always be burst length of 8 // for DDR3. Does not have any effect on DDR2. bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w[ROW_WIDTH-1:COL_WIDTH] = {ROW_WIDTH-COL_WIDTH{1'b0}}; address_w[COL_WIDTH-1:0] = {CALIB_COL_ADD[COL_WIDTH-1:3],burst_addr_r, 3'b000}; address_w[12] = 1'b1; end else if ((init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT)) begin bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w = CALIB_ROW_ADD[ROW_WIDTH-1:0]; end else begin bank_w = {BANK_WIDTH{1'bx}}; address_w = {ROW_WIDTH{1'bx}}; end end // registring before sending out generate genvar r,s; if ((DRAM_TYPE != "DDR3") || (CA_MIRROR != "ON")) begin: gen_no_mirror for (r = 0; r < nCK_PER_CLK; r = r + 1) begin: div_clk_loop always @(posedge clk) begin phy_address[(r*ROW_WIDTH) +: ROW_WIDTH] <= #TCQ address_w; phy_bank[(r*BANK_WIDTH) +: BANK_WIDTH] <= #TCQ bank_w; end end end else begin: gen_mirror // Control/addressing mirroring (optional for DDR3 dual rank DIMMs) // Mirror for the 2nd rank only. Logic needs to be enhanced to account // for multiple slots, currently only supports one slot, 2-rank config for (r = 0; r < nCK_PER_CLK; r = r + 1) begin: gen_ba_div_clk_loop for (s = 0; s < BANK_WIDTH; s = s + 1) begin: gen_ba always @(posedge clk) if (chip_cnt_r == 2'b00) begin phy_bank[(r*BANK_WIDTH) + s] <= #TCQ bank_w[s]; end else begin phy_bank[(r*BANK_WIDTH) + s] <= #TCQ bank_w[(s == 0) ? 1 : ((s == 1) ? 
0 : s)]; end end end for (r = 0; r < nCK_PER_CLK; r = r + 1) begin: gen_addr_div_clk_loop for (s = 0; s < ROW_WIDTH; s = s + 1) begin: gen_addr always @(posedge clk) if (chip_cnt_r == 2'b00) begin phy_address[(r*ROW_WIDTH) + s] <= #TCQ address_w[s]; end else begin phy_address[(r*ROW_WIDTH) + s] <= #TCQ address_w[ (s == 3) ? 4 : ((s == 4) ? 3 : ((s == 5) ? 6 : ((s == 6) ? 5 : ((s == 7) ? 8 : ((s == 8) ? 7 : s)))))]; end end end end endgenerate endmodule
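//*****************************************************************************
// Note on the gen_mirror generate block above (informational comment only):
// with CA_MIRROR == "ON" (DDR3 dual-rank DIMMs), commands issued to the
// second rank (chip_cnt_r != 2'b00) have their bank and address bits swapped
// to match the mirrored C/A routing on the DIMM: BA0<->BA1, A3<->A4, A5<->A6
// and A7<->A8. Illustrative example (values chosen only for this comment):
// if bank_w = 2'b01 and address_w[4:3] = 2'b01, the mirrored rank is driven
// with phy_bank = 2'b10 and A4 = 1, A3 = 0, while rank 0 receives the bits
// unswapped.
//*****************************************************************************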
//***************************************************************************** // (c) Copyright 2009 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor: Xilinx // \ \ \/ Version: %version // \ \ Application: MIG // / / Filename: ddr_phy_init.v // /___/ /\ Date Last Modified: $Date: 2011/06/02 08:35:09 $ // \ \ / \ Date Created: // \___\/\___\ // //Device: 7 Series //Design Name: DDR3 SDRAM //Purpose: // Memory initialization and overall master state control during // initialization and calibration. Specifically, the following functions // are performed: // 1. Memory initialization (initial AR, mode register programming, etc.) // 2. Initiating write leveling // 3. Generate training pattern writes for read leveling. Generate // memory readback for read leveling. // This module has an interface for providing control/address and write // data to the PHY Control Block during initialization/calibration. // Once initialization and calibration are complete, control is passed to the MC. 
// //Reference: //Revision History: // //***************************************************************************** /****************************************************************************** **$Id: ddr_phy_init.v,v 1.1 2011/06/02 08:35:09 mishra Exp $ **$Date: 2011/06/02 08:35:09 $ **$Author: mishra $ **$Revision: 1.1 $ **$Source: /devl/xcs/repo/env/Databases/ip/src2/O/mig_7series_v1_3/data/dlib/7series/ddr3_sdram/verilog/rtl/phy/ddr_phy_init.v,v $ ******************************************************************************/ `timescale 1ps/1ps module mig_7series_v1_9_ddr_phy_init # ( parameter TCQ = 100, parameter nCK_PER_CLK = 4, // # of memory clocks per CLK parameter CLK_PERIOD = 3000, // Logic (internal) clk period (in ps) parameter USE_ODT_PORT = 0, // 0 - No ODT output from FPGA // 1 - ODT output from FPGA parameter PRBS_WIDTH = 8, // PRBS sequence = 2^PRBS_WIDTH parameter BANK_WIDTH = 2, parameter CA_MIRROR = "OFF", // C/A mirror opt for DDR3 dual rank parameter COL_WIDTH = 10, parameter nCS_PER_RANK = 1, // # of CS bits per rank e.g. for // component I/F with CS_WIDTH=1, // nCS_PER_RANK=# of components parameter DQ_WIDTH = 64, parameter DQS_WIDTH = 8, parameter DQS_CNT_WIDTH = 3, // = ceil(log2(DQS_WIDTH)) parameter ROW_WIDTH = 14, parameter CS_WIDTH = 1, parameter RANKS = 1, // # of memory ranks in the interface parameter CKE_WIDTH = 1, // # of cke outputs parameter DRAM_TYPE = "DDR3", parameter REG_CTRL = "ON", parameter ADDR_CMD_MODE= "1T", // calibration Address parameter CALIB_ROW_ADD = 16'h0000,// Calibration row address parameter CALIB_COL_ADD = 12'h000, // Calibration column address parameter CALIB_BA_ADD = 3'h0, // Calibration bank address // DRAM mode settings parameter AL = "0", // Additive Latency option parameter BURST_MODE = "8", // Burst length parameter BURST_TYPE = "SEQ", // Burst type // parameter nAL = 0, // Additive latency (in clk cyc) parameter nCL = 5, // Read CAS latency (in clk cyc) parameter nCWL = 5, // Write CAS latency (in clk cyc) parameter tRFC = 110000, // Refresh-to-command delay (in ps) parameter OUTPUT_DRV = "HIGH", // DRAM reduced output drive option parameter RTT_NOM = "60", // Nominal ODT termination value parameter RTT_WR = "60", // Write ODT termination value parameter WRLVL = "ON", // Enable write leveling // parameter PHASE_DETECT = "ON", // Enable read phase detector parameter DDR2_DQSN_ENABLE = "YES", // Enable differential DQS for DDR2 parameter nSLOTS = 1, // Number of DIMM SLOTs in the system parameter SIM_INIT_OPTION = "NONE", // "NONE", "SKIP_PU_DLY", "SKIP_INIT" parameter SIM_CAL_OPTION = "NONE", // "NONE", "FAST_CAL", "SKIP_CAL" parameter CKE_ODT_AUX = "FALSE", parameter PRE_REV3ES = "OFF", // Enable TG error detection during calibration parameter TEST_AL = "0" // Internal use for ICM verification ) ( input clk, input rst, input [2*8*nCK_PER_CLK-1:0] prbs_o, input delay_incdec_done, input ck_addr_cmd_delay_done, input pi_phase_locked_all, input pi_dqs_found_done, input dqsfound_retry, input dqs_found_prech_req, output reg pi_phaselock_start, output pi_phase_locked_err, output pi_calib_done, input phy_if_empty, // Read/write calibration interface input wrlvl_done, input wrlvl_rank_done, input wrlvl_byte_done, input wrlvl_byte_redo, input wrlvl_final, output reg wrlvl_final_if_rst, input oclkdelay_calib_done, input oclk_prech_req, input oclk_calib_resume, output reg oclkdelay_calib_start, input done_dqs_tap_inc, input [5:0] rd_data_offset_0, input [5:0] rd_data_offset_1, input [5:0] rd_data_offset_2, input [6*RANKS-1:0] 
rd_data_offset_ranks_0, input [6*RANKS-1:0] rd_data_offset_ranks_1, input [6*RANKS-1:0] rd_data_offset_ranks_2, input pi_dqs_found_rank_done, input wrcal_done, input wrcal_prech_req, input wrcal_read_req, input wrcal_act_req, input temp_wrcal_done, input [7:0] slot_0_present, input [7:0] slot_1_present, output reg wl_sm_start, output reg wr_lvl_start, output reg wrcal_start, output reg wrcal_rd_wait, output reg wrcal_sanity_chk, output reg tg_timer_done, output reg no_rst_tg_mc, input rdlvl_stg1_done, input rdlvl_stg1_rank_done, output reg rdlvl_stg1_start, output reg pi_dqs_found_start, output reg detect_pi_found_dqs, // rdlvl stage 1 precharge requested after each DQS input rdlvl_prech_req, input rdlvl_last_byte_done, input wrcal_resume, input wrcal_sanity_chk_done, // MPR read leveling input mpr_rdlvl_done, input mpr_rnk_done, input mpr_last_byte_done, output reg mpr_rdlvl_start, output reg mpr_end_if_reset, // PRBS Read Leveling input prbs_rdlvl_done, input prbs_last_byte_done, input prbs_rdlvl_prech_req, output reg prbs_rdlvl_start, output reg prbs_gen_clk_en, // Signals shared btw multiple calibration stages output reg prech_done, // Data select / status output reg init_calib_complete, // Signal to mask memory model error for Invalid latching edge output reg calib_writes, // PHY address/control // 2 commands to PHY Control Block per div 2 clock in 2:1 mode // 4 commands to PHY Control Block per div 4 clock in 4:1 mode output reg [nCK_PER_CLK*ROW_WIDTH-1:0] phy_address, output reg [nCK_PER_CLK*BANK_WIDTH-1:0]phy_bank, output reg [nCK_PER_CLK-1:0] phy_ras_n, output reg [nCK_PER_CLK-1:0] phy_cas_n, output reg [nCK_PER_CLK-1:0] phy_we_n, output reg phy_reset_n, output [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] phy_cs_n, // Hard PHY Interface signals input phy_ctl_ready, input phy_ctl_full, input phy_cmd_full, input phy_data_full, output reg calib_ctl_wren, output reg calib_cmd_wren, output reg [1:0] calib_seq, output reg write_calib, output reg read_calib, // PHY_Ctl_Wd output reg [2:0] calib_cmd, // calib_aux_out used for CKE and ODT output reg [3:0] calib_aux_out, output reg [1:0] calib_odt , output reg [nCK_PER_CLK-1:0] calib_cke , output [1:0] calib_rank_cnt, output reg [1:0] calib_cas_slot, output reg [5:0] calib_data_offset_0, output reg [5:0] calib_data_offset_1, output reg [5:0] calib_data_offset_2, // PHY OUT_FIFO output reg calib_wrdata_en, output reg [2*nCK_PER_CLK*DQ_WIDTH-1:0] phy_wrdata, // PHY Read output phy_rddata_en, output phy_rddata_valid, output [255:0] dbg_phy_init ); //***************************************************************************** // Assertions to be added //***************************************************************************** // The phy_ctl_full signal must never be asserted in synchronous mode of // operation either 4:1 or 2:1 // // The RANKS parameter must never be set to '0' by the user // valid values: 1 to 4 // //***************************************************************************** //*************************************************************************** // Number of Read level stage 1 writes limited to a SDRAM row // The address of Read Level stage 1 reads must also be limited // to a single SDRAM row // (2^COL_WIDTH)/BURST_MODE = (2^10)/8 = 128 localparam NUM_STG1_WR_RD = (BURST_MODE == "8") ? 4 : (BURST_MODE == "4") ? 8 : 4; localparam ADDR_INC = (BURST_MODE == "8") ? 8 : (BURST_MODE == "4") ? 
4 : 8; // In a 2 slot dual rank per system RTT_NOM values // for Rank2 and Rank3 default to 40 ohms localparam RTT_NOM2 = "40"; localparam RTT_NOM3 = "40"; localparam RTT_NOM_int = (USE_ODT_PORT == 1) ? RTT_NOM : RTT_WR; // Specifically for use with half-frequency controller (nCK_PER_CLK=2) // = 1 if burst length = 4, = 0 if burst length = 8. Determines how // often row command needs to be issued during read-leveling // For DDR3 the burst length is fixed during calibration localparam BURST4_FLAG = (DRAM_TYPE == "DDR3")? 1'b0 : (BURST_MODE == "8") ? 1'b0 : ((BURST_MODE == "4") ? 1'b1 : 1'b0); //*************************************************************************** // Counter values used to determine bus timing // NOTE on all counter terminal counts - these can/should be one less than // the actual delay to take into account extra clock cycle delay in // generating the corresponding "done" signal //*************************************************************************** localparam CLK_MEM_PERIOD = CLK_PERIOD / nCK_PER_CLK; // Calculate initial delay required in number of CLK clock cycles // to delay initially. The counter is clocked by [CLK/1024] - which // is approximately division by 1000 - note that the formulas below will // result in more than the minimum wait time because of this approximation. // NOTE: For DDR3 JEDEC specifies to delay reset // by 200us, and CKE by an additional 500us after power-up // For DDR2 CKE is delayed by 200us after power up. localparam DDR3_RESET_DELAY_NS = 200000; localparam DDR3_CKE_DELAY_NS = 500000 + DDR3_RESET_DELAY_NS; localparam DDR2_CKE_DELAY_NS = 200000; localparam PWRON_RESET_DELAY_CNT = ((DDR3_RESET_DELAY_NS+CLK_PERIOD-1)/CLK_PERIOD); localparam PWRON_CKE_DELAY_CNT = (DRAM_TYPE == "DDR3") ? (((DDR3_CKE_DELAY_NS+CLK_PERIOD-1)/CLK_PERIOD)) : (((DDR2_CKE_DELAY_NS+CLK_PERIOD-1)/CLK_PERIOD)); // FOR DDR2 -1 taken out. With -1 not getting 200us. The equation // needs to be reworked. localparam DDR2_INIT_PRE_DELAY_PS = 400000; localparam DDR2_INIT_PRE_CNT = ((DDR2_INIT_PRE_DELAY_PS+CLK_PERIOD-1)/CLK_PERIOD)-1; // Calculate tXPR time: reset from CKE HIGH to valid command after power-up // tXPR = (max(5nCK, tRFC(min)+10ns). Add a few (blah, messy) more clock // cycles because this counter actually starts up before CKE is asserted // to memory. localparam TXPR_DELAY_CNT = (5*CLK_MEM_PERIOD > tRFC+10000) ? (((5+nCK_PER_CLK-1)/nCK_PER_CLK)-1)+11 : (((tRFC+10000+CLK_PERIOD-1)/CLK_PERIOD)-1)+11; // tDLLK/tZQINIT time = 512*tCK = 256*tCLKDIV localparam TDLLK_TZQINIT_DELAY_CNT = 255; // TWR values in ns. Both DDR2 and DDR3 have the same value. // 15000ns/tCK localparam TWR_CYC = ((15000) % CLK_MEM_PERIOD) ? (15000/CLK_MEM_PERIOD) + 1 : 15000/CLK_MEM_PERIOD; // time to wait between consecutive commands in PHY_INIT - this is a // generic number, and must be large enough to account for worst case // timing parameter (tRFC - refresh-to-active) across all memory speed // grades and operating frequencies. Expressed in clk // (Divided by 4 or Divided by 2) clock cycles. localparam CNTNEXT_CMD = 7'b1111111; // Counter values to keep track of which MR register to load during init // Set value of INIT_CNT_MR_DONE to equal value of counter for last mode // register configured during initialization. 
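  //***************************************************************************
  // Worked example for the delay counts above (illustrative only, assuming
  // the default parameter values CLK_PERIOD = 3000 ps, nCK_PER_CLK = 4 and
  // DRAM_TYPE = "DDR3"):
  //   PWRON_RESET_DELAY_CNT = (200000 + 3000 - 1)/3000 = 67
  //   PWRON_CKE_DELAY_CNT   = (700000 + 3000 - 1)/3000 = 234
  // Since the power-on counter is clocked by clk/1024 (see the comment
  // above), the realized waits are roughly 67*1024*3 ns ~ 206 us and
  // 234*1024*3 ns ~ 719 us, comfortably above the 200 us reset and 700 us
  // CKE minimums called out above.
  //   CLK_MEM_PERIOD = 3000/4 = 750 ps, so TWR_CYC = 15000/750 = 20 memory
  // clocks for the 15 ns write recovery time.
  //***************************************************************************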
  // NOTE: Reserve more bits for DDR2 - more MR accesses for DDR2 init
  localparam INIT_CNT_MR2     = 2'b00;
  localparam INIT_CNT_MR3     = 2'b01;
  localparam INIT_CNT_MR1     = 2'b10;
  localparam INIT_CNT_MR0     = 2'b11;
  localparam INIT_CNT_MR_DONE = 2'b11;

  // Register chip programmable values for DDR3
  // The register chip for the registered DIMM needs to be programmed
  // before the initialization of the registered DIMM.
  // Address for the control word is in : DBA2, DA2, DA1, DA0
  // Data for the control word is in: DBA1 DBA0, DA4, DA3
  // The values will be stored in the local param in the following format
  // {DBA[2:0], DA[4:0]}
  // RC0 is global features control word. Address == 000
  localparam REG_RC0 = 8'b00000000;
  // RC1 Clock driver enable control word. Enables or disables the four
  // output clocks in the register chip. For single rank and dual rank
  // two clocks will be enabled and for quad rank all the four clocks
  // will be enabled. Address == 001. Data = 0110 for single and dual rank.
  // = 0000 for quad rank
  localparam REG_RC1 = (RANKS <= 2) ? 8'b00110001 : 8'b00000001;
  // RC2 timing control word. Set in 1T timing mode
  // Address = 010. Data = 0000
  localparam REG_RC2 = 8'b00000010;
  // RC3 timing control word. Setting the data to 0000
  localparam REG_RC3 = 8'b00000011;
  // RC4 timing control word. Setting the data to 0000
  localparam REG_RC4 = 8'b00000100;
  // RC5 timing control word. Setting the data to 0000
  localparam REG_RC5 = 8'b00000101;

  // For non-zero AL values
  localparam nAL = (AL == "CL-1") ? nCL - 1 : 0;

  // Adding the register dimm latency to write latency
  localparam CWL_M = (REG_CTRL == "ON") ? nCWL + nAL + 1 : nCWL + nAL;

  // Count value to generate pi_phase_locked_err signal
  localparam PHASELOCKED_TIMEOUT = (SIM_CAL_OPTION == "NONE") ? 16383 : 1000;

  // Timeout interval for detecting error with Traffic Generator
  localparam [13:0] TG_TIMER_TIMEOUT = (SIM_CAL_OPTION == "NONE") ?
14'h3FFF : 14'h0001; // Master state machine encoding localparam INIT_IDLE = 6'b000000; //0 localparam INIT_WAIT_CKE_EXIT = 6'b000001; //1 localparam INIT_LOAD_MR = 6'b000010; //2 localparam INIT_LOAD_MR_WAIT = 6'b000011; //3 localparam INIT_ZQCL = 6'b000100; //4 localparam INIT_WAIT_DLLK_ZQINIT = 6'b000101; //5 localparam INIT_WRLVL_START = 6'b000110; //6 localparam INIT_WRLVL_WAIT = 6'b000111; //7 localparam INIT_WRLVL_LOAD_MR = 6'b001000; //8 localparam INIT_WRLVL_LOAD_MR_WAIT = 6'b001001; //9 localparam INIT_WRLVL_LOAD_MR2 = 6'b001010; //A localparam INIT_WRLVL_LOAD_MR2_WAIT = 6'b001011; //B localparam INIT_RDLVL_ACT = 6'b001100; //C localparam INIT_RDLVL_ACT_WAIT = 6'b001101; //D localparam INIT_RDLVL_STG1_WRITE = 6'b001110; //E localparam INIT_RDLVL_STG1_WRITE_READ = 6'b001111; //F localparam INIT_RDLVL_STG1_READ = 6'b010000; //10 localparam INIT_RDLVL_STG2_READ = 6'b010001; //11 localparam INIT_RDLVL_STG2_READ_WAIT = 6'b010010; //12 localparam INIT_PRECHARGE_PREWAIT = 6'b010011; //13 localparam INIT_PRECHARGE = 6'b010100; //14 localparam INIT_PRECHARGE_WAIT = 6'b010101; //15 localparam INIT_DONE = 6'b010110; //16 localparam INIT_DDR2_PRECHARGE = 6'b010111; //17 localparam INIT_DDR2_PRECHARGE_WAIT = 6'b011000; //18 localparam INIT_REFRESH = 6'b011001; //19 localparam INIT_REFRESH_WAIT = 6'b011010; //1A localparam INIT_REG_WRITE = 6'b011011; //1B localparam INIT_REG_WRITE_WAIT = 6'b011100; //1C localparam INIT_DDR2_MULTI_RANK = 6'b011101; //1D localparam INIT_DDR2_MULTI_RANK_WAIT = 6'b011110; //1E localparam INIT_WRCAL_ACT = 6'b011111; //1F localparam INIT_WRCAL_ACT_WAIT = 6'b100000; //20 localparam INIT_WRCAL_WRITE = 6'b100001; //21 localparam INIT_WRCAL_WRITE_READ = 6'b100010; //22 localparam INIT_WRCAL_READ = 6'b100011; //23 localparam INIT_WRCAL_READ_WAIT = 6'b100100; //24 localparam INIT_WRCAL_MULT_READS = 6'b100101; //25 localparam INIT_PI_PHASELOCK_READS = 6'b100110; //26 localparam INIT_MPR_RDEN = 6'b100111; //27 localparam INIT_MPR_WAIT = 6'b101000; //28 localparam INIT_MPR_READ = 6'b101001; //29 localparam INIT_MPR_DISABLE_PREWAIT = 6'b101010; //2A localparam INIT_MPR_DISABLE = 6'b101011; //2B localparam INIT_MPR_DISABLE_WAIT = 6'b101100; //2C localparam INIT_OCLKDELAY_ACT = 6'b101101; //2D localparam INIT_OCLKDELAY_ACT_WAIT = 6'b101110; //2E localparam INIT_OCLKDELAY_WRITE = 6'b101111; //2F localparam INIT_OCLKDELAY_WRITE_WAIT = 6'b110000; //30 localparam INIT_OCLKDELAY_READ = 6'b110001; //31 localparam INIT_OCLKDELAY_READ_WAIT = 6'b110010; //32 localparam INIT_REFRESH_RNK2_WAIT = 6'b110011; //33 integer i, j, k, l, m, n, p, q; reg pi_dqs_found_all_r; (* ASYNC_REG = "TRUE" *) reg pi_phase_locked_all_r1; (* ASYNC_REG = "TRUE" *) reg pi_phase_locked_all_r2; (* ASYNC_REG = "TRUE" *) reg pi_phase_locked_all_r3; (* ASYNC_REG = "TRUE" *) reg pi_phase_locked_all_r4; reg pi_calib_rank_done_r; reg [13:0] pi_phaselock_timer; reg stg1_wr_done; reg rnk_ref_cnt; reg pi_dqs_found_done_r1; reg pi_dqs_found_rank_done_r; reg read_calib_int; reg read_calib_r; reg pi_calib_done_r; reg pi_calib_done_r1; reg burst_addr_r; reg [1:0] chip_cnt_r; reg [6:0] cnt_cmd_r; reg cnt_cmd_done_r; reg cnt_cmd_done_m7_r; reg [7:0] cnt_dllk_zqinit_r; reg cnt_dllk_zqinit_done_r; reg cnt_init_af_done_r; reg [1:0] cnt_init_af_r; reg [1:0] cnt_init_data_r; reg [1:0] cnt_init_mr_r; reg cnt_init_mr_done_r; reg cnt_init_pre_wait_done_r; reg [7:0] cnt_init_pre_wait_r; reg [9:0] cnt_pwron_ce_r; reg cnt_pwron_cke_done_r; reg cnt_pwron_cke_done_r1; reg [8:0] cnt_pwron_r; reg cnt_pwron_reset_done_r; reg cnt_txpr_done_r; 
reg [7:0] cnt_txpr_r; reg ddr2_pre_flag_r; reg ddr2_refresh_flag_r; reg ddr3_lm_done_r; reg [4:0] enable_wrlvl_cnt; reg init_complete_r; reg init_complete_r1; reg init_complete_r2; (* keep = "true" *) reg init_complete_r_timing; (* keep = "true" *) reg init_complete_r1_timing; reg [5:0] init_next_state; reg [5:0] init_state_r; reg [5:0] init_state_r1; wire [15:0] load_mr0; wire [15:0] load_mr1; wire [15:0] load_mr2; wire [15:0] load_mr3; reg mem_init_done_r; reg [1:0] mr2_r [0:3]; reg [2:0] mr1_r [0:3]; reg new_burst_r; reg [15:0] wrcal_start_dly_r; wire wrcal_start_pre; reg wrcal_resume_r; // Only one ODT signal per rank in PHY Control Block reg [nCK_PER_CLK-1:0] phy_tmp_odt_r; reg [nCK_PER_CLK-1:0] phy_tmp_odt_r1; reg [CS_WIDTH*nCS_PER_RANK-1:0] phy_tmp_cs1_r; reg [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] phy_int_cs_n; wire prech_done_pre; reg [15:0] prech_done_dly_r; reg prech_pending_r; reg prech_req_posedge_r; reg prech_req_r; reg pwron_ce_r; reg first_rdlvl_pat_r; reg first_wrcal_pat_r; reg phy_wrdata_en; reg phy_wrdata_en_r1; reg [1:0] wrdata_pat_cnt; reg [1:0] wrcal_pat_cnt; reg [ROW_WIDTH-1:0] address_w; reg [BANK_WIDTH-1:0] bank_w; reg rdlvl_stg1_done_r1; reg rdlvl_stg1_start_int; reg [15:0] rdlvl_start_dly0_r; reg rdlvl_start_pre; reg rdlvl_last_byte_done_r; wire rdlvl_rd; wire rdlvl_wr; reg rdlvl_wr_r; wire rdlvl_wr_rd; reg [2:0] reg_ctrl_cnt_r; reg [1:0] tmp_mr2_r [0:3]; reg [2:0] tmp_mr1_r [0:3]; reg wrlvl_done_r; reg wrlvl_done_r1; reg wrlvl_rank_done_r1; reg wrlvl_rank_done_r2; reg wrlvl_rank_done_r3; reg wrlvl_rank_done_r4; reg wrlvl_rank_done_r5; reg wrlvl_rank_done_r6; reg wrlvl_rank_done_r7; reg [2:0] wrlvl_rank_cntr; reg wrlvl_odt_ctl; reg wrlvl_odt; reg wrlvl_active; reg wrlvl_active_r1; reg [2:0] num_reads; reg temp_wrcal_done_r; reg temp_lmr_done; reg extend_cal_pat; reg [13:0] tg_timer; reg tg_timer_go; reg cnt_wrcal_rd; reg [3:0] cnt_wait; reg [7:0] wrcal_reads; reg [8:0] stg1_wr_rd_cnt; reg phy_data_full_r; reg wr_level_dqs_asrt; reg wr_level_dqs_asrt_r1; reg [1:0] dqs_asrt_cnt; reg [3:0] num_refresh; wire oclkdelay_calib_start_pre; reg [15:0] oclkdelay_start_dly_r; reg [3:0] oclk_wr_cnt; reg [3:0] wrcal_wr_cnt; reg wrlvl_final_r; reg prbs_rdlvl_done_r1; reg prbs_last_byte_done_r; reg phy_if_empty_r; reg wrcal_final_chk; //*************************************************************************** // Debug //*************************************************************************** //synthesis translate_off always @(posedge mem_init_done_r) begin if (!rst) $display ("PHY_INIT: Memory Initialization completed at %t", $time); end always @(posedge wrlvl_done) begin if (!rst && (WRLVL == "ON")) $display ("PHY_INIT: Write Leveling completed at %t", $time); end always @(posedge rdlvl_stg1_done) begin if (!rst) $display ("PHY_INIT: Read Leveling Stage 1 completed at %t", $time); end always @(posedge mpr_rdlvl_done) begin if (!rst) $display ("PHY_INIT: MPR Read Leveling completed at %t", $time); end always @(posedge oclkdelay_calib_done) begin if (!rst) $display ("PHY_INIT: OCLKDELAY calibration completed at %t", $time); end always @(posedge pi_calib_done_r1) begin if (!rst) $display ("PHY_INIT: Phaser_In Phase Locked at %t", $time); end always @(posedge pi_dqs_found_done) begin if (!rst) $display ("PHY_INIT: Phaser_In DQSFOUND completed at %t", $time); end always @(posedge wrcal_done) begin if (!rst && (WRLVL == "ON")) $display ("PHY_INIT: Write Calibration completed at %t", $time); end //synthesis translate_on assign dbg_phy_init[5:0] = init_state_r; 
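  //***************************************************************************
  // Debug decode hint (informational comment only): dbg_phy_init[5:0] above
  // carries init_state_r, the master init/calibration FSM state, and can be
  // decoded against the INIT_* localparams, e.g. 6'b000000 = INIT_IDLE,
  // 6'b010100 = INIT_PRECHARGE and 6'b010110 = INIT_DONE (calibration
  // complete, control handed off to the controller). The $display messages
  // in the translate_off block above mark the same milestones in simulation.
  //***************************************************************************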
  //***************************************************************************
  // DQS count to be sent to hard PHY during Phaser_IN Phase Locking stage
  //***************************************************************************
  // assign pi_phaselock_calib_cnt = dqs_cnt_r;

  assign pi_calib_done = pi_calib_done_r1;

  always @(posedge clk) begin
    if (rst)
      wrcal_final_chk <= #TCQ 1'b0;
    else if ((init_next_state == INIT_WRCAL_ACT) && wrcal_done &&
             (DRAM_TYPE == "DDR3"))
      wrcal_final_chk <= #TCQ 1'b1;
  end

  always @(posedge clk) begin
    rdlvl_stg1_done_r1 <= #TCQ rdlvl_stg1_done;
    prbs_rdlvl_done_r1 <= #TCQ prbs_rdlvl_done;
    wrcal_resume_r     <= #TCQ wrcal_resume;
    wrcal_sanity_chk   <= #TCQ wrcal_final_chk;
  end

  always @(posedge clk) begin
    if (rst)
      mpr_end_if_reset <= #TCQ 1'b0;
    else if (mpr_last_byte_done && (num_refresh != 'd0))
      mpr_end_if_reset <= #TCQ 1'b1;
    else
      mpr_end_if_reset <= #TCQ 1'b0;
  end

  // Signal to mask memory model error for invalid latching edge
  always @(posedge clk)
    if (rst)
      calib_writes <= #TCQ 1'b0;
    else if ((init_state_r == INIT_OCLKDELAY_WRITE) ||
             (init_state_r == INIT_RDLVL_STG1_WRITE) ||
             (init_state_r == INIT_RDLVL_STG1_WRITE_READ) ||
             (init_state_r == INIT_WRCAL_WRITE) ||
             (init_state_r == INIT_WRCAL_WRITE_READ))
      calib_writes <= #TCQ 1'b1;
    else
      calib_writes <= #TCQ 1'b0;

  always @(posedge clk)
    if (rst)
      wrcal_rd_wait <= #TCQ 1'b0;
    else if (init_state_r == INIT_WRCAL_READ_WAIT)
      wrcal_rd_wait <= #TCQ 1'b1;
    else
      wrcal_rd_wait <= #TCQ 1'b0;

  //***************************************************************************
  // Signal PHY completion when calibration is finished.
  // Signal assertion is delayed by four clock cycles to account for the
  // multi cycle path constraint to the (phy_init_data_sel) signal.
  //***************************************************************************
  always @(posedge clk)
    if (rst) begin
      init_complete_r         <= #TCQ 1'b0;
      init_complete_r_timing  <= #TCQ 1'b0;
      init_complete_r1        <= #TCQ 1'b0;
      init_complete_r1_timing <= #TCQ 1'b0;
      init_complete_r2        <= #TCQ 1'b0;
      init_calib_complete     <= #TCQ 1'b0;
    end else begin
      if (init_state_r == INIT_DONE) begin
        init_complete_r        <= #TCQ 1'b1;
        init_complete_r_timing <= #TCQ 1'b1;
      end
      init_complete_r1        <= #TCQ init_complete_r;
      init_complete_r1_timing <= #TCQ init_complete_r_timing;
      init_complete_r2        <= #TCQ init_complete_r1;
      init_calib_complete     <= #TCQ init_complete_r2;
    end

  //***************************************************************************
  // Instantiate FF for the phy_init_data_sel signal. A multi cycle path
  // constraint will be assigned to this signal.
This signal will only be // used within the PHY //*************************************************************************** // FDRSE u_ff_phy_init_data_sel // ( // .Q (phy_init_data_sel), // .C (clk), // .CE (1'b1), // .D (init_complete_r), // .R (1'b0), // .S (1'b0) // ) /* synthesis syn_preserve=1 */ // /* synthesis syn_replicate = 0 */; //*************************************************************************** // Mode register programming //*************************************************************************** //***************************************************************** // DDR3 Load mode reg0 // Mode Register (MR0): // [15:13] - unused - 000 // [12] - Precharge Power-down DLL usage - 0 (DLL frozen, slow-exit), // 1 (DLL maintained) // [11:9] - write recovery for Auto Precharge (tWR/tCK = 6) // [8] - DLL reset - 0 or 1 // [7] - Test Mode - 0 (normal) // [6:4],[2] - CAS latency - CAS_LAT // [3] - Burst Type - BURST_TYPE // [1:0] - Burst Length - BURST_LEN // DDR2 Load mode register // Mode Register (MR): // [15:14] - unused - 00 // [13] - reserved - 0 // [12] - Power-down mode - 0 (normal) // [11:9] - write recovery - write recovery for Auto Precharge // (tWR/tCK = 6) // [8] - DLL reset - 0 or 1 // [7] - Test Mode - 0 (normal) // [6:4] - CAS latency - CAS_LAT // [3] - Burst Type - BURST_TYPE // [2:0] - Burst Length - BURST_LEN //***************************************************************** generate if(DRAM_TYPE == "DDR3") begin: gen_load_mr0_DDR3 assign load_mr0[1:0] = (BURST_MODE == "8") ? 2'b00 : (BURST_MODE == "OTF") ? 2'b01 : (BURST_MODE == "4") ? 2'b10 : 2'b11; assign load_mr0[2] = (nCL >= 12) ? 1'b1 : 1'b0; // LSb of CAS latency assign load_mr0[3] = (BURST_TYPE == "SEQ") ? 1'b0 : 1'b1; assign load_mr0[6:4] = ((nCL == 5) || (nCL == 13)) ? 3'b001 : ((nCL == 6) || (nCL == 14)) ? 3'b010 : (nCL == 7) ? 3'b011 : (nCL == 8) ? 3'b100 : (nCL == 9) ? 3'b101 : (nCL == 10) ? 3'b110 : (nCL == 11) ? 3'b111 : (nCL == 12) ? 3'b000 : 3'b111; assign load_mr0[7] = 1'b0; assign load_mr0[8] = 1'b1; // Reset DLL (init only) assign load_mr0[11:9] = (TWR_CYC == 5) ? 3'b001 : (TWR_CYC == 6) ? 3'b010 : (TWR_CYC == 7) ? 3'b011 : (TWR_CYC == 8) ? 3'b100 : (TWR_CYC == 9) ? 3'b101 : (TWR_CYC == 10) ? 3'b101 : (TWR_CYC == 11) ? 3'b110 : (TWR_CYC == 12) ? 3'b110 : (TWR_CYC == 13) ? 3'b111 : (TWR_CYC == 14) ? 3'b111 : (TWR_CYC == 15) ? 3'b000 : (TWR_CYC == 16) ? 3'b000 : 3'b010; assign load_mr0[12] = 1'b0; // Precharge Power-Down DLL 'slow-exit' assign load_mr0[15:13] = 3'b000; end else if (DRAM_TYPE == "DDR2") begin: gen_load_mr0_DDR2 // block: gen assign load_mr0[2:0] = (BURST_MODE == "8") ? 3'b011 : (BURST_MODE == "4") ? 3'b010 : 3'b111; assign load_mr0[3] = (BURST_TYPE == "SEQ") ? 1'b0 : 1'b1; assign load_mr0[6:4] = (nCL == 3) ? 3'b011 : (nCL == 4) ? 3'b100 : (nCL == 5) ? 3'b101 : (nCL == 6) ? 3'b110 : 3'b111; assign load_mr0[7] = 1'b0; assign load_mr0[8] = 1'b1; // Reset DLL (init only) assign load_mr0[11:9] = (TWR_CYC == 2) ? 3'b001 : (TWR_CYC == 3) ? 3'b010 : (TWR_CYC == 4) ? 3'b011 : (TWR_CYC == 5) ? 3'b100 : (TWR_CYC == 6) ? 
3'b101 : 3'b010; assign load_mr0[15:12]= 4'b0000; // Reserved end endgenerate //***************************************************************** // DDR3 Load mode reg1 // Mode Register (MR1): // [15:13] - unused - 00 // [12] - output enable - 0 (enabled for DQ, DQS, DQS#) // [11] - TDQS enable - 0 (TDQS disabled and DM enabled) // [10] - reserved - 0 (must be '0') // [9] - RTT[2] - 0 // [8] - reserved - 0 (must be '0') // [7] - write leveling - 0 (disabled), 1 (enabled) // [6] - RTT[1] - RTT[1:0] = 0(no ODT), 1(75), 2(150), 3(50) // [5] - Output driver impedance[1] - 0 (RZQ/6 and RZQ/7) // [4:3] - Additive CAS - ADDITIVE_CAS // [2] - RTT[0] // [1] - Output driver impedance[0] - 0(RZQ/6), or 1 (RZQ/7) // [0] - DLL enable - 0 (normal) // DDR2 ext mode register // Extended Mode Register (MR): // [15:14] - unused - 00 // [13] - reserved - 0 // [12] - output enable - 0 (enabled) // [11] - RDQS enable - 0 (disabled) // [10] - DQS# enable - 0 (enabled) // [9:7] - OCD Program - 111 or 000 (first 111, then 000 during init) // [6] - RTT[1] - RTT[1:0] = 0(no ODT), 1(75), 2(150), 3(50) // [5:3] - Additive CAS - ADDITIVE_CAS // [2] - RTT[0] // [1] - Output drive - REDUCE_DRV (= 0(full), = 1 (reduced) // [0] - DLL enable - 0 (normal) //***************************************************************** generate if(DRAM_TYPE == "DDR3") begin: gen_load_mr1_DDR3 assign load_mr1[0] = 1'b0; // DLL enabled during Imitialization assign load_mr1[1] = (OUTPUT_DRV == "LOW") ? 1'b0 : 1'b1; assign load_mr1[2] = ((RTT_NOM_int == "30") || (RTT_NOM_int == "40") || (RTT_NOM_int == "60")) ? 1'b1 : 1'b0; assign load_mr1[4:3] = (AL == "0") ? 2'b00 : (AL == "CL-1") ? 2'b01 : (AL == "CL-2") ? 2'b10 : 2'b11; assign load_mr1[5] = 1'b0; assign load_mr1[6] = ((RTT_NOM_int == "40") || (RTT_NOM_int == "120")) ? 1'b1 : 1'b0; assign load_mr1[7] = 1'b0; // Enable write lvl after init sequence assign load_mr1[8] = 1'b0; assign load_mr1[9] = ((RTT_NOM_int == "20") || (RTT_NOM_int == "30")) ? 1'b1 : 1'b0; assign load_mr1[10] = 1'b0; assign load_mr1[15:11] = 5'b00000; end else if (DRAM_TYPE == "DDR2") begin: gen_load_mr1_DDR2 assign load_mr1[0] = 1'b0; // DLL enabled during Imitialization assign load_mr1[1] = (OUTPUT_DRV == "LOW") ? 1'b1 : 1'b0; assign load_mr1[2] = ((RTT_NOM_int == "75") || (RTT_NOM_int == "50")) ? 1'b1 : 1'b0; assign load_mr1[5:3] = (AL == "0") ? 3'b000 : (AL == "1") ? 3'b001 : (AL == "2") ? 3'b010 : (AL == "3") ? 3'b011 : (AL == "4") ? 3'b100 : 3'b111; assign load_mr1[6] = ((RTT_NOM_int == "50") || (RTT_NOM_int == "150")) ? 1'b1 : 1'b0; assign load_mr1[9:7] = 3'b000; assign load_mr1[10] = (DDR2_DQSN_ENABLE == "YES") ? 1'b0 : 1'b1; assign load_mr1[15:11] = 5'b00000; end endgenerate //***************************************************************** // DDR3 Load mode reg2 // Mode Register (MR2): // [15:11] - unused - 00 // [10:9] - RTT_WR - 00 (Dynamic ODT off) // [8] - reserved - 0 (must be '0') // [7] - self-refresh temperature range - // 0 (normal), 1 (extended) // [6] - Auto Self-Refresh - 0 (manual), 1(auto) // [5:3] - CAS Write Latency (CWL) - // 000 (5 for 400 MHz device), // 001 (6 for 400 MHz to 533 MHz devices), // 010 (7 for 533 MHz to 667 MHz devices), // 011 (8 for 667 MHz to 800 MHz) // [2:0] - Partial Array Self-Refresh (Optional) - // 000 (full array) // Not used for DDR2 //***************************************************************** generate if(DRAM_TYPE == "DDR3") begin: gen_load_mr2_DDR3 assign load_mr2[2:0] = 3'b000; assign load_mr2[5:3] = (nCWL == 5) ? 3'b000 : (nCWL == 6) ? 
3'b001 : (nCWL == 7) ? 3'b010 : (nCWL == 8) ? 3'b011 : (nCWL == 9) ? 3'b100 : (nCWL == 10) ? 3'b101 : (nCWL == 11) ? 3'b110 : 3'b111; assign load_mr2[6] = 1'b0; assign load_mr2[7] = 1'b0; assign load_mr2[8] = 1'b0; // Dynamic ODT disabled assign load_mr2[10:9] = 2'b00; assign load_mr2[15:11] = 5'b00000; end else begin: gen_load_mr2_DDR2 assign load_mr2[15:0] = 16'd0; end endgenerate //***************************************************************** // DDR3 Load mode reg3 // Mode Register (MR3): // [15:3] - unused - All zeros // [2] - MPR Operation - 0(normal operation), 1(data flow from MPR) // [1:0] - MPR location - 00 (Predefined pattern) //***************************************************************** assign load_mr3[1:0] = 2'b00; assign load_mr3[2] = 1'b0; assign load_mr3[15:3] = 13'b0000000000000; // For multi-rank systems the rank being accessed during writes in // Read Leveling must be sent to phy_write for the bitslip logic assign calib_rank_cnt = chip_cnt_r; //*************************************************************************** // Logic to begin initial calibration, and to handle precharge requests // during read-leveling (to avoid tRAS violations if individual read // levelling calibration stages take more than max{tRAS) to complete). //*************************************************************************** // Assert when readback for each stage of read-leveling begins. However, // note this indicates only when the read command is issued and when // Phaser_IN has phase aligned FREQ_REF clock to read DQS. It does not // indicate when the read data is present on the bus (when this happens // after the read command is issued depends on CAS LATENCY) - there will // need to be some delay before valid data is present on the bus. // assign rdlvl_start_pre = (init_state_r == INIT_PI_PHASELOCK_READS); // Assert when read back for oclkdelay calibration begins assign oclkdelay_calib_start_pre = (init_state_r == INIT_OCLKDELAY_READ); // Assert when read back for write calibration begins assign wrcal_start_pre = (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS); // Common precharge signal done signal - pulses only when there has been // a precharge issued as a result of a PRECH_REQ pulse. Note also a common // PRECH_DONE signal is used for all blocks assign prech_done_pre = (((init_state_r == INIT_RDLVL_STG1_READ) || ((rdlvl_last_byte_done_r || prbs_last_byte_done_r) && (init_state_r == INIT_RDLVL_ACT_WAIT) && cnt_cmd_done_r) || (dqs_found_prech_req && (init_state_r == INIT_RDLVL_ACT_WAIT)) || (init_state_r == INIT_MPR_RDEN) || ((init_state_r == INIT_WRCAL_ACT_WAIT) && cnt_cmd_done_r) || ((init_state_r == INIT_OCLKDELAY_ACT_WAIT) && cnt_cmd_done_r) || (wrlvl_final && (init_state_r == INIT_REFRESH_WAIT) && cnt_cmd_done_r && ~oclkdelay_calib_done)) && prech_pending_r && !prech_req_posedge_r); always @(posedge clk) if (rst) pi_phaselock_start <= #TCQ 1'b0; else if (init_state_r == INIT_PI_PHASELOCK_READS) pi_phaselock_start <= #TCQ 1'b1; // Delay start of each calibration by 16 clock cycles to ensure that when // calibration logic begins, read data is already appearing on the bus. // Each circuit should synthesize using an SRL16. Assume that reset is // long enough to clear contents of SRL16. 
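  //
  // Illustrative sketch of the SRL16-style delay described above (not part
  // of the original logic; "example_start_pre" and "example_start_dly_r"
  // are hypothetical names). A 16-bit shift register delays the *_pre pulse
  // so that read data is already on the bus when the delayed start asserts:
  //
  //   reg [15:0] example_start_dly_r;
  //   always @(posedge clk)
  //     example_start_dly_r <= #TCQ {example_start_dly_r[14:0],
  //                                  example_start_pre};
  //   // example_start_dly_r[15] asserts 16 cycles after example_start_pre
  //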
always @(posedge clk) begin rdlvl_last_byte_done_r <= #TCQ rdlvl_last_byte_done; prbs_last_byte_done_r <= #TCQ prbs_last_byte_done; rdlvl_start_dly0_r <= #TCQ {rdlvl_start_dly0_r[14:0], rdlvl_start_pre}; wrcal_start_dly_r <= #TCQ {wrcal_start_dly_r[14:0], wrcal_start_pre}; oclkdelay_start_dly_r <= #TCQ {oclkdelay_start_dly_r[14:0], oclkdelay_calib_start_pre}; prech_done_dly_r <= #TCQ {prech_done_dly_r[14:0], prech_done_pre}; end always @(posedge clk) prech_done <= #TCQ prech_done_dly_r[15]; always @(posedge clk) if (rst) mpr_rdlvl_start <= #TCQ 1'b0; else if (pi_dqs_found_done && (init_state_r == INIT_MPR_READ)) mpr_rdlvl_start <= #TCQ 1'b1; always @(posedge clk) phy_if_empty_r <= #TCQ phy_if_empty; always @(posedge clk) if (rst || (phy_if_empty_r && prbs_rdlvl_prech_req) || ((stg1_wr_rd_cnt == 'd1) && ~stg1_wr_done) || prbs_rdlvl_done) prbs_gen_clk_en <= #TCQ 1'b0; else if ((~phy_if_empty_r && rdlvl_stg1_done_r1 && ~prbs_rdlvl_done) || ((init_state_r == INIT_RDLVL_ACT_WAIT) && rdlvl_stg1_done_r1 && (cnt_cmd_r == 'd0))) prbs_gen_clk_en <= #TCQ 1'b1; generate if (RANKS < 2) begin always @(posedge clk) if (rst) begin rdlvl_stg1_start <= #TCQ 1'b0; rdlvl_stg1_start_int <= #TCQ 1'b0; rdlvl_start_pre <= #TCQ 1'b0; prbs_rdlvl_start <= #TCQ 1'b0; end else begin if (pi_dqs_found_done && cnt_cmd_done_r && (init_state_r == INIT_RDLVL_ACT_WAIT)) rdlvl_stg1_start_int <= #TCQ 1'b1; if (pi_dqs_found_done && (init_state_r == INIT_RDLVL_STG1_READ))begin rdlvl_start_pre <= #TCQ 1'b1; rdlvl_stg1_start <= #TCQ rdlvl_start_dly0_r[14]; end if (pi_dqs_found_done && rdlvl_stg1_done && (init_state_r == INIT_RDLVL_STG1_READ) && (WRLVL == "ON")) begin prbs_rdlvl_start <= #TCQ 1'b1; end end end else begin always @(posedge clk) if (rst || rdlvl_stg1_rank_done) begin rdlvl_stg1_start <= #TCQ 1'b0; rdlvl_stg1_start_int <= #TCQ 1'b0; rdlvl_start_pre <= #TCQ 1'b0; prbs_rdlvl_start <= #TCQ 1'b0; end else begin if (pi_dqs_found_done && cnt_cmd_done_r && (init_state_r == INIT_RDLVL_ACT_WAIT)) rdlvl_stg1_start_int <= #TCQ 1'b1; if (pi_dqs_found_done && (init_state_r == INIT_RDLVL_STG1_READ))begin rdlvl_start_pre <= #TCQ 1'b1; rdlvl_stg1_start <= #TCQ rdlvl_start_dly0_r[14]; end if (pi_dqs_found_done && rdlvl_stg1_done && (init_state_r == INIT_RDLVL_STG1_READ) && (WRLVL == "ON")) begin prbs_rdlvl_start <= #TCQ 1'b1; end end end endgenerate always @(posedge clk) begin if (rst || dqsfound_retry || wrlvl_byte_redo) begin pi_dqs_found_start <= #TCQ 1'b0; wrcal_start <= #TCQ 1'b0; end else begin if (!pi_dqs_found_done && init_state_r == INIT_RDLVL_STG2_READ) pi_dqs_found_start <= #TCQ 1'b1; if (wrcal_start_dly_r[5]) wrcal_start <= #TCQ 1'b1; end end // else: !if(rst) always @(posedge clk) if (rst) oclkdelay_calib_start <= #TCQ 1'b0; else if (oclkdelay_start_dly_r[5]) oclkdelay_calib_start <= #TCQ 1'b1; always @(posedge clk) if (rst) pi_dqs_found_done_r1 <= #TCQ 1'b0; else pi_dqs_found_done_r1 <= #TCQ pi_dqs_found_done; always @(posedge clk) wrlvl_final_r <= #TCQ wrlvl_final; // Reset IN_FIFO after final write leveling to make sure the FIFO // pointers are initialized always @(posedge clk) if (rst || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_REFRESH)) wrlvl_final_if_rst <= #TCQ 1'b0; else if (wrlvl_done_r && //(wrlvl_final_r && wrlvl_done_r && (init_state_r == INIT_WRLVL_LOAD_MR2)) wrlvl_final_if_rst <= #TCQ 1'b1; // Constantly enable DQS while write leveling is enabled in the memory // This is more to get rid of warnings in simulation, can later change // this code to only enable WRLVL_ACTIVE when WRLVL_START is 
asserted always @(posedge clk) if (rst || ((init_state_r1 != INIT_WRLVL_START) && (init_state_r == INIT_WRLVL_START))) wrlvl_odt_ctl <= #TCQ 1'b0; else if (wrlvl_rank_done && ~wrlvl_rank_done_r1) wrlvl_odt_ctl <= #TCQ 1'b1; generate if (nCK_PER_CLK == 4) begin: en_cnt_div4 always @ (posedge clk) if (rst) enable_wrlvl_cnt <= #TCQ 5'd0; else if ((init_state_r == INIT_WRLVL_START) || (wrlvl_odt && (enable_wrlvl_cnt == 5'd0))) enable_wrlvl_cnt <= #TCQ 5'd12; else if ((enable_wrlvl_cnt > 5'd0) && ~(phy_ctl_full || phy_cmd_full)) enable_wrlvl_cnt <= #TCQ enable_wrlvl_cnt - 1; // ODT stays asserted as long as write_calib // signal is asserted always @(posedge clk) if (rst || wrlvl_odt_ctl) wrlvl_odt <= #TCQ 1'b0; else if (enable_wrlvl_cnt == 5'd1) wrlvl_odt <= #TCQ 1'b1; end else begin: en_cnt_div2 always @ (posedge clk) if (rst) enable_wrlvl_cnt <= #TCQ 5'd0; else if ((init_state_r == INIT_WRLVL_START) || (wrlvl_odt && (enable_wrlvl_cnt == 5'd0))) enable_wrlvl_cnt <= #TCQ 5'd21; else if ((enable_wrlvl_cnt > 5'd0) && ~(phy_ctl_full || phy_cmd_full)) enable_wrlvl_cnt <= #TCQ enable_wrlvl_cnt - 1; // ODT stays asserted as long as write_calib // signal is asserted always @(posedge clk) if (rst || wrlvl_odt_ctl) wrlvl_odt <= #TCQ 1'b0; else if (enable_wrlvl_cnt == 5'd1) wrlvl_odt <= #TCQ 1'b1; end endgenerate always @(posedge clk) if (rst || wrlvl_rank_done || done_dqs_tap_inc) wrlvl_active <= #TCQ 1'b0; else if ((enable_wrlvl_cnt == 5'd1) && wrlvl_odt && !wrlvl_active) wrlvl_active <= #TCQ 1'b1; // signal used to assert DQS for write leveling. // the DQS will be asserted once every 16 clock cycles. always @(posedge clk)begin if(rst || (enable_wrlvl_cnt != 5'd1)) begin wr_level_dqs_asrt <= #TCQ 1'd0; end else if ((enable_wrlvl_cnt == 5'd1) && (wrlvl_active_r1)) begin wr_level_dqs_asrt <= #TCQ 1'd1; end end always @ (posedge clk) begin if (rst || (wrlvl_done_r && ~wrlvl_done_r1)) dqs_asrt_cnt <= #TCQ 2'd0; else if (wr_level_dqs_asrt && dqs_asrt_cnt != 2'd3) dqs_asrt_cnt <= #TCQ (dqs_asrt_cnt + 1); end always @ (posedge clk) begin if (rst || ~wrlvl_active) wr_lvl_start <= #TCQ 1'd0; else if (dqs_asrt_cnt == 2'd3) wr_lvl_start <= #TCQ 1'd1; end always @(posedge clk) begin if (rst) wl_sm_start <= #TCQ 1'b0; else wl_sm_start <= #TCQ wr_level_dqs_asrt_r1; end always @(posedge clk) begin wrlvl_active_r1 <= #TCQ wrlvl_active; wr_level_dqs_asrt_r1 <= #TCQ wr_level_dqs_asrt; wrlvl_done_r <= #TCQ wrlvl_done; wrlvl_done_r1 <= #TCQ wrlvl_done_r; wrlvl_rank_done_r1 <= #TCQ wrlvl_rank_done; wrlvl_rank_done_r2 <= #TCQ wrlvl_rank_done_r1; wrlvl_rank_done_r3 <= #TCQ wrlvl_rank_done_r2; wrlvl_rank_done_r4 <= #TCQ wrlvl_rank_done_r3; wrlvl_rank_done_r5 <= #TCQ wrlvl_rank_done_r4; wrlvl_rank_done_r6 <= #TCQ wrlvl_rank_done_r5; wrlvl_rank_done_r7 <= #TCQ wrlvl_rank_done_r6; end always @ (posedge clk) begin //if (rst) wrlvl_rank_cntr <= #TCQ 3'd0; //else if (wrlvl_rank_done) // wrlvl_rank_cntr <= #TCQ wrlvl_rank_cntr + 1'b1; end //***************************************************************** // Precharge request logic - those calibration logic blocks // that require greater than tRAS(max) to finish must break up // their calibration into smaller units of time, with precharges // issued in between. 
This is done using the XXX_PRECH_REQ and // PRECH_DONE handshaking between PHY_INIT and those blocks //***************************************************************** // Shared request from multiple sources assign prech_req = oclk_prech_req | rdlvl_prech_req | wrcal_prech_req | prbs_rdlvl_prech_req | (dqs_found_prech_req & (init_state_r == INIT_RDLVL_STG2_READ_WAIT)); // Handshaking logic to force precharge during read leveling, and to // notify read leveling logic when precharge has been initiated and // it's okay to proceed with leveling again always @(posedge clk) if (rst) begin prech_req_r <= #TCQ 1'b0; prech_req_posedge_r <= #TCQ 1'b0; prech_pending_r <= #TCQ 1'b0; end else begin prech_req_r <= #TCQ prech_req; prech_req_posedge_r <= #TCQ prech_req & ~prech_req_r; if (prech_req_posedge_r) prech_pending_r <= #TCQ 1'b1; // Clear after we've finished with the precharge and have // returned to issuing read leveling calibration reads else if (prech_done_pre) prech_pending_r <= #TCQ 1'b0; end //*************************************************************************** // Various timing counters //*************************************************************************** //***************************************************************** // Generic delay for various states that require it (e.g. for turnaround // between read and write). Make this a sufficiently large number of clock // cycles to cover all possible frequencies and memory components) // Requirements for this counter: // 1. Greater than tMRD // 2. tRFC (refresh-active) for DDR2 // 3. (list the other requirements, slacker...) //***************************************************************** always @(posedge clk) begin case (init_state_r) INIT_LOAD_MR_WAIT, INIT_WRLVL_LOAD_MR_WAIT, INIT_WRLVL_LOAD_MR2_WAIT, INIT_MPR_WAIT, INIT_MPR_DISABLE_PREWAIT, INIT_MPR_DISABLE_WAIT, INIT_OCLKDELAY_ACT_WAIT, INIT_OCLKDELAY_WRITE_WAIT, INIT_RDLVL_ACT_WAIT, INIT_RDLVL_STG1_WRITE_READ, INIT_RDLVL_STG2_READ_WAIT, INIT_WRCAL_ACT_WAIT, INIT_WRCAL_WRITE_READ, INIT_WRCAL_READ_WAIT, INIT_PRECHARGE_PREWAIT, INIT_PRECHARGE_WAIT, INIT_DDR2_PRECHARGE_WAIT, INIT_REG_WRITE_WAIT, INIT_REFRESH_WAIT, INIT_REFRESH_RNK2_WAIT: begin if (phy_ctl_full || phy_cmd_full) cnt_cmd_r <= #TCQ cnt_cmd_r; else cnt_cmd_r <= #TCQ cnt_cmd_r + 1; end INIT_WRLVL_WAIT: cnt_cmd_r <= #TCQ 'b0; default: cnt_cmd_r <= #TCQ 'b0; endcase end // pulse when count reaches terminal count always @(posedge clk) cnt_cmd_done_r <= #TCQ (cnt_cmd_r == CNTNEXT_CMD); // For ODT deassertion - hold throughout post read/write wait stage, but // deassert before next command. The post read/write stage is very long, so // we simply address the longest case here plus some margin. 
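  //
  // Illustrative sketch of the "terminal count minus margin" trick used for
  // the early flag below (not part of the original logic; EXAMPLE_ODT_LEAD
  // and example_done_early_r are hypothetical names). Comparing against
  // CNTNEXT_CMD minus a fixed lead raises a pulse a few cycles before
  // cnt_cmd_done_r, leaving time to deassert ODT before the next command:
  //
  //   localparam EXAMPLE_ODT_LEAD = 7;
  //   reg example_done_early_r;
  //   always @(posedge clk)
  //     example_done_early_r <= #TCQ (cnt_cmd_r ==
  //                                   (CNTNEXT_CMD - EXAMPLE_ODT_LEAD));
  //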
always @(posedge clk) cnt_cmd_done_m7_r <= #TCQ (cnt_cmd_r == (CNTNEXT_CMD - 7)); //************************************************************************ // Added to support PO fine delay inc when TG errors always @(posedge clk) begin case (init_state_r) INIT_WRCAL_READ_WAIT: begin if (phy_ctl_full || phy_cmd_full) cnt_wait <= #TCQ cnt_wait; else cnt_wait <= #TCQ cnt_wait + 1; end default: cnt_wait <= #TCQ 'b0; endcase end always @(posedge clk) cnt_wrcal_rd <= #TCQ (cnt_wait == 'd4); always @(posedge clk) begin if (rst || ~temp_wrcal_done) temp_lmr_done <= #TCQ 1'b0; else if (temp_wrcal_done && (init_state_r == INIT_LOAD_MR)) temp_lmr_done <= #TCQ 1'b1; end always @(posedge clk) temp_wrcal_done_r <= #TCQ temp_wrcal_done; always @(posedge clk) if (rst) begin tg_timer_go <= #TCQ 1'b0; end else if ((PRE_REV3ES == "ON") && temp_wrcal_done && temp_lmr_done && (init_state_r == INIT_WRCAL_READ_WAIT)) begin tg_timer_go <= #TCQ 1'b1; end else begin tg_timer_go <= #TCQ 1'b0; end always @(posedge clk) begin if (rst || (temp_wrcal_done && ~temp_wrcal_done_r) || (init_state_r == INIT_PRECHARGE_PREWAIT)) tg_timer <= #TCQ 'd0; else if ((pi_phaselock_timer == PHASELOCKED_TIMEOUT) && tg_timer_go && (tg_timer != TG_TIMER_TIMEOUT)) tg_timer <= #TCQ tg_timer + 1; end always @(posedge clk) begin if (rst) tg_timer_done <= #TCQ 1'b0; else if (tg_timer == TG_TIMER_TIMEOUT) tg_timer_done <= #TCQ 1'b1; else tg_timer_done <= #TCQ 1'b0; end always @(posedge clk) begin if (rst) no_rst_tg_mc <= #TCQ 1'b0; else if ((init_state_r == INIT_WRCAL_ACT) && wrcal_read_req) no_rst_tg_mc <= #TCQ 1'b1; else no_rst_tg_mc <= #TCQ 1'b0; end //************************************************************************ always @(posedge clk) begin if (rst) detect_pi_found_dqs <= #TCQ 1'b0; else if ((cnt_cmd_r == 7'b0111111) && (init_state_r == INIT_RDLVL_STG2_READ_WAIT)) detect_pi_found_dqs <= #TCQ 1'b1; else detect_pi_found_dqs <= #TCQ 1'b0; end //***************************************************************** // Initial delay after power-on for RESET, CKE // NOTE: Could reduce power consumption by turning off these counters // after initial power-up (at expense of more logic) // NOTE: Likely can combine multiple counters into single counter //***************************************************************** // Create divided by 1024 version of clock always @(posedge clk) if (rst) begin cnt_pwron_ce_r <= #TCQ 10'h000; pwron_ce_r <= #TCQ 1'b0; end else begin cnt_pwron_ce_r <= #TCQ cnt_pwron_ce_r + 1; pwron_ce_r <= #TCQ (cnt_pwron_ce_r == 10'h3FF); end // "Main" power-on counter - ticks every CLKDIV/1024 cycles always @(posedge clk) if (rst) cnt_pwron_r <= #TCQ 'b0; else if (pwron_ce_r) cnt_pwron_r <= #TCQ cnt_pwron_r + 1; always @(posedge clk) if (rst || ~phy_ctl_ready) begin cnt_pwron_reset_done_r <= #TCQ 1'b0; cnt_pwron_cke_done_r <= #TCQ 1'b0; end else begin // skip power-up count for simulation purposes only if ((SIM_INIT_OPTION == "SKIP_PU_DLY") || (SIM_INIT_OPTION == "SKIP_INIT")) begin cnt_pwron_reset_done_r <= #TCQ 1'b1; cnt_pwron_cke_done_r <= #TCQ 1'b1; end else begin // otherwise, create latched version of done signal for RESET, CKE if (DRAM_TYPE == "DDR3") begin if (!cnt_pwron_reset_done_r) cnt_pwron_reset_done_r <= #TCQ (cnt_pwron_r == PWRON_RESET_DELAY_CNT); if (!cnt_pwron_cke_done_r) cnt_pwron_cke_done_r <= #TCQ (cnt_pwron_r == PWRON_CKE_DELAY_CNT); end else begin // DDR2 cnt_pwron_reset_done_r <= #TCQ 1'b1; // not needed if (!cnt_pwron_cke_done_r) cnt_pwron_cke_done_r <= #TCQ (cnt_pwron_r == PWRON_CKE_DELAY_CNT); end 
    end
  end // else: !if(rst || ~phy_ctl_ready)

  always @(posedge clk)
    cnt_pwron_cke_done_r1 <= #TCQ cnt_pwron_cke_done_r;

  // Keep RESET asserted and CKE deasserted until after power-on delay
  always @(posedge clk or posedge rst) begin
    if (rst)
      phy_reset_n <= #TCQ 1'b0;
    else
      phy_reset_n <= #TCQ cnt_pwron_reset_done_r;
    // phy_cke <= #TCQ {CKE_WIDTH{cnt_pwron_cke_done_r}};
  end

  //*****************************************************************
  // Counter for tXPR (pronounced "Tax-Payer") - wait time after
  // CKE deassertion before first MRS command can be asserted
  //*****************************************************************
  always @(posedge clk)
    if (!cnt_pwron_cke_done_r) begin
      cnt_txpr_r      <= #TCQ 'b0;
      cnt_txpr_done_r <= #TCQ 1'b0;
    end else begin
      cnt_txpr_r <= #TCQ cnt_txpr_r + 1;
      if (!cnt_txpr_done_r)
        cnt_txpr_done_r <= #TCQ (cnt_txpr_r == TXPR_DELAY_CNT);
    end

  //*****************************************************************
  // Counter for the initial 400ns wait for issuing precharge all
  // command after CKE assertion. Only for DDR2.
  //*****************************************************************
  always @(posedge clk)
    if (!cnt_pwron_cke_done_r) begin
      cnt_init_pre_wait_r      <= #TCQ 'b0;
      cnt_init_pre_wait_done_r <= #TCQ 1'b0;
    end else begin
      cnt_init_pre_wait_r <= #TCQ cnt_init_pre_wait_r + 1;
      if (!cnt_init_pre_wait_done_r)
        cnt_init_pre_wait_done_r
          <= #TCQ (cnt_init_pre_wait_r >= DDR2_INIT_PRE_CNT);
    end

  //*****************************************************************
  // Wait for both DLL to lock (tDLLK) and ZQ calibration to finish
  // (tZQINIT). Both take the same amount of time (512*tCK)
  //*****************************************************************
  always @(posedge clk)
    if (init_state_r == INIT_ZQCL) begin
      cnt_dllk_zqinit_r      <= #TCQ 'b0;
      cnt_dllk_zqinit_done_r <= #TCQ 1'b0;
    end else if (~(phy_ctl_full || phy_cmd_full)) begin
      cnt_dllk_zqinit_r <= #TCQ cnt_dllk_zqinit_r + 1;
      if (!cnt_dllk_zqinit_done_r)
        cnt_dllk_zqinit_done_r
          <= #TCQ (cnt_dllk_zqinit_r == TDLLK_TZQINIT_DELAY_CNT);
    end

  //*****************************************************************
  // Keep track of which MRS counter needs to be programmed during
  // memory initialization.
  // The counter and the done signal are reset an additional time
  // for DDR2. The same signals are used for the additional DDR2
  // initialization sequence.
//***************************************************************** always @(posedge clk) if ((init_state_r == INIT_IDLE)|| ((init_state_r == INIT_REFRESH) && (~mem_init_done_r))) begin cnt_init_mr_r <= #TCQ 'b0; cnt_init_mr_done_r <= #TCQ 1'b0; end else if (init_state_r == INIT_LOAD_MR) begin cnt_init_mr_r <= #TCQ cnt_init_mr_r + 1; cnt_init_mr_done_r <= #TCQ (cnt_init_mr_r == INIT_CNT_MR_DONE); end //***************************************************************** // Flag to tell if the first precharge for DDR2 init sequence is // done //***************************************************************** always @(posedge clk) if (init_state_r == INIT_IDLE) ddr2_pre_flag_r<= #TCQ 'b0; else if (init_state_r == INIT_LOAD_MR) ddr2_pre_flag_r<= #TCQ 1'b1; // reset the flag for multi rank case else if ((ddr2_refresh_flag_r) && (init_state_r == INIT_LOAD_MR_WAIT)&& (cnt_cmd_done_r) && (cnt_init_mr_done_r)) ddr2_pre_flag_r <= #TCQ 'b0; //***************************************************************** // Flag to tell if the refresh stat for DDR2 init sequence is // reached //***************************************************************** always @(posedge clk) if (init_state_r == INIT_IDLE) ddr2_refresh_flag_r<= #TCQ 'b0; else if ((init_state_r == INIT_REFRESH) && (~mem_init_done_r)) // reset the flag for multi rank case ddr2_refresh_flag_r<= #TCQ 1'b1; else if ((ddr2_refresh_flag_r) && (init_state_r == INIT_LOAD_MR_WAIT)&& (cnt_cmd_done_r) && (cnt_init_mr_done_r)) ddr2_refresh_flag_r <= #TCQ 'b0; //***************************************************************** // Keep track of the number of auto refreshes for DDR2 // initialization. The spec asks for a minimum of two refreshes. // Four refreshes are performed here. The two extra refreshes is to // account for the 200 clock cycle wait between step h and l. // Without the two extra refreshes we would have to have a // wait state. 
//***************************************************************** always @(posedge clk) if (init_state_r == INIT_IDLE) begin cnt_init_af_r <= #TCQ 'b0; cnt_init_af_done_r <= #TCQ 1'b0; end else if ((init_state_r == INIT_REFRESH) && (~mem_init_done_r))begin cnt_init_af_r <= #TCQ cnt_init_af_r + 1; cnt_init_af_done_r <= #TCQ (cnt_init_af_r == 2'b11); end //***************************************************************** // Keep track of the register control word programming for // DDR3 RDIMM //***************************************************************** always @(posedge clk) if (init_state_r == INIT_IDLE) reg_ctrl_cnt_r <= #TCQ 'b0; else if (init_state_r == INIT_REG_WRITE) reg_ctrl_cnt_r <= #TCQ reg_ctrl_cnt_r + 1; generate if (RANKS < 2) begin: one_rank always @(posedge clk) if ((init_state_r == INIT_IDLE) || rdlvl_last_byte_done) stg1_wr_done <= #TCQ 1'b0; else if (init_state_r == INIT_RDLVL_STG1_WRITE_READ) stg1_wr_done <= #TCQ 1'b1; end else begin: two_ranks always @(posedge clk) if ((init_state_r == INIT_IDLE) || rdlvl_last_byte_done || (rdlvl_stg1_rank_done )) stg1_wr_done <= #TCQ 1'b0; else if (init_state_r == INIT_RDLVL_STG1_WRITE_READ) stg1_wr_done <= #TCQ 1'b1; end endgenerate always @(posedge clk) if (rst) rnk_ref_cnt <= #TCQ 1'b0; else if (stg1_wr_done && (init_state_r == INIT_REFRESH_WAIT) && cnt_cmd_done_r) rnk_ref_cnt <= #TCQ ~rnk_ref_cnt; always @(posedge clk) if (rst || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_RDLVL_ACT)) num_refresh <= #TCQ 'd0; else if ((init_state_r == INIT_REFRESH) && (~pi_dqs_found_done || ((DRAM_TYPE == "DDR3") && ~oclkdelay_calib_done) || (rdlvl_stg1_done && ~prbs_rdlvl_done) || ((CLK_PERIOD/nCK_PER_CLK <= 2500) && wrcal_done && ~rdlvl_stg1_done) || ((CLK_PERIOD/nCK_PER_CLK > 2500) && wrlvl_done_r1 && ~rdlvl_stg1_done))) num_refresh <= #TCQ num_refresh + 1; //*************************************************************************** // Initialization state machine //*************************************************************************** //***************************************************************** // Next-state logic //***************************************************************** always @(posedge clk) if (rst)begin init_state_r <= #TCQ INIT_IDLE; init_state_r1 <= #TCQ INIT_IDLE; end else begin init_state_r <= #TCQ init_next_state; init_state_r1 <= #TCQ init_state_r; end always @(burst_addr_r or chip_cnt_r or cnt_cmd_done_r or cnt_dllk_zqinit_done_r or cnt_init_af_done_r or cnt_init_mr_done_r or phy_ctl_ready or phy_ctl_full or stg1_wr_done or rdlvl_last_byte_done or phy_cmd_full or num_reads or rnk_ref_cnt or mpr_last_byte_done or oclk_wr_cnt or mpr_rdlvl_done or mpr_rnk_done or num_refresh or oclkdelay_calib_done or oclk_prech_req or oclk_calib_resume or wrlvl_byte_redo or wrlvl_byte_done or wrlvl_final or wrlvl_final_r or cnt_init_pre_wait_done_r or cnt_pwron_cke_done_r or delay_incdec_done or wrcal_wr_cnt or ck_addr_cmd_delay_done or wrcal_read_req or wrcal_reads or cnt_wrcal_rd or wrcal_act_req or temp_wrcal_done or temp_lmr_done or cnt_txpr_done_r or ddr2_pre_flag_r or ddr2_refresh_flag_r or ddr3_lm_done_r or init_state_r or mem_init_done_r or dqsfound_retry or dqs_found_prech_req or prech_req_posedge_r or prech_req_r or wrcal_done or wrcal_resume_r or rdlvl_stg1_done or rdlvl_stg1_done_r1 or rdlvl_stg1_rank_done or rdlvl_stg1_start_int or prbs_rdlvl_done or prbs_last_byte_done or prbs_rdlvl_done_r1 or stg1_wr_rd_cnt or rdlvl_prech_req or wrcal_prech_req or 
read_calib_int or read_calib_r or pi_calib_done_r1 or pi_phase_locked_all_r3 or pi_phase_locked_all_r4 or pi_dqs_found_done or pi_dqs_found_rank_done or pi_dqs_found_start or reg_ctrl_cnt_r or wrlvl_done_r1 or wrlvl_rank_done_r7 or wrcal_final_chk or wrcal_sanity_chk_done) begin init_next_state = init_state_r; (* full_case, parallel_case *) case (init_state_r) //******************************************************* // DRAM initialization //******************************************************* // Initial state - wait for: // 1. Power-on delays to pass // 2. PHY Control Block to assert phy_ctl_ready // 3. PHY Control FIFO must not be FULL // 4. Read path initialization to finish INIT_IDLE: if (cnt_pwron_cke_done_r && phy_ctl_ready && ck_addr_cmd_delay_done && delay_incdec_done && ~(phy_ctl_full || phy_cmd_full) ) begin // If skipping memory initialization (simulation only) if (SIM_INIT_OPTION == "SKIP_INIT") //if (WRLVL == "ON") // Proceed to write leveling // init_next_state = INIT_WRLVL_START; //else //if (SIM_CAL_OPTION != "SKIP_CAL") // Proceed to Phaser_In phase lock init_next_state = INIT_RDLVL_ACT; // else // Skip read leveling //init_next_state = INIT_DONE; else init_next_state = INIT_WAIT_CKE_EXIT; end // Wait minimum of Reset CKE exit time (tXPR = max(tXS, INIT_WAIT_CKE_EXIT: if ((cnt_txpr_done_r) && (DRAM_TYPE == "DDR3") && ~(phy_ctl_full || phy_cmd_full)) begin if((REG_CTRL == "ON") && ((nCS_PER_RANK > 1) || (RANKS > 1))) //register write for reg dimm. Some register chips // have the register chip in a pre-programmed state // in that case the nCS_PER_RANK == 1 && RANKS == 1 init_next_state = INIT_REG_WRITE; else // Load mode register - this state is repeated multiple times init_next_state = INIT_LOAD_MR; end else if ((cnt_init_pre_wait_done_r) && (DRAM_TYPE == "DDR2") && ~(phy_ctl_full || phy_cmd_full)) // DDR2 start with a precharge all command init_next_state = INIT_DDR2_PRECHARGE; INIT_REG_WRITE: init_next_state = INIT_REG_WRITE_WAIT; INIT_REG_WRITE_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin if(reg_ctrl_cnt_r == 3'd5) init_next_state = INIT_LOAD_MR; else init_next_state = INIT_REG_WRITE; end INIT_LOAD_MR: init_next_state = INIT_LOAD_MR_WAIT; // After loading MR, wait at least tMRD INIT_LOAD_MR_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin // If finished loading all mode registers, proceed to next step if (prbs_rdlvl_done && pi_dqs_found_done && rdlvl_stg1_done) // for ddr3 when the correct burst length is writtern at end init_next_state = INIT_PRECHARGE; else if (~wrcal_done && temp_lmr_done) init_next_state = INIT_PRECHARGE_PREWAIT; else if (cnt_init_mr_done_r)begin if(DRAM_TYPE == "DDR3") init_next_state = INIT_ZQCL; else begin //DDR2 if(ddr2_refresh_flag_r)begin // memory initialization per rank for multi-rank case if (!mem_init_done_r && (chip_cnt_r <= RANKS-1)) init_next_state = INIT_DDR2_MULTI_RANK; else init_next_state = INIT_RDLVL_ACT; // ddr2 initialization done.load mode state after refresh end else init_next_state = INIT_DDR2_PRECHARGE; end end else init_next_state = INIT_LOAD_MR; end // DDR2 multi rank transition state INIT_DDR2_MULTI_RANK: init_next_state = INIT_DDR2_MULTI_RANK_WAIT; INIT_DDR2_MULTI_RANK_WAIT: init_next_state = INIT_DDR2_PRECHARGE; // Initial ZQ calibration INIT_ZQCL: init_next_state = INIT_WAIT_DLLK_ZQINIT; // Wait until both DLL have locked, and ZQ calibration done INIT_WAIT_DLLK_ZQINIT: if (cnt_dllk_zqinit_done_r && ~(phy_ctl_full || phy_cmd_full)) // memory initialization per rank for multi-rank 
case if (!mem_init_done_r && (chip_cnt_r <= RANKS-1)) init_next_state = INIT_LOAD_MR; //else if (WRLVL == "ON") // init_next_state = INIT_WRLVL_START; else // skip write-leveling (e.g. for DDR2 interface) init_next_state = INIT_RDLVL_ACT; // Initial precharge for DDR2 INIT_DDR2_PRECHARGE: init_next_state = INIT_DDR2_PRECHARGE_WAIT; INIT_DDR2_PRECHARGE_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin if (ddr2_pre_flag_r) init_next_state = INIT_REFRESH; else // from precharge state initially go to load mode init_next_state = INIT_LOAD_MR; end INIT_REFRESH: if ((RANKS == 2) && (chip_cnt_r == RANKS - 1)) init_next_state = INIT_REFRESH_RNK2_WAIT; else init_next_state = INIT_REFRESH_WAIT; INIT_REFRESH_RNK2_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_PRECHARGE; INIT_REFRESH_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full))begin if(cnt_init_af_done_r && (~mem_init_done_r)) // go to lm state as part of DDR2 init sequence init_next_state = INIT_LOAD_MR; else if (pi_dqs_found_done && ~wrlvl_done_r1 && ~wrlvl_final && ~wrlvl_byte_redo && (WRLVL == "ON")) init_next_state = INIT_WRLVL_START; else if (~pi_dqs_found_done || (rdlvl_stg1_done && ~prbs_rdlvl_done) || ((CLK_PERIOD/nCK_PER_CLK <= 2500) && wrcal_done && ~rdlvl_stg1_done) || ((CLK_PERIOD/nCK_PER_CLK > 2500) && wrlvl_done_r1 && ~rdlvl_stg1_done)) begin if (num_refresh == 'd8) init_next_state = INIT_RDLVL_ACT; else init_next_state = INIT_REFRESH; end else if ((~wrcal_done && wrlvl_byte_redo)&& (DRAM_TYPE == "DDR3") && (CLK_PERIOD/nCK_PER_CLK > 2500)) init_next_state = INIT_WRLVL_LOAD_MR2; else if (((prbs_rdlvl_done && rdlvl_stg1_done && pi_dqs_found_done) && (WRLVL == "ON")) && mem_init_done_r && (CLK_PERIOD/nCK_PER_CLK > 2500)) init_next_state = INIT_WRCAL_ACT; else if (pi_dqs_found_done && (DRAM_TYPE == "DDR3") && ~(mpr_last_byte_done || mpr_rdlvl_done)) begin if (num_refresh == 'd8) init_next_state = INIT_MPR_RDEN; else init_next_state = INIT_REFRESH; end else if (((~oclkdelay_calib_done && wrlvl_final) || (~wrcal_done && wrlvl_byte_redo)) && (DRAM_TYPE == "DDR3")) init_next_state = INIT_WRLVL_LOAD_MR2; else if (~oclkdelay_calib_done && (mpr_last_byte_done || mpr_rdlvl_done) && (DRAM_TYPE == "DDR3")) begin if (num_refresh == 'd8) init_next_state = INIT_OCLKDELAY_ACT; else init_next_state = INIT_REFRESH; end else if ((~wrcal_done && (WRLVL == "ON") && (CLK_PERIOD/nCK_PER_CLK <= 2500)) && pi_dqs_found_done) init_next_state = INIT_WRCAL_ACT; else if (mem_init_done_r) begin if (RANKS < 2) init_next_state = INIT_RDLVL_ACT; else if (stg1_wr_done && ~rnk_ref_cnt && ~rdlvl_stg1_done) init_next_state = INIT_PRECHARGE; else init_next_state = INIT_RDLVL_ACT; end else // to DDR2 init state as part of DDR2 init sequence init_next_state = INIT_REFRESH; end //****************************************************** // Write Leveling //******************************************************* // Enable write leveling in MR1 and start write leveling // for current rank INIT_WRLVL_START: init_next_state = INIT_WRLVL_WAIT; // Wait for both MR load and write leveling to complete // (write leveling should take much longer than MR load..) 
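        // (Note: the exit condition below uses wrlvl_rank_done_r7, i.e.
        // wrlvl_rank_done delayed through the 7-stage register pipeline
        // built earlier in this module, presumably to let the leveling
        // logic settle before MR1 is reloaded. A minimal sketch of such a
        // done-delay pipeline, with a hypothetical name:
        //   reg [6:0] example_done_pipe;
        //   always @(posedge clk)
        //     example_done_pipe <= #TCQ {example_done_pipe[5:0],
        //                                wrlvl_rank_done};
        //   // example_done_pipe[6] is wrlvl_rank_done delayed by 7 cycles
        // )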
INIT_WRLVL_WAIT: if (wrlvl_rank_done_r7 && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_WRLVL_LOAD_MR; // Disable write leveling in MR1 for current rank INIT_WRLVL_LOAD_MR: init_next_state = INIT_WRLVL_LOAD_MR_WAIT; INIT_WRLVL_LOAD_MR_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_WRLVL_LOAD_MR2; // Load MR2 to set ODT: Dynamic ODT for single rank case // And ODTs for multi-rank case as well INIT_WRLVL_LOAD_MR2: init_next_state = INIT_WRLVL_LOAD_MR2_WAIT; // Wait tMRD before proceeding INIT_WRLVL_LOAD_MR2_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin //if (wrlvl_byte_done) // init_next_state = INIT_PRECHARGE_PREWAIT; // else if ((RANKS == 2) && wrlvl_rank_done_r2) // init_next_state = INIT_WRLVL_LOAD_MR2_WAIT; if (~wrlvl_done_r1) init_next_state = INIT_WRLVL_START; else if (SIM_CAL_OPTION == "SKIP_CAL") // If skip rdlvl, then we're done init_next_state = INIT_DONE; else // Otherwise, proceed to read leveling //init_next_state = INIT_RDLVL_ACT; init_next_state = INIT_PRECHARGE_PREWAIT; end //******************************************************* // Read Leveling //******************************************************* // single row activate. All subsequent read leveling writes and // read will take place in this row INIT_RDLVL_ACT: init_next_state = INIT_RDLVL_ACT_WAIT; // hang out for awhile before issuing subsequent column commands // it's also possible to reach this state at various points // during read leveling - determine what the current stage is INIT_RDLVL_ACT_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin // Just finished an activate. Now either write, read, or precharge // depending on where we are in the training sequence if (!pi_calib_done_r1) init_next_state = INIT_PI_PHASELOCK_READS; else if (!pi_dqs_found_done) // (!pi_dqs_found_start || pi_dqs_found_rank_done)) init_next_state = INIT_RDLVL_STG2_READ; else if (~wrcal_done && (WRLVL == "ON") && (CLK_PERIOD/nCK_PER_CLK <= 2500)) init_next_state = INIT_WRCAL_ACT_WAIT; else if ((!rdlvl_stg1_done && ~stg1_wr_done && ~rdlvl_last_byte_done) || (!prbs_rdlvl_done && ~stg1_wr_done && ~prbs_last_byte_done)) begin // Added to avoid rdlvl_stg1 write data pattern at the start of PRBS rdlvl if (!prbs_rdlvl_done && ~stg1_wr_done && rdlvl_last_byte_done) init_next_state = INIT_RDLVL_ACT_WAIT; else init_next_state = INIT_RDLVL_STG1_WRITE; end else if ((!rdlvl_stg1_done && rdlvl_stg1_start_int) || !prbs_rdlvl_done) begin if (rdlvl_last_byte_done || prbs_last_byte_done) // Added to avoid extra reads at the end of read leveling init_next_state = INIT_RDLVL_ACT_WAIT; else // Case 2: If in stage 1, and just precharged after training // previous byte, then continue reading init_next_state = INIT_RDLVL_STG1_READ; end else if ((prbs_rdlvl_done && rdlvl_stg1_done && (RANKS == 1)) && (WRLVL == "ON") && (CLK_PERIOD/nCK_PER_CLK > 2500)) init_next_state = INIT_WRCAL_ACT_WAIT; else // Otherwise, if we're finished with calibration, then precharge // the row - silly, because we just opened it - possible to take // this out by adding logic to avoid the ACT in first place. 
Make // sure that cnt_cmd_done will handle tRAS(min) init_next_state = INIT_PRECHARGE_PREWAIT; end //************************************************** // Back-to-back reads for Phaser_IN Phase locking // DQS to FREQ_REF clock //************************************************** INIT_PI_PHASELOCK_READS: if (pi_phase_locked_all_r3 && ~pi_phase_locked_all_r4) init_next_state = INIT_PRECHARGE_PREWAIT; //********************************************* // Stage 1 read-leveling (write and continuous read) //********************************************* // Write training pattern for stage 1 // PRBS pattern of TBD length INIT_RDLVL_STG1_WRITE: // 4:1 DDR3 BL8 will require all 8 words in 1 DIV4 clock cycle // 2:1 DDR2/DDR3 BL8 will require 2 DIV2 clock cycles for 8 words // 2:1 DDR2 BL4 will require 1 DIV2 clock cycle for 4 words // An entire row worth of writes issued before proceeding to reads // The number of write is (2^column width)/burst length to accomodate // PRBS pattern for window detection. if (stg1_wr_rd_cnt == 9'd1) init_next_state = INIT_RDLVL_STG1_WRITE_READ; // Write-read turnaround INIT_RDLVL_STG1_WRITE_READ: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_RDLVL_STG1_READ; // Continuous read, where interruptible by precharge request from // calibration logic. Also precharges when stage 1 is complete // No precharges when reads provided to Phaser_IN for phase locking // FREQ_REF to read DQS since data integrity is not important. INIT_RDLVL_STG1_READ: if (rdlvl_stg1_rank_done || (rdlvl_stg1_done && ~rdlvl_stg1_done_r1) || prech_req_posedge_r || (prbs_rdlvl_done && ~prbs_rdlvl_done_r1)) init_next_state = INIT_PRECHARGE_PREWAIT; //********************************************* // DQSFOUND calibration (set of 4 reads with gaps) //********************************************* // Read of training data. Note that Stage 2 is not a constant read, // instead there is a large gap between each set of back-to-back reads INIT_RDLVL_STG2_READ: // 4 read commands issued back-to-back if (num_reads == 'b1) init_next_state = INIT_RDLVL_STG2_READ_WAIT; // Wait before issuing the next set of reads. If a precharge request // comes in then handle - this can occur after stage 2 calibration is // completed for a DQS group INIT_RDLVL_STG2_READ_WAIT: if (~(phy_ctl_full || phy_cmd_full)) begin if (pi_dqs_found_rank_done || pi_dqs_found_done || prech_req_posedge_r) init_next_state = INIT_PRECHARGE_PREWAIT; else if (cnt_cmd_done_r) init_next_state = INIT_RDLVL_STG2_READ; end //****************************************************************** // MPR Read Leveling for DDR3 OCLK_DELAYED calibration //****************************************************************** // Issue Load Mode Register 3 command with A[2]=1, A[1:0]=2'b00 // to enable Multi Purpose Register (MPR) Read INIT_MPR_RDEN: init_next_state = INIT_MPR_WAIT; //Wait tMRD, tMOD INIT_MPR_WAIT: if (cnt_cmd_done_r) begin init_next_state = INIT_MPR_READ; end // Issue back-to-back read commands to read from MPR with // Address bus 0x0000 for BL=8. DQ[0] will output the pre-defined // MPR pattern of 01010101 (Rise0 = 1'b0, Fall0 = 1'b1 ...) 
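        // Expected data for the predefined MPR burst, for quick reference
        // (illustrative only; the exact bit ordering seen by the calibration
        // logic depends on the read datapath):
        //   burst position : 0 1 2 3 4 5 6 7
        //   DQ[0]          : 0 1 0 1 0 1 0 1   (Rise = 0, Fall = 1)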
INIT_MPR_READ: if (mpr_rdlvl_done || mpr_rnk_done || rdlvl_prech_req) init_next_state = INIT_MPR_DISABLE_PREWAIT; INIT_MPR_DISABLE_PREWAIT: if (cnt_cmd_done_r) init_next_state = INIT_MPR_DISABLE; // Issue Load Mode Register 3 command with A[2]=0 to disable // MPR read INIT_MPR_DISABLE: init_next_state = INIT_MPR_DISABLE_WAIT; INIT_MPR_DISABLE_WAIT: init_next_state = INIT_PRECHARGE_PREWAIT; //*********************************************************************** // OCLKDELAY Calibration //*********************************************************************** // This calibration requires single write followed by single read to // determine the Phaser_Out stage 3 delay required to center write DQS // in write DQ valid window. // Single Row Activate command before issuing Write command INIT_OCLKDELAY_ACT: init_next_state = INIT_OCLKDELAY_ACT_WAIT; INIT_OCLKDELAY_ACT_WAIT: if (cnt_cmd_done_r && ~oclk_prech_req) init_next_state = INIT_OCLKDELAY_WRITE; else if (oclkdelay_calib_done || prech_req_posedge_r) init_next_state = INIT_PRECHARGE_PREWAIT; INIT_OCLKDELAY_WRITE: if (oclk_wr_cnt == 4'd1) init_next_state = INIT_OCLKDELAY_WRITE_WAIT; INIT_OCLKDELAY_WRITE_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_OCLKDELAY_READ; INIT_OCLKDELAY_READ: init_next_state = INIT_OCLKDELAY_READ_WAIT; INIT_OCLKDELAY_READ_WAIT: if (~(phy_ctl_full || phy_cmd_full)) begin if (oclk_calib_resume) init_next_state = INIT_OCLKDELAY_WRITE; else if (oclkdelay_calib_done || prech_req_posedge_r || wrlvl_final) init_next_state = INIT_PRECHARGE_PREWAIT; end //********************************************* // Write calibration //********************************************* // single row activate INIT_WRCAL_ACT: init_next_state = INIT_WRCAL_ACT_WAIT; // hang out for awhile before issuing subsequent column command INIT_WRCAL_ACT_WAIT: if (cnt_cmd_done_r && ~wrcal_prech_req) init_next_state = INIT_WRCAL_WRITE; else if (wrcal_done || prech_req_posedge_r) init_next_state = INIT_PRECHARGE_PREWAIT; // Write training pattern for write calibration INIT_WRCAL_WRITE: // Once we've issued enough commands for 8 words - proceed to reads //if (burst_addr_r == 1'b1) if (wrcal_wr_cnt == 4'd1) init_next_state = INIT_WRCAL_WRITE_READ; // Write-read turnaround INIT_WRCAL_WRITE_READ: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_WRCAL_READ; else if (dqsfound_retry) init_next_state = INIT_RDLVL_STG2_READ_WAIT; INIT_WRCAL_READ: if (burst_addr_r == 1'b1) init_next_state = INIT_WRCAL_READ_WAIT; INIT_WRCAL_READ_WAIT: if (~(phy_ctl_full || phy_cmd_full)) begin if (wrcal_resume_r) begin if (wrcal_final_chk) init_next_state = INIT_WRCAL_READ; else init_next_state = INIT_WRCAL_WRITE; end else if (wrcal_done || prech_req_posedge_r || wrcal_act_req || // Added to support PO fine delay inc when TG errors wrlvl_byte_redo || (temp_wrcal_done && ~temp_lmr_done)) init_next_state = INIT_PRECHARGE_PREWAIT; else if (dqsfound_retry) init_next_state = INIT_RDLVL_STG2_READ_WAIT; else if (wrcal_read_req && cnt_wrcal_rd) init_next_state = INIT_WRCAL_MULT_READS; end INIT_WRCAL_MULT_READS: // multiple read commands issued back-to-back if (wrcal_reads == 'b1) init_next_state = INIT_WRCAL_READ_WAIT; //********************************************* // Handling of precharge during and in between read-level stages //********************************************* // Make sure we aren't violating any timing specs by precharging // immediately INIT_PRECHARGE_PREWAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || 
phy_cmd_full)) init_next_state = INIT_PRECHARGE; // Initiate precharge INIT_PRECHARGE: init_next_state = INIT_PRECHARGE_WAIT; INIT_PRECHARGE_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin if ((wrcal_sanity_chk_done && (DRAM_TYPE == "DDR3")) || (rdlvl_stg1_done && prbs_rdlvl_done && pi_dqs_found_done && (DRAM_TYPE == "DDR2"))) init_next_state = INIT_DONE; else if ((wrcal_done || (WRLVL == "OFF")) && rdlvl_stg1_done && prbs_rdlvl_done && pi_dqs_found_done && ((ddr3_lm_done_r) || (DRAM_TYPE == "DDR2"))) // If read leveling and phase detection calibration complete, // and programing the correct burst length then we're finished init_next_state = INIT_WRCAL_ACT; else if ((wrcal_done || (WRLVL == "OFF") || (~wrcal_done && temp_wrcal_done && ~temp_lmr_done)) && (rdlvl_stg1_done || (~wrcal_done && temp_wrcal_done && ~temp_lmr_done)) && prbs_rdlvl_done && rdlvl_stg1_done && pi_dqs_found_done) begin // after all calibration program the correct burst length init_next_state = INIT_LOAD_MR; // Added to support PO fine delay inc when TG errors end else if (~wrcal_done && temp_wrcal_done && temp_lmr_done) init_next_state = INIT_WRCAL_READ_WAIT; else if (rdlvl_stg1_done && pi_dqs_found_done && (WRLVL == "ON")) // If read leveling finished, proceed to write calibration init_next_state = INIT_REFRESH; else // Otherwise, open row for read-leveling purposes init_next_state = INIT_REFRESH; end //******************************************************* // Initialization/Calibration done. Take a long rest, relax //******************************************************* INIT_DONE: init_next_state = INIT_DONE; endcase end //***************************************************************** // Initialization done signal - asserted before leveling starts //***************************************************************** always @(posedge clk) if (rst) mem_init_done_r <= #TCQ 1'b0; else if ((!cnt_dllk_zqinit_done_r && (cnt_dllk_zqinit_r == TDLLK_TZQINIT_DELAY_CNT) && (chip_cnt_r == RANKS-1) && (DRAM_TYPE == "DDR3")) || ( (init_state_r == INIT_LOAD_MR_WAIT) && (ddr2_refresh_flag_r) && (chip_cnt_r == RANKS-1) && (cnt_init_mr_done_r) && (DRAM_TYPE == "DDR2"))) mem_init_done_r <= #TCQ 1'b1; //***************************************************************** // Write Calibration signal to PHY Control Block - asserted before // Write Leveling starts //***************************************************************** //generate //if (RANKS < 2) begin: ranks_one always @(posedge clk) begin if (rst || (done_dqs_tap_inc && (init_state_r == INIT_WRLVL_LOAD_MR2))) write_calib <= #TCQ 1'b0; else if (wrlvl_active_r1) write_calib <= #TCQ 1'b1; end //end else begin: ranks_two // always @(posedge clk) begin // if (rst || // ((init_state_r1 == INIT_WRLVL_LOAD_MR_WAIT) && // ((wrlvl_rank_done_r2 && (chip_cnt_r == RANKS-1)) || // (SIM_CAL_OPTION == "FAST_CAL")))) // write_calib <= #TCQ 1'b0; // else if (wrlvl_active_r1) // write_calib <= #TCQ 1'b1; // end //end //endgenerate //***************************************************************** // Read Calibration signal to PHY Control Block - asserted after // Write Leveling during PHASER_IN phase locking stage. 
// Must be de-asserted before Read Leveling //***************************************************************** always @(posedge clk) begin if (rst || pi_calib_done_r1) read_calib_int <= #TCQ 1'b0; else if (~pi_calib_done_r1 && (init_state_r == INIT_RDLVL_ACT_WAIT) && (cnt_cmd_r == CNTNEXT_CMD)) read_calib_int <= #TCQ 1'b1; end always @(posedge clk) read_calib_r <= #TCQ read_calib_int; always @(posedge clk) begin if (rst || pi_calib_done_r1) read_calib <= #TCQ 1'b0; else if (~pi_calib_done_r1 && (init_state_r == INIT_PI_PHASELOCK_READS)) read_calib <= #TCQ 1'b1; end always @(posedge clk) if (rst) pi_calib_done_r <= #TCQ 1'b0; else if (pi_calib_rank_done_r)// && (chip_cnt_r == RANKS-1)) pi_calib_done_r <= #TCQ 1'b1; always @(posedge clk) if (rst) pi_calib_rank_done_r <= #TCQ 1'b0; else if (pi_phase_locked_all_r3 && ~pi_phase_locked_all_r4) pi_calib_rank_done_r <= #TCQ 1'b1; else pi_calib_rank_done_r <= #TCQ 1'b0; always @(posedge clk) begin if (rst || ((PRE_REV3ES == "ON") && temp_wrcal_done && ~temp_wrcal_done_r)) pi_phaselock_timer <= #TCQ 'd0; else if (((init_state_r == INIT_PI_PHASELOCK_READS) && (pi_phaselock_timer != PHASELOCKED_TIMEOUT)) || tg_timer_go) pi_phaselock_timer <= #TCQ pi_phaselock_timer + 1; else pi_phaselock_timer <= #TCQ pi_phaselock_timer; end assign pi_phase_locked_err = (pi_phaselock_timer == PHASELOCKED_TIMEOUT) ? 1'b1 : 1'b0; //***************************************************************** // DDR3 final burst length programming done. For DDR3 during // calibration the burst length is fixed to BL8. After calibration // the correct burst length is programmed. //***************************************************************** always @(posedge clk) if (rst) ddr3_lm_done_r <= #TCQ 1'b0; else if ((init_state_r == INIT_LOAD_MR_WAIT) && (chip_cnt_r == RANKS-1) && wrcal_done) ddr3_lm_done_r <= #TCQ 1'b1; always @(posedge clk) begin pi_dqs_found_rank_done_r <= #TCQ pi_dqs_found_rank_done; pi_phase_locked_all_r1 <= #TCQ pi_phase_locked_all; pi_phase_locked_all_r2 <= #TCQ pi_phase_locked_all_r1; pi_phase_locked_all_r3 <= #TCQ pi_phase_locked_all_r2; pi_phase_locked_all_r4 <= #TCQ pi_phase_locked_all_r3; pi_dqs_found_all_r <= #TCQ pi_dqs_found_done; pi_calib_done_r1 <= #TCQ pi_calib_done_r; end //*************************************************************************** // Logic for deep memory (multi-rank) configurations //*************************************************************************** // For DDR3 asserted when generate if (RANKS < 2) begin: single_rank always @(posedge clk) chip_cnt_r <= #TCQ 2'b00; end else begin: dual_rank always @(posedge clk) if (rst || // Set chip_cnt_r to 2'b00 after both Ranks are read leveled (rdlvl_stg1_done && prbs_rdlvl_done && ~wrcal_done) || // Set chip_cnt_r to 2'b00 after both Ranks are write leveled (wrlvl_done_r && (init_state_r==INIT_WRLVL_LOAD_MR2_WAIT)))begin chip_cnt_r <= #TCQ 2'b00; end else if ((((init_state_r == INIT_WAIT_DLLK_ZQINIT) && (cnt_dllk_zqinit_r == TDLLK_TZQINIT_DELAY_CNT)) && (DRAM_TYPE == "DDR3")) || ((init_state_r==INIT_REFRESH_RNK2_WAIT) && (cnt_cmd_r=='d36)) || //mpr_rnk_done || //(rdlvl_stg1_rank_done && ~rdlvl_last_byte_done) || //(stg1_wr_done && (init_state_r == INIT_REFRESH) && //~(rnk_ref_cnt && rdlvl_last_byte_done)) || // Increment chip_cnt_r to issue Refresh to second rank (~pi_dqs_found_all_r && (init_state_r==INIT_PRECHARGE_PREWAIT) && (cnt_cmd_r=='d36)) || // Increment chip_cnt_r when DQSFOUND done for the Rank (pi_dqs_found_rank_done && ~pi_dqs_found_rank_done_r) || ((init_state_r == 
INIT_LOAD_MR_WAIT)&& cnt_cmd_done_r && wrcal_done) || ((init_state_r == INIT_DDR2_MULTI_RANK) && (DRAM_TYPE == "DDR2"))) begin if ((~mem_init_done_r || ~rdlvl_stg1_done || ~pi_dqs_found_done || // condition to increment chip_cnt during // final burst length programming for DDR3 ~pi_calib_done_r || wrcal_done) //~mpr_rdlvl_done || && (chip_cnt_r != RANKS-1)) chip_cnt_r <= #TCQ chip_cnt_r + 1; else chip_cnt_r <= #TCQ 2'b00; end end endgenerate generate if ((REG_CTRL == "ON") && (RANKS == 1)) begin: DDR3_RDIMM_1rank always @(posedge clk) begin if (rst) phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; else if (init_state_r == INIT_REG_WRITE) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if(!(CWL_M%2)) begin phy_int_cs_n[0%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[1%nCK_PER_CLK] <= #TCQ 1'b0; end else begin phy_int_cs_n[2%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[3%nCK_PER_CLK] <= #TCQ 1'b0; end end else if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) //even CWL phy_int_cs_n[0] <= #TCQ 1'b0; else // odd CWL phy_int_cs_n[1*nCS_PER_RANK] <= #TCQ 1'b0; end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; end end else if (RANKS == 1) begin: DDR3_1rank always @(posedge clk) begin if (rst) phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; else if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) begin //even CWL for (n = 0; n < nCS_PER_RANK; n = n + 1) begin phy_int_cs_n[n] <= #TCQ 1'b0; end end else begin //odd CWL for (p = nCS_PER_RANK; p < 2*nCS_PER_RANK; p = p + 1) begin phy_int_cs_n[p] <= #TCQ 1'b0; end end end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; end end else if ((REG_CTRL == "ON") && (RANKS == 2)) begin: DDR3_2rank always @(posedge clk) begin if (rst) phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; else if (init_state_r == INIT_REG_WRITE) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if(!(CWL_M%2)) begin phy_int_cs_n[0%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[1%nCK_PER_CLK] <= #TCQ 1'b0; end else begin phy_int_cs_n[2%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[3%nCK_PER_CLK] <= #TCQ 1'b0; end end else begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; case (chip_cnt_r) 2'b00:begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == 
INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) //even CWL phy_int_cs_n[0] <= #TCQ 1'b0; else // odd CWL phy_int_cs_n[1*CS_WIDTH*nCS_PER_RANK] <= #TCQ 1'b0; end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; //for (n = 0; n < nCS_PER_RANK*nCK_PER_CLK*2; n = n + (nCS_PER_RANK*2)) begin // // phy_int_cs_n[n+:nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; //end end 2'b01:begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) //even CWL phy_int_cs_n[1] <= #TCQ 1'b0; else // odd CWL phy_int_cs_n[1+1*CS_WIDTH*nCS_PER_RANK] <= #TCQ 1'b0; end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; //for (p = nCS_PER_RANK; p < nCS_PER_RANK*nCK_PER_CLK*2; p = p + (nCS_PER_RANK*2)) begin // // phy_int_cs_n[p+:nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; //end end endcase end end end else if (RANKS == 2) begin: DDR3_2rank always @(posedge clk) begin if (rst) phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; else if (init_state_r == INIT_REG_WRITE) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if(!(CWL_M%2)) begin phy_int_cs_n[0%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[1%nCK_PER_CLK] <= #TCQ 1'b0; end else begin phy_int_cs_n[2%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[3%nCK_PER_CLK] <= #TCQ 1'b0; end end else begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; case (chip_cnt_r) 2'b00:begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) begin //even CWL for (n = 0; n < nCS_PER_RANK; n = n + 1) begin phy_int_cs_n[n] <= #TCQ 1'b0; end end else begin // odd CWL for (p = CS_WIDTH*nCS_PER_RANK; p < (CS_WIDTH*nCS_PER_RANK + nCS_PER_RANK); p = p + 1) begin phy_int_cs_n[p] <= #TCQ 1'b0; end end end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; //for (n = 0; n < nCS_PER_RANK*nCK_PER_CLK*2; n = n + (nCS_PER_RANK*2)) begin // // phy_int_cs_n[n+:nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; //end end 2'b01:begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == 
INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) begin //even CWL for (q = nCS_PER_RANK; q < (2 * nCS_PER_RANK); q = q + 1) begin phy_int_cs_n[q] <= #TCQ 1'b0; end end else begin // odd CWL for (m = (nCS_PER_RANK*CS_WIDTH + nCS_PER_RANK); m < (nCS_PER_RANK*CS_WIDTH + 2*nCS_PER_RANK); m = m + 1) begin phy_int_cs_n[m] <= #TCQ 1'b0; end end end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; //for (p = nCS_PER_RANK; p < nCS_PER_RANK*nCK_PER_CLK*2; p = p + (nCS_PER_RANK*2)) begin // // phy_int_cs_n[p+:nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; //end end endcase end end // always @ (posedge clk) end // commented out for now. Need it for DDR2 2T timing /* end else begin: DDR2 always @(posedge clk) if (rst) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; end else begin if (init_state_r == INIT_REG_WRITE) begin // All ranks selected simultaneously phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b0}}; end else if ((wrlvl_odt) || (init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH)) begin phy_int_cs_n[0] <= #TCQ 1'b0; end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; end // else: !if(rst) end // block: DDR2 */ endgenerate assign phy_cs_n = phy_int_cs_n; //*************************************************************************** // Write/read burst logic for calibration //*************************************************************************** assign rdlvl_wr = (init_state_r == INIT_OCLKDELAY_WRITE) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE); assign rdlvl_rd = (init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_MPR_READ) || (init_state_r == INIT_WRCAL_MULT_READS); assign rdlvl_wr_rd = rdlvl_wr | rdlvl_rd; //*************************************************************************** // Address generation and logic to count # of writes/reads issued during // certain stages of calibration //*************************************************************************** // Column address generation logic: // Keep track of the current column address - since all bursts are in // increments of 8 only during calibration, we need to keep track of // addresses [COL_WIDTH-1:3], lower order address bits will always = 0 always @(posedge clk) if (rst || wrcal_done) burst_addr_r <= #TCQ 1'b0; else if ((init_state_r == INIT_WRCAL_ACT_WAIT) || (init_state_r == 
INIT_OCLKDELAY_ACT_WAIT) || (init_state_r == INIT_OCLKDELAY_WRITE) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_WRITE_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS) || (init_state_r == INIT_WRCAL_READ_WAIT)) burst_addr_r <= #TCQ 1'b1; else if (rdlvl_wr_rd && new_burst_r) burst_addr_r <= #TCQ ~burst_addr_r; else burst_addr_r <= #TCQ 1'b0; // Read Level Stage 1 requires writes to the entire row since // a PRBS pattern is being written. This counter keeps track // of the number of writes which depends on the column width // The (stg1_wr_rd_cnt==9'd0) condition was added so the col // address wraps around during stage1 reads always @(posedge clk) if (rst || ((init_state_r == INIT_RDLVL_STG1_WRITE_READ) && ~rdlvl_stg1_done)) stg1_wr_rd_cnt <= #TCQ NUM_STG1_WR_RD; else if (rdlvl_last_byte_done || (stg1_wr_rd_cnt == 9'd1) || (prbs_rdlvl_prech_req && (init_state_r == INIT_RDLVL_ACT_WAIT))) stg1_wr_rd_cnt <= #TCQ 'd128; else if (((init_state_r == INIT_RDLVL_STG1_WRITE) && new_burst_r && ~phy_data_full) ||((init_state_r == INIT_RDLVL_STG1_READ) && rdlvl_stg1_done)) stg1_wr_rd_cnt <= #TCQ stg1_wr_rd_cnt - 1; // OCLKDELAY calibration requires multiple writes because // write can be up to 2 cycles early since OCLKDELAY tap // can go down to 0 always @(posedge clk) if (rst || (init_state_r == INIT_OCLKDELAY_WRITE_WAIT) || (oclk_wr_cnt == 4'd0)) oclk_wr_cnt <= #TCQ NUM_STG1_WR_RD; else if ((init_state_r == INIT_OCLKDELAY_WRITE) && new_burst_r && ~phy_data_full) oclk_wr_cnt <= #TCQ oclk_wr_cnt - 1; // Write calibration requires multiple writes because // write can be up to 2 cycles early due to new write // leveling algorithm to avoid late writes always @(posedge clk) if (rst || (init_state_r == INIT_WRCAL_WRITE_READ) || (wrcal_wr_cnt == 4'd0)) wrcal_wr_cnt <= #TCQ NUM_STG1_WR_RD; else if ((init_state_r == INIT_WRCAL_WRITE) && new_burst_r && ~phy_data_full) wrcal_wr_cnt <= #TCQ wrcal_wr_cnt - 1; generate if(nCK_PER_CLK == 4) begin:back_to_back_reads_4_1 // 4 back-to-back reads with gaps for // read data_offset calibration (rdlvl stage 2) always @(posedge clk) if (rst || (init_state_r == INIT_RDLVL_STG2_READ_WAIT)) num_reads <= #TCQ 3'b000; else if ((num_reads > 3'b000) && ~(phy_ctl_full || phy_cmd_full)) num_reads <= #TCQ num_reads - 1; else if ((init_state_r == INIT_RDLVL_STG2_READ) || phy_ctl_full || phy_cmd_full && new_burst_r) num_reads <= #TCQ 3'b011; end else if(nCK_PER_CLK == 2) begin: back_to_back_reads_2_1 // 4 back-to-back reads with gaps for // read data_offset calibration (rdlvl stage 2) always @(posedge clk) if (rst || (init_state_r == INIT_RDLVL_STG2_READ_WAIT)) num_reads <= #TCQ 3'b000; else if ((num_reads > 3'b000) && ~(phy_ctl_full || phy_cmd_full)) num_reads <= #TCQ num_reads - 1; else if ((init_state_r == INIT_RDLVL_STG2_READ) || phy_ctl_full || phy_cmd_full && new_burst_r) num_reads <= #TCQ 3'b111; end endgenerate // back-to-back reads during write calibration always @(posedge clk) if (rst ||(init_state_r == INIT_WRCAL_READ_WAIT)) wrcal_reads <= #TCQ 2'b00; else if ((wrcal_reads > 2'b00) && ~(phy_ctl_full || phy_cmd_full)) wrcal_reads <= #TCQ wrcal_reads - 1; else if ((init_state_r == INIT_WRCAL_MULT_READS) || phy_ctl_full || phy_cmd_full && new_burst_r) wrcal_reads <= #TCQ 'd255; // determine how often to issue row command during read leveling writes // and reads always @(posedge clk) if (rdlvl_wr_rd) begin // 2:1 mode - every other command issued is a data command // 4:1 mode - 
// every command issued is a data command
      if (nCK_PER_CLK == 2) begin
        if (!phy_ctl_full)
          new_burst_r <= #TCQ ~new_burst_r;
      end else
        new_burst_r <= #TCQ 1'b1;
    end else
      new_burst_r <= #TCQ 1'b1;

  // indicate when a write is occurring. PHY_WRDATA_EN must be asserted
  // simultaneously with the corresponding command/address for CWL = 5,6
  always @(posedge clk) begin
    rdlvl_wr_r      <= #TCQ rdlvl_wr;
    calib_wrdata_en <= #TCQ phy_wrdata_en;
  end

  always @(posedge clk) begin
    if (rst || wrcal_done)
      extend_cal_pat <= #TCQ 1'b0;
    else if (temp_lmr_done && (PRE_REV3ES == "ON"))
      extend_cal_pat <= #TCQ 1'b1;
  end

  generate
    if ((nCK_PER_CLK == 4) || (BURST_MODE == "4")) begin: wrdqen_div4
      // Write data enable asserted for one DIV4 clock cycle
      // Only BL8 supported with DIV4. DDR2 BL4 will use DIV2.
      always @(rst or phy_data_full or init_state_r) begin
        if (~phy_data_full && ((init_state_r == INIT_RDLVL_STG1_WRITE) ||
                               (init_state_r == INIT_OCLKDELAY_WRITE) ||
                               (init_state_r == INIT_WRCAL_WRITE)))
          phy_wrdata_en = 1'b1;
        else
          phy_wrdata_en = 1'b0;
      end
    end else begin: wrdqen_div2 // block: wrdqen_div4
      always @(rdlvl_wr or phy_ctl_full or new_burst_r or phy_wrdata_en_r1
               or phy_data_full)
        if ((rdlvl_wr & ~phy_ctl_full & new_burst_r & ~phy_data_full) |
            phy_wrdata_en_r1)
          phy_wrdata_en = 1'b1;
        else
          phy_wrdata_en = 1'b0;

      always @(posedge clk)
        phy_wrdata_en_r1 <= #TCQ rdlvl_wr & ~phy_ctl_full & new_burst_r &
                                 ~phy_data_full;

      always @(posedge clk) begin
        if (!phy_wrdata_en & first_rdlvl_pat_r)
          wrdata_pat_cnt <= #TCQ 2'b00;
        else if (wrdata_pat_cnt == 2'b11)
          wrdata_pat_cnt <= #TCQ 2'b10;
        else
          wrdata_pat_cnt <= #TCQ wrdata_pat_cnt + 1;
      end

      always @(posedge clk) begin
        if (!phy_wrdata_en & first_wrcal_pat_r)
          wrcal_pat_cnt <= #TCQ 2'b00;
        else if (extend_cal_pat && (wrcal_pat_cnt == 2'b01))
          wrcal_pat_cnt <= #TCQ 2'b00;
        else if (wrcal_pat_cnt == 2'b11)
          wrcal_pat_cnt <= #TCQ 2'b10;
        else
          wrcal_pat_cnt <= #TCQ wrcal_pat_cnt + 1;
      end
    end
  endgenerate

  // indicate when a read is occurring. PHY_RDDATA_EN must be asserted
  // simultaneously with the corresponding command/address. PHY_RDDATA_EN
  // is used during read-leveling to determine read latency
  assign phy_rddata_en = ~phy_if_empty;

  // Read data valid generation for MC and User Interface after calibration is
  // complete
  assign phy_rddata_valid = init_complete_r1_timing ? phy_rddata_en : 1'b0;

  //***************************************************************************
  // Generate training data written at start of each read-leveling stage
  // For every stage of read leveling, 8 words are written into memory
  // The format is as follows (shown as {rise,fall}):
  //   Stage 1: 0xF, 0x0, 0xF, 0x0, 0xF, 0x0, 0xF, 0x0
  //   Stage 2: 0xF, 0x0, 0xA, 0x5, 0x5, 0xA, 0x9, 0x6
  //***************************************************************************
  always @(posedge clk)
    if ((init_state_r == INIT_IDLE) ||
        (init_state_r == INIT_RDLVL_STG1_WRITE))
      cnt_init_data_r <= #TCQ 2'b00;
    else if (phy_wrdata_en)
      cnt_init_data_r <= #TCQ cnt_init_data_r + 1;
    else if (init_state_r == INIT_WRCAL_WRITE)
      cnt_init_data_r <= #TCQ 2'b10;

  // write different sequence for very
  // first write to memory only.
Used to help us differentiate // if the writes are "early" or "on-time" during read leveling always @(posedge clk) if (rst || rdlvl_stg1_rank_done) first_rdlvl_pat_r <= #TCQ 1'b1; else if (phy_wrdata_en && (init_state_r == INIT_RDLVL_STG1_WRITE)) first_rdlvl_pat_r <= #TCQ 1'b0; always @(posedge clk) if (rst || wrcal_resume || (init_state_r == INIT_WRCAL_ACT_WAIT)) first_wrcal_pat_r <= #TCQ 1'b1; else if (phy_wrdata_en && (init_state_r == INIT_WRCAL_WRITE)) first_wrcal_pat_r <= #TCQ 1'b0; generate if ((CLK_PERIOD/nCK_PER_CLK > 2500) && (nCK_PER_CLK == 2)) begin: wrdq_div2_2to1_rdlvl_first always @(posedge clk) if (~oclkdelay_calib_done) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hF}}, {DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}}, {DQ_WIDTH/4{4'h0}}}; else if (!rdlvl_stg1_done) begin // The 16 words for stage 1 write data in 2:1 mode is written // over 4 consecutive controller clock cycles. Note that write // data follows phy_wrdata_en by one clock cycle case (wrdata_pat_cnt) 2'b00: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h3}}, {DQ_WIDTH/4{4'h9}}}; end 2'b01: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hC}}}; end 2'b10: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h1}}, {DQ_WIDTH/4{4'hB}}}; end 2'b11: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hC}}}; end endcase end else if (!prbs_rdlvl_done && ~phy_data_full) begin // prbs_o is 8-bits wide hence {DQ_WIDTH/8{prbs_o}} results in // prbs_o being concatenated 8 times resulting in DQ_WIDTH phy_wrdata <= #TCQ {{DQ_WIDTH/8{prbs_o[4*8-1:3*8]}}, {DQ_WIDTH/8{prbs_o[3*8-1:2*8]}}, {DQ_WIDTH/8{prbs_o[2*8-1:8]}}, {DQ_WIDTH/8{prbs_o[8-1:0]}}}; end else if (!wrcal_done) begin case (wrcal_pat_cnt) 2'b00: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}}}; end 2'b01: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h5}}}; end 2'b10: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h1}}, {DQ_WIDTH/4{4'hB}}}; end 2'b11: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h8}}, {DQ_WIDTH/4{4'hD}}, {DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h4}}}; end endcase end end else if ((CLK_PERIOD/nCK_PER_CLK > 2500) && (nCK_PER_CLK == 4)) begin: wrdq_div2_4to1_rdlvl_first always @(posedge clk) if (~oclkdelay_calib_done) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}}; else if (!rdlvl_stg1_done && ~phy_data_full) // write different sequence for very // first write to memory only. 
Used to help us differentiate // if the writes are "early" or "on-time" during read leveling if (first_rdlvl_pat_r) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}},{DQ_WIDTH/4{4'hC}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h3}},{DQ_WIDTH/4{4'h9}}}; else // For all others, change the first two words written in order // to differentiate the "early write" and "on-time write" // readback patterns during read leveling phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}},{DQ_WIDTH/4{4'hC}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h1}},{DQ_WIDTH/4{4'hB}}}; else if (!prbs_rdlvl_done && ~phy_data_full) // prbs_o is 8-bits wide hence {DQ_WIDTH/8{prbs_o}} results in // prbs_o being concatenated 8 times resulting in DQ_WIDTH phy_wrdata <= #TCQ {{DQ_WIDTH/8{prbs_o[8*8-1:7*8]}},{DQ_WIDTH/8{prbs_o[7*8-1:6*8]}}, {DQ_WIDTH/8{prbs_o[6*8-1:5*8]}},{DQ_WIDTH/8{prbs_o[5*8-1:4*8]}}, {DQ_WIDTH/8{prbs_o[4*8-1:3*8]}},{DQ_WIDTH/8{prbs_o[3*8-1:2*8]}}, {DQ_WIDTH/8{prbs_o[2*8-1:8]}},{DQ_WIDTH/8{prbs_o[8-1:0]}}}; else if (!wrcal_done) if (first_wrcal_pat_r) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}},{DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}},{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'h5}},{DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}},{DQ_WIDTH/4{4'hF}}}; else phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h8}},{DQ_WIDTH/4{4'hD}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h1}},{DQ_WIDTH/4{4'hB}}}; end else if (nCK_PER_CLK == 4) begin: wrdq_div1_4to1_wrcal_first always @(posedge clk) if ((~oclkdelay_calib_done) && (DRAM_TYPE == "DDR3")) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}}; else if ((!wrcal_done)&& (DRAM_TYPE == "DDR3")) begin if (extend_cal_pat) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}},{DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}},{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'h5}},{DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}},{DQ_WIDTH/4{4'hF}}}; else if (first_wrcal_pat_r) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}},{DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}},{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'h5}},{DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}},{DQ_WIDTH/4{4'hF}}}; else phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h8}},{DQ_WIDTH/4{4'hD}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h1}},{DQ_WIDTH/4{4'hB}}}; end else if (!rdlvl_stg1_done && ~phy_data_full) begin // write different sequence for very // first write to memory only. 
Used to help us differentiate // if the writes are "early" or "on-time" during read leveling if (first_rdlvl_pat_r) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}},{DQ_WIDTH/4{4'hC}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h3}},{DQ_WIDTH/4{4'h9}}}; else // For all others, change the first two words written in order // to differentiate the "early write" and "on-time write" // readback patterns during read leveling phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}},{DQ_WIDTH/4{4'hC}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h1}},{DQ_WIDTH/4{4'hB}}}; end else if (!prbs_rdlvl_done && ~phy_data_full) // prbs_o is 8-bits wide hence {DQ_WIDTH/8{prbs_o}} results in // prbs_o being concatenated 8 times resulting in DQ_WIDTH phy_wrdata <= #TCQ {{DQ_WIDTH/8{prbs_o[8*8-1:7*8]}},{DQ_WIDTH/8{prbs_o[7*8-1:6*8]}}, {DQ_WIDTH/8{prbs_o[6*8-1:5*8]}},{DQ_WIDTH/8{prbs_o[5*8-1:4*8]}}, {DQ_WIDTH/8{prbs_o[4*8-1:3*8]}},{DQ_WIDTH/8{prbs_o[3*8-1:2*8]}}, {DQ_WIDTH/8{prbs_o[2*8-1:8]}},{DQ_WIDTH/8{prbs_o[8-1:0]}}}; end else begin: wrdq_div1_2to1_wrcal_first always @(posedge clk) if ((~oclkdelay_calib_done)&& (DRAM_TYPE == "DDR3")) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hF}}, {DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}}, {DQ_WIDTH/4{4'h0}}}; else if ((!wrcal_done) && (DRAM_TYPE == "DDR3"))begin case (wrcal_pat_cnt) 2'b00: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}}}; end 2'b01: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h5}}}; end 2'b10: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h1}}, {DQ_WIDTH/4{4'hB}}}; end 2'b11: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h8}}, {DQ_WIDTH/4{4'hD}}, {DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h4}}}; end endcase end else if (!rdlvl_stg1_done) begin // The 16 words for stage 1 write data in 2:1 mode is written // over 4 consecutive controller clock cycles. 
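            // (Note, as an annotation on the existing code: in 2:1 mode each
            // controller clock carries four DQ words, i.e. two memory clocks
            // of rise/fall data, so one BL8 burst spans two controller
            // clocks and wrdata_pat_cnt below selects which 4-word slice of
            // the 16-word pattern is driven in each cycle.)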
Note that write // data follows phy_wrdata_en by one clock cycle case (wrdata_pat_cnt) 2'b00: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h3}}, {DQ_WIDTH/4{4'h9}}}; end 2'b01: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hC}}}; end 2'b10: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h1}}, {DQ_WIDTH/4{4'hB}}}; end 2'b11: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hC}}}; end endcase end else if (!prbs_rdlvl_done && ~phy_data_full) begin // prbs_o is 8-bits wide hence {DQ_WIDTH/8{prbs_o}} results in // prbs_o being concatenated 8 times resulting in DQ_WIDTH phy_wrdata <= #TCQ {{DQ_WIDTH/8{prbs_o[4*8-1:3*8]}}, {DQ_WIDTH/8{prbs_o[3*8-1:2*8]}}, {DQ_WIDTH/8{prbs_o[2*8-1:8]}}, {DQ_WIDTH/8{prbs_o[8-1:0]}}}; end end endgenerate //*************************************************************************** // Memory control/address //*************************************************************************** // Phases [2] and [3] are always deasserted for 4:1 mode generate if (nCK_PER_CLK == 4) begin: gen_div4_ca_tieoff always @(posedge clk) begin phy_ras_n[3:2] <= #TCQ 3'b11; phy_cas_n[3:2] <= #TCQ 3'b11; phy_we_n[3:2] <= #TCQ 3'b11; end end endgenerate // Assert RAS when: (1) Loading MRS, (2) Activating Row, (3) Precharging // (4) auto refresh generate if (!(CWL_M % 2)) begin: even_cwl always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH))begin phy_ras_n[0] <= #TCQ 1'b0; phy_ras_n[1] <= #TCQ 1'b1; end else begin phy_ras_n[0] <= #TCQ 1'b1; phy_ras_n[1] <= #TCQ 1'b1; end end // Assert CAS when: (1) Loading MRS, (2) Issuing Read/Write command // (3) auto refresh always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r))begin phy_cas_n[0] <= #TCQ 1'b0; phy_cas_n[1] <= #TCQ 1'b1; end else begin phy_cas_n[0] <= #TCQ 1'b1; phy_cas_n[1] <= #TCQ 1'b1; end end // Assert WE when: (1) Loading MRS, (2) Issuing Write command (only // occur during read leveling), (3) Issuing ZQ Long Calib command, // (4) Precharge always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE)|| (rdlvl_wr && new_burst_r))begin phy_we_n[0] <= #TCQ 1'b0; phy_we_n[1] <= #TCQ 1'b1; end else begin phy_we_n[0] <= #TCQ 1'b1; phy_we_n[1] <= #TCQ 1'b1; end end end else begin: odd_cwl always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || 
(init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH))begin phy_ras_n[0] <= #TCQ 1'b1; phy_ras_n[1] <= #TCQ 1'b0; end else begin phy_ras_n[0] <= #TCQ 1'b1; phy_ras_n[1] <= #TCQ 1'b1; end end // Assert CAS when: (1) Loading MRS, (2) Issuing Read/Write command // (3) auto refresh always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r))begin phy_cas_n[0] <= #TCQ 1'b1; phy_cas_n[1] <= #TCQ 1'b0; end else begin phy_cas_n[0] <= #TCQ 1'b1; phy_cas_n[1] <= #TCQ 1'b1; end end // Assert WE when: (1) Loading MRS, (2) Issuing Write command (only // occur during read leveling), (3) Issuing ZQ Long Calib command, // (4) Precharge always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE)|| (rdlvl_wr && new_burst_r))begin phy_we_n[0] <= #TCQ 1'b1; phy_we_n[1] <= #TCQ 1'b0; end else begin phy_we_n[0] <= #TCQ 1'b1; phy_we_n[1] <= #TCQ 1'b1; end end end endgenerate // Assign calib_cmd for the command field in PHY_Ctl_Word always @(posedge clk) begin if (wr_level_dqs_asrt) begin // Request to toggle DQS during write leveling calib_cmd <= #TCQ 3'b001; if (CWL_M % 2) begin // odd write latency calib_data_offset_0 <= #TCQ CWL_M + 3; calib_data_offset_1 <= #TCQ CWL_M + 3; calib_data_offset_2 <= #TCQ CWL_M + 3; calib_cas_slot <= #TCQ 2'b01; end else begin // even write latency calib_data_offset_0 <= #TCQ CWL_M + 2; calib_data_offset_1 <= #TCQ CWL_M + 2; calib_data_offset_2 <= #TCQ CWL_M + 2; calib_cas_slot <= #TCQ 2'b00; end end else if (rdlvl_wr && new_burst_r) begin // Write Command calib_cmd <= #TCQ 3'b001; if (CWL_M % 2) begin // odd write latency calib_data_offset_0 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 3 : CWL_M - 1; calib_data_offset_1 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 3 : CWL_M - 1; calib_data_offset_2 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 3 : CWL_M - 1; calib_cas_slot <= #TCQ 2'b01; end else begin // even write latency calib_data_offset_0 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 2 : CWL_M - 2 ; calib_data_offset_1 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 2 : CWL_M - 2 ; calib_data_offset_2 <= #TCQ (nCK_PER_CLK == 4) ? 
CWL_M + 2 : CWL_M - 2 ; calib_cas_slot <= #TCQ 2'b00; end end else if (rdlvl_rd && new_burst_r) begin // Read Command calib_cmd <= #TCQ 3'b011; if (CWL_M % 2) calib_cas_slot <= #TCQ 2'b01; else calib_cas_slot <= #TCQ 2'b00; if (~pi_calib_done_r1) begin calib_data_offset_0 <= #TCQ 6'd0; calib_data_offset_1 <= #TCQ 6'd0; calib_data_offset_2 <= #TCQ 6'd0; end else if (~pi_dqs_found_done_r1) begin calib_data_offset_0 <= #TCQ rd_data_offset_0; calib_data_offset_1 <= #TCQ rd_data_offset_1; calib_data_offset_2 <= #TCQ rd_data_offset_2; end else begin calib_data_offset_0 <= #TCQ rd_data_offset_ranks_0[6*chip_cnt_r+:6]; calib_data_offset_1 <= #TCQ rd_data_offset_ranks_1[6*chip_cnt_r+:6]; calib_data_offset_2 <= #TCQ rd_data_offset_ranks_2[6*chip_cnt_r+:6]; end end else begin // Non-Data Commands like NOP, MRS, ZQ Long Cal, Precharge, // Active, Refresh calib_cmd <= #TCQ 3'b100; calib_data_offset_0 <= #TCQ 6'd0; calib_data_offset_1 <= #TCQ 6'd0; calib_data_offset_2 <= #TCQ 6'd0; if (CWL_M % 2) calib_cas_slot <= #TCQ 2'b01; else calib_cas_slot <= #TCQ 2'b00; end end // Write Enable to PHY_Control FIFO always asserted // No danger of this FIFO being Full with 4:1 sync clock ratio // This is also the write enable to the command OUT_FIFO always @(posedge clk) begin if (rst) begin calib_ctl_wren <= #TCQ 1'b0; calib_cmd_wren <= #TCQ 1'b0; calib_seq <= #TCQ 2'b00; end else if (cnt_pwron_cke_done_r && phy_ctl_ready && ~(phy_ctl_full || phy_cmd_full )) begin calib_ctl_wren <= #TCQ 1'b1; calib_cmd_wren <= #TCQ 1'b1; calib_seq <= #TCQ calib_seq + 1; end else begin calib_ctl_wren <= #TCQ 1'b0; calib_cmd_wren <= #TCQ 1'b0; calib_seq <= #TCQ calib_seq; end end generate genvar rnk_i; for (rnk_i = 0; rnk_i < 4; rnk_i = rnk_i + 1) begin: gen_rnk always @(posedge clk) begin if (rst) begin mr2_r[rnk_i] <= #TCQ 2'b00; mr1_r[rnk_i] <= #TCQ 3'b000; end else begin mr2_r[rnk_i] <= #TCQ tmp_mr2_r[rnk_i]; mr1_r[rnk_i] <= #TCQ tmp_mr1_r[rnk_i]; end end end endgenerate // ODT assignment based on slot config and slot present // For single slot systems slot_1_present input will be ignored // Assuming component interfaces to be single slot systems generate if (nSLOTS == 1) begin: gen_single_slot_odt always @(posedge clk) begin if (rst) begin tmp_mr2_r[1] <= #TCQ 2'b00; tmp_mr2_r[2] <= #TCQ 2'b00; tmp_mr2_r[3] <= #TCQ 2'b00; tmp_mr1_r[1] <= #TCQ 3'b000; tmp_mr1_r[2] <= #TCQ 3'b000; tmp_mr1_r[3] <= #TCQ 3'b000; phy_tmp_cs1_r <= #TCQ {CS_WIDTH*nCS_PER_RANK{1'b1}}; phy_tmp_odt_r <= #TCQ 4'b0000; phy_tmp_odt_r1 <= #TCQ phy_tmp_odt_r; end else begin case ({slot_0_present[0],slot_0_present[1], slot_0_present[2],slot_0_present[3]}) // Single slot configuration with quad rank // Assuming same behavior as single slot dual rank for now // DDR2 does not have quad rank parts 4'b1111: begin if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 RTT_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 
2'b01 : 2'b10; //Rank0 RTT_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end phy_tmp_odt_r <= #TCQ 4'b0001; // Chip Select assignments phy_tmp_cs1_r[((chip_cnt_r*nCS_PER_RANK) ) +: nCS_PER_RANK] <= #TCQ 'b0; end // Single slot configuration with single rank 4'b1000: begin phy_tmp_odt_r <= #TCQ 4'b0001; if ((REG_CTRL == "ON") && (nCS_PER_RANK > 1)) begin phy_tmp_cs1_r[chip_cnt_r] <= #TCQ 1'b0; end else begin phy_tmp_cs1_r <= #TCQ {CS_WIDTH*nCS_PER_RANK{1'b0}}; end if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && ((cnt_init_mr_r == 2'd0) || (USE_ODT_PORT == 1)))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 RTT_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 RTT_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end // Single slot configuration with dual rank 4'b1100: begin phy_tmp_odt_r <= #TCQ 4'b0001; // Chip Select assignments phy_tmp_cs1_r[((chip_cnt_r*nCS_PER_RANK) ) +: nCS_PER_RANK] <= #TCQ 'b0; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end default: begin phy_tmp_odt_r <= #TCQ 4'b0001; phy_tmp_cs1_r <= #TCQ {CS_WIDTH*nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done)) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end endcase end end end else if (nSLOTS == 2) begin: gen_dual_slot_odt always @ (posedge clk) begin if (rst) begin tmp_mr2_r[1] <= #TCQ 2'b00; tmp_mr2_r[2] <= #TCQ 2'b00; tmp_mr2_r[3] <= #TCQ 2'b00; tmp_mr1_r[1] <= #TCQ 3'b000; tmp_mr1_r[2] <= #TCQ 3'b000; tmp_mr1_r[3] <= #TCQ 3'b000; phy_tmp_odt_r <= #TCQ 4'b0000; phy_tmp_cs1_r <= #TCQ {CS_WIDTH*nCS_PER_RANK{1'b1}}; phy_tmp_odt_r1 <= #TCQ phy_tmp_odt_r; end else begin case ({slot_0_present[0],slot_0_present[1], slot_1_present[0],slot_1_present[1]}) // Two slot configuration, one slot present, single rank 4'b10_00: begin if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // odt turned on only during write phy_tmp_odt_r <= #TCQ 4'b0001; end phy_tmp_cs1_r <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done)) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 
2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end 4'b00_10: begin //Rank1 ODT enabled if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // odt turned on only during write phy_tmp_odt_r <= #TCQ 4'b0001; end phy_tmp_cs1_r <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done)) begin //Rank1 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank1 Rtt_NOM defaults to 120 ohms tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end // Two slot configuration, one slot present, dual rank 4'b00_11: begin if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // odt turned on only during write phy_tmp_odt_r <= #TCQ 4'b0001; end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end 4'b11_00: begin if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // odt turned on only during write phy_tmp_odt_r <= #TCQ 4'b0001; end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank1 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank1 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 
2'b01 : 2'b10; //Rank1 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end // Two slot configuration, one rank per slot 4'b10_10: begin if(DRAM_TYPE == "DDR2")begin if(chip_cnt_r == 2'b00)begin phy_tmp_odt_r <= #TCQ 4'b0010; //bit0 for rank0 end else begin phy_tmp_odt_r <= #TCQ 4'b0001; //bit0 for rank0 end end else begin if(init_state_r == INIT_WRLVL_WAIT) phy_tmp_odt_r <= #TCQ 4'b0011; // rank 0/1 odt0 else if((init_next_state == INIT_RDLVL_STG1_WRITE) || (init_next_state == INIT_WRCAL_WRITE) || (init_next_state == INIT_OCLKDELAY_WRITE)) phy_tmp_odt_r <= #TCQ 4'b0011; // bit0 for rank0/1 (write) else if ((init_next_state == INIT_PI_PHASELOCK_READS) || (init_next_state == INIT_MPR_READ) || (init_next_state == INIT_RDLVL_STG1_READ) || (init_next_state == INIT_RDLVL_STG2_READ) || (init_next_state == INIT_OCLKDELAY_READ) || (init_next_state == INIT_WRCAL_READ) || (init_next_state == INIT_WRCAL_MULT_READS)) phy_tmp_odt_r <= #TCQ 4'b0010; // bit0 for rank1 (rank 0 rd) end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_WR == "60") ? 3'b001 : (RTT_WR == "120") ? 3'b010 : 3'b000; //Rank1 Dynamic ODT disabled tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000; //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000; end end // Two Slots - One slot with dual rank and other with single rank 4'b10_11: begin //Rank3 Rtt_NOM tmp_mr1_r[2] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000; tmp_mr2_r[2] <= #TCQ 2'b00; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; //Rank1 Dynamic ODT disabled tmp_mr2_r[1] <= #TCQ 2'b00; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000; //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 
2'b01 : 2'b10; //Rank1 Rtt_NOM after write leveling completes tmp_mr1_r[1] <= #TCQ 3'b000; end //Slot1 Rank1 or Rank3 is being written if(DRAM_TYPE == "DDR2")begin if(chip_cnt_r == 2'b00)begin phy_tmp_odt_r <= #TCQ 4'b0010; end else begin phy_tmp_odt_r <= #TCQ 4'b0001; end end else begin if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin if (chip_cnt_r[0] == 1'b1) begin phy_tmp_odt_r <= #TCQ 4'b0011; //Slot0 Rank0 is being written end else begin phy_tmp_odt_r <= #TCQ 4'b0101; // ODT for ranks 0 and 2 aserted end end else if ((init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS))begin if (chip_cnt_r == 2'b00) begin phy_tmp_odt_r <= #TCQ 4'b0100; end else begin phy_tmp_odt_r <= #TCQ 4'b0001; end end end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; end // Two Slots - One slot with dual rank and other with single rank 4'b11_10: begin //Rank2 Rtt_NOM tmp_mr1_r[2] <= #TCQ (RTT_NOM2 == "60") ? 3'b001 : (RTT_NOM2 == "120") ? 3'b010 : (RTT_NOM2 == "20") ? 3'b100 : (RTT_NOM2 == "30") ? 3'b101 : (RTT_NOM2 == "40") ? 3'b011: 3'b000; tmp_mr2_r[2] <= #TCQ 2'b00; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; //Rank1 Dynamic ODT disabled tmp_mr2_r[1] <= #TCQ 2'b00; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011: 3'b000; //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end if(DRAM_TYPE == "DDR2")begin if(chip_cnt_r[1] == 1'b1)begin phy_tmp_odt_r <= #TCQ 4'b0001; end else begin phy_tmp_odt_r <= #TCQ 4'b0100; // rank 2 ODT asserted end end else begin if (// wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin if (chip_cnt_r[1] == 1'b1) begin phy_tmp_odt_r <= #TCQ 4'b0110; end else begin phy_tmp_odt_r <= #TCQ 4'b0101; end end else if ((init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS)) begin if (chip_cnt_r[1] == 1'b1) begin phy_tmp_odt_r[(1*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ 4'b0010; end else begin phy_tmp_odt_r <= #TCQ 4'b0100; end end end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; end // Two Slots - two ranks per slot 4'b11_11: begin //Rank2 Rtt_NOM tmp_mr1_r[2] <= #TCQ (RTT_NOM2 == "60") ? 3'b001 : (RTT_NOM2 == "120") ? 
3'b010 :
                                   (RTT_NOM2 == "20")  ? 3'b100 :
                                   (RTT_NOM2 == "30")  ? 3'b101 :
                                   (RTT_NOM2 == "40")  ? 3'b011 : 3'b000;
              //Rank3 Rtt_NOM
              tmp_mr1_r[3] <= #TCQ (RTT_NOM3 == "60")  ? 3'b001 :
                                   (RTT_NOM3 == "120") ? 3'b010 :
                                   (RTT_NOM3 == "20")  ? 3'b100 :
                                   (RTT_NOM3 == "30")  ? 3'b101 :
                                   (RTT_NOM3 == "40")  ? 3'b011 : 3'b000;
              tmp_mr2_r[2] <= #TCQ 2'b00;
              tmp_mr2_r[3] <= #TCQ 2'b00;
              if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done &&
                  (wrlvl_rank_cntr==3'd0))) begin
                //Rank0 Dynamic ODT disabled
                tmp_mr2_r[0] <= #TCQ 2'b00;
                //Rank0 Rtt_NOM
                tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40")  ? 3'b011 :
                                     (RTT_NOM_int == "60")  ? 3'b001 :
                                     (RTT_NOM_int == "120") ? 3'b010 : 3'b000;
                //Rank1 Dynamic ODT disabled
                tmp_mr2_r[1] <= #TCQ 2'b00;
                //Rank1 Rtt_NOM
                tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40")  ? 3'b011 :
                                     (RTT_NOM_int == "60")  ? 3'b001 :
                                     (RTT_NOM_int == "120") ? 3'b010 : 3'b000;
              end else begin
                //Rank1 Dynamic ODT defaults to 120 ohms
                tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10;
                //Rank1 Rtt_NOM after write leveling completes
                tmp_mr1_r[1] <= #TCQ 3'b000;
                //Rank0 Dynamic ODT defaults to 120 ohms
                tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10;
                //Rank0 Rtt_NOM after write leveling completes
                tmp_mr1_r[0] <= #TCQ 3'b000;
              end
              if(DRAM_TYPE == "DDR2")begin
                if(chip_cnt_r[1] == 1'b1)begin
                  phy_tmp_odt_r <= #TCQ 4'b0001;
                end else begin
                  phy_tmp_odt_r <= #TCQ 4'b0100;
                end
              end else begin
                if (//wrlvl_odt ||
                    (init_state_r == INIT_RDLVL_STG1_WRITE) ||
                    (init_state_r == INIT_WRCAL_WRITE) ||
                    (init_state_r == INIT_OCLKDELAY_WRITE)) begin
                  //Slot1 Rank1 or Rank3 is being written
                  if (chip_cnt_r[0] == 1'b1) begin
                    phy_tmp_odt_r <= #TCQ 4'b0110;
                  //Slot0 Rank0 or Rank2 is being written
                  end else begin
                    phy_tmp_odt_r <= #TCQ 4'b1001;
                  end
                end else if ((init_state_r == INIT_RDLVL_STG1_READ) ||
                             (init_state_r == INIT_PI_PHASELOCK_READS) ||
                             (init_state_r == INIT_RDLVL_STG2_READ) ||
                             (init_state_r == INIT_OCLKDELAY_READ) ||
                             (init_state_r == INIT_WRCAL_READ) ||
                             (init_state_r == INIT_WRCAL_MULT_READS))begin
                  //Slot1 Rank1 or Rank3 is being read
                  if (chip_cnt_r[0] == 1'b1) begin
                    phy_tmp_odt_r <= #TCQ 4'b0100;
                  //Slot0 Rank0 or Rank2 is being read
                  end else begin
                    phy_tmp_odt_r <= #TCQ 4'b1000;
                  end
                end
              end
              // Chip Select assignments
              phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK]
                <= #TCQ {nCS_PER_RANK{1'b0}};
            end

            default: begin
              phy_tmp_odt_r <= #TCQ 4'b1111;
              // Chip Select assignments
              phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK]
                <= #TCQ {nCS_PER_RANK{1'b0}};
              if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done)) begin
                //Rank0 Dynamic ODT disabled
                tmp_mr2_r[0] <= #TCQ 2'b00;
                //Rank0 Rtt_NOM
                tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40")  ? 3'b011 :
                                     (RTT_NOM_int == "60")  ? 3'b001 :
                                     (RTT_NOM_int == "120") ? 3'b010 : 3'b000;
                //Rank1 Dynamic ODT disabled
                tmp_mr2_r[1] <= #TCQ 2'b00;
                //Rank1 Rtt_NOM
                tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40")  ? 3'b011 :
                                     (RTT_NOM_int == "60")  ? 3'b001 :
                                     (RTT_NOM_int == "120") ? 3'b010 : 3'b000;
              end else begin
                //Rank0 Dynamic ODT defaults to 120 ohms
                tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10;
                //Rank0 Rtt_NOM
                tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "60")  ? 3'b001 :
                                     (RTT_NOM_int == "120") ? 3'b010 :
                                     (RTT_NOM_int == "20")  ? 3'b100 :
                                     (RTT_NOM_int == "30")  ? 3'b101 :
                                     (RTT_NOM_int == "40")  ? 3'b011 : 3'b000;
                //Rank1 Dynamic ODT defaults to 120 ohms
                tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10;
                //Rank1 Rtt_NOM
                tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "60")  ? 3'b001 :
                                     (RTT_NOM_int == "120") ? 3'b010 :
                                     (RTT_NOM_int == "20")  ? 3'b100 :
                                     (RTT_NOM_int == "30")  ? 3'b101 :
                                     (RTT_NOM_int == "40")  ?
3'b011 : 3'b000; end end endcase end end end endgenerate // PHY only supports two ranks. // calib_aux_out[0] is CKE for rank 0 and calib_aux_out[1] is ODT for rank 0 // calib_aux_out[2] is CKE for rank 1 and calib_aux_out[3] is ODT for rank 1 generate if(CKE_ODT_AUX == "FALSE") begin if ((nSLOTS == 1) && (RANKS < 2)) begin always @(posedge clk) if (rst) begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}} ; calib_odt <= 2'b00 ; end else begin if (cnt_pwron_cke_done_r /*&& ~cnt_pwron_cke_done_r1*/)begin calib_cke <= #TCQ {nCK_PER_CLK{1'b1}}; end else begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}}; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF"))/* || wrlvl_rank_done || wrlvl_rank_done_r1 || (wrlvl_done && !wrlvl_done_r)*/) && (DRAM_TYPE == "DDR3")) begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt ) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_WRITE_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_WRITE_READ) || (init_state_r == INIT_OCLKDELAY_WRITE)|| (init_state_r == INIT_OCLKDELAY_WRITE_WAIT))) begin // Quad rank in a single slot calib_odt[0] <= #TCQ phy_tmp_odt_r[0]; calib_odt[1] <= #TCQ phy_tmp_odt_r[1]; end else begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end end end else if ((nSLOTS == 1) && (RANKS <= 2)) begin always @(posedge clk) if (rst) begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}} ; calib_odt <= 2'b00 ; end else begin if (cnt_pwron_cke_done_r /*&& ~cnt_pwron_cke_done_r1*/)begin calib_cke <= #TCQ {nCK_PER_CLK{1'b1}}; end else begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}}; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF"))/* || wrlvl_rank_done_r2 || (wrlvl_done && !wrlvl_done_r)*/) && (DRAM_TYPE == "DDR3")) begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt)|| (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_WRITE_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_WRITE_READ) || (init_state_r == INIT_OCLKDELAY_WRITE)|| (init_state_r == INIT_OCLKDELAY_WRITE_WAIT))) begin // Dual rank in a single slot calib_odt[0] <= #TCQ phy_tmp_odt_r[0]; calib_odt[1] <= #TCQ phy_tmp_odt_r[1]; end else begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end end end else if ((nSLOTS == 2) && (RANKS == 2)) begin always @(posedge clk) if (rst)begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}} ; calib_odt <= 2'b00 ; end else begin if (cnt_pwron_cke_done_r /*&& ~cnt_pwron_cke_done_r1*/)begin calib_cke <= #TCQ {nCK_PER_CLK{1'b1}}; end else begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}}; end if (((DRAM_TYPE == "DDR2") && (RTT_NOM == "DISABLED")) || ((DRAM_TYPE == "DDR3") && (RTT_NOM == "DISABLED") && (RTT_WR == "OFF"))) begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end else if (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // Quad rank in a single slot if (nCK_PER_CLK == 2) begin calib_odt[0] <= #TCQ (!calib_odt[0]) ? phy_tmp_odt_r[0] : 1'b0; calib_odt[1] <= #TCQ (!calib_odt[1]) ? 
phy_tmp_odt_r[1] : 1'b0; end else begin calib_odt[0] <= #TCQ phy_tmp_odt_r[0]; calib_odt[1] <= #TCQ phy_tmp_odt_r[1]; end // Turn on for idle rank during read if dynamic ODT is enabled in DDR3 end else if(((DRAM_TYPE == "DDR3") && (RTT_WR != "OFF")) && ((init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_MPR_READ) || (init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS))) begin if (nCK_PER_CLK == 2) begin calib_odt[0] <= #TCQ (!calib_odt[0]) ? phy_tmp_odt_r[0] : 1'b0; calib_odt[1] <= #TCQ (!calib_odt[1]) ? phy_tmp_odt_r[1] : 1'b0; end else begin calib_odt[0] <= #TCQ phy_tmp_odt_r[0]; calib_odt[1] <= #TCQ phy_tmp_odt_r[1]; end // disable well before next command and before disabling write leveling end else if(cnt_cmd_done_m7_r || (init_state_r == INIT_WRLVL_WAIT && ~wrlvl_odt)) calib_odt <= #TCQ 2'b00; end end end else begin//USE AUX OUTPUT for routing CKE and ODT. if ((nSLOTS == 1) && (RANKS < 2)) begin always @(posedge clk) if (rst) begin calib_aux_out <= #TCQ 4'b0000; end else begin if (cnt_pwron_cke_done_r && ~cnt_pwron_cke_done_r1)begin calib_aux_out[0] <= #TCQ 1'b1; calib_aux_out[2] <= #TCQ 1'b1; end else begin calib_aux_out[0] <= #TCQ 1'b0; calib_aux_out[2] <= #TCQ 1'b0; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF")) || wrlvl_rank_done || wrlvl_rank_done_r1 || (wrlvl_done && !wrlvl_done_r)) && (DRAM_TYPE == "DDR3")) begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE))) begin // Quad rank in a single slot calib_aux_out[1] <= #TCQ phy_tmp_odt_r[0]; calib_aux_out[3] <= #TCQ phy_tmp_odt_r[1]; end else begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end end end else if ((nSLOTS == 1) && (RANKS <= 2)) begin always @(posedge clk) if (rst) begin calib_aux_out <= #TCQ 4'b0000; end else begin if (cnt_pwron_cke_done_r && ~cnt_pwron_cke_done_r1)begin calib_aux_out[0] <= #TCQ 1'b1; calib_aux_out[2] <= #TCQ 1'b1; end else begin calib_aux_out[0] <= #TCQ 1'b0; calib_aux_out[2] <= #TCQ 1'b0; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF")) || wrlvl_rank_done_r2 || (wrlvl_done && !wrlvl_done_r)) && (DRAM_TYPE == "DDR3")) begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE))) begin // Dual rank in a single slot calib_aux_out[1] <= #TCQ phy_tmp_odt_r[0]; calib_aux_out[3] <= #TCQ phy_tmp_odt_r[1]; end else begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end end end else if ((nSLOTS == 2) && (RANKS == 2)) begin always @(posedge clk) if (rst) calib_aux_out <= #TCQ 4'b0000; else begin if (cnt_pwron_cke_done_r && ~cnt_pwron_cke_done_r1)begin calib_aux_out[0] <= #TCQ 1'b1; calib_aux_out[2] <= #TCQ 1'b1; end else begin calib_aux_out[0] <= #TCQ 1'b0; calib_aux_out[2] <= #TCQ 1'b0; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF")) || wrlvl_rank_done_r2 || (wrlvl_done && !wrlvl_done_r)) && (DRAM_TYPE == "DDR3")) begin calib_aux_out[1] <= 
#TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE))) begin // Quad rank in a single slot if (nCK_PER_CLK == 2) begin calib_aux_out[1] <= #TCQ (!calib_aux_out[1]) ? phy_tmp_odt_r[0] : 1'b0; calib_aux_out[3] <= #TCQ (!calib_aux_out[3]) ? phy_tmp_odt_r[1] : 1'b0; end else begin calib_aux_out[1] <= #TCQ phy_tmp_odt_r[0]; calib_aux_out[3] <= #TCQ phy_tmp_odt_r[1]; end end else begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end end end end endgenerate //***************************************************************** // memory address during init //***************************************************************** always @(posedge clk) phy_data_full_r <= #TCQ phy_data_full; always @(burst_addr_r or cnt_init_mr_r or chip_cnt_r or wrcal_wr_cnt or ddr2_refresh_flag_r or init_state_r or load_mr0 or phy_data_full_r or load_mr1 or load_mr2 or load_mr3 or new_burst_r or phy_address or mr1_r[0][0] or mr1_r[0][1] or mr1_r[0][2] or mr1_r[1][0] or mr1_r[1][1] or mr1_r[1][2] or mr1_r[2][0] or mr1_r[2][1] or mr1_r[2][2] or mr1_r[3][0] or mr1_r[3][1] or mr1_r[3][2] or mr2_r[chip_cnt_r] or reg_ctrl_cnt_r or stg1_wr_rd_cnt or oclk_wr_cnt or rdlvl_stg1_done or prbs_rdlvl_done or pi_dqs_found_done or rdlvl_wr_rd)begin // Bus 0 for address/bank never used address_w = 'b0; bank_w = 'b0; if ((init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_DDR2_PRECHARGE)) begin // Set A10=1 for ZQ long calibration or Precharge All address_w = 'b0; address_w[10] = 1'b1; bank_w = 'b0; end else if (init_state_r == INIT_WRLVL_START) begin // Enable wrlvl in MR1 bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; address_w[2] = mr1_r[chip_cnt_r][0]; address_w[6] = mr1_r[chip_cnt_r][1]; address_w[9] = mr1_r[chip_cnt_r][2]; address_w[7] = 1'b1; end else if (init_state_r == INIT_WRLVL_LOAD_MR) begin // Finished with write leveling, disable wrlvl in MR1 // For single rank disable Rtt_Nom bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; address_w[2] = mr1_r[chip_cnt_r][0]; address_w[6] = mr1_r[chip_cnt_r][1]; address_w[9] = mr1_r[chip_cnt_r][2]; end else if (init_state_r == INIT_WRLVL_LOAD_MR2) begin // Set RTT_WR in MR2 after write leveling disabled bank_w[1:0] = 2'b10; address_w = load_mr2[ROW_WIDTH-1:0]; address_w[10:9] = mr2_r[chip_cnt_r]; end else if (init_state_r == INIT_MPR_READ) begin address_w = 'b0; bank_w = 'b0; end else if (init_state_r == INIT_MPR_RDEN) begin // Enable MPR read with LMR3 and A2=1 bank_w[BANK_WIDTH-1:0] = 'd3; address_w = {ROW_WIDTH{1'b0}}; address_w[2] = 1'b1; end else if (init_state_r == INIT_MPR_DISABLE) begin // Disable MPR read with LMR3 and A2=0 bank_w[BANK_WIDTH-1:0] = 'd3; address_w = {ROW_WIDTH{1'b0}}; end else if ((init_state_r == INIT_REG_WRITE)& (DRAM_TYPE == "DDR3"))begin // bank_w is assigned a 3 bit value. In some // DDR2 cases there will be only two bank bits. 
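        // (Descriptive note on the case statement below: each register
        // control-word write places the RCx data on the low-order address
        // bits, address_w[4:0] = REG_RCx[4:0]; only RC1 also drives the
        // bank bits, bank_w = REG_RC1[7:5].)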
//Qualifying the condition with DDR3 bank_w = 'b0; address_w = 'b0; case (reg_ctrl_cnt_r) REG_RC0[2:0]: address_w[4:0] = REG_RC0[4:0]; REG_RC1[2:0]:begin address_w[4:0] = REG_RC1[4:0]; bank_w = REG_RC1[7:5]; end REG_RC2[2:0]: address_w[4:0] = REG_RC2[4:0]; REG_RC3[2:0]: address_w[4:0] = REG_RC3[4:0]; REG_RC4[2:0]: address_w[4:0] = REG_RC4[4:0]; REG_RC5[2:0]: address_w[4:0] = REG_RC5[4:0]; endcase end else if (init_state_r == INIT_LOAD_MR) begin // If loading mode register, look at cnt_init_mr to determine // which MR is currently being programmed address_w = 'b0; bank_w = 'b0; if(DRAM_TYPE == "DDR3")begin if(rdlvl_stg1_done && prbs_rdlvl_done && pi_dqs_found_done)begin // end of the calibration programming correct // burst length if (TEST_AL == "0") begin bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; address_w[8]= 1'b0; //Don't reset DLL end else begin // programming correct AL value bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; if (TEST_AL == "CL-1") address_w[4:3]= 2'b01; // AL="CL-1" else address_w[4:3]= 2'b10; // AL="CL-2" end end else begin case (cnt_init_mr_r) INIT_CNT_MR2: begin bank_w[1:0] = 2'b10; address_w = load_mr2[ROW_WIDTH-1:0]; address_w[10:9] = mr2_r[chip_cnt_r]; end INIT_CNT_MR3: begin bank_w[1:0] = 2'b11; address_w = load_mr3[ROW_WIDTH-1:0]; end INIT_CNT_MR1: begin bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; address_w[2] = mr1_r[chip_cnt_r][0]; address_w[6] = mr1_r[chip_cnt_r][1]; address_w[9] = mr1_r[chip_cnt_r][2]; end INIT_CNT_MR0: begin bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; // fixing it to BL8 for calibration address_w[1:0] = 2'b00; end default: begin bank_w = {BANK_WIDTH{1'bx}}; address_w = {ROW_WIDTH{1'bx}}; end endcase end end else begin // DDR2 case (cnt_init_mr_r) INIT_CNT_MR2: begin if(~ddr2_refresh_flag_r)begin bank_w[1:0] = 2'b10; address_w = load_mr2[ROW_WIDTH-1:0]; end else begin // second set of lm commands bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; address_w[8]= 1'b0; //MRS command without resetting DLL end end INIT_CNT_MR3: begin if(~ddr2_refresh_flag_r)begin bank_w[1:0] = 2'b11; address_w = load_mr3[ROW_WIDTH-1:0]; end else begin // second set of lm commands bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; address_w[8]= 1'b0; //MRS command without resetting DLL. Repeted again // because there is an extra state. end end INIT_CNT_MR1: begin bank_w[1:0] = 2'b01; if(~ddr2_refresh_flag_r)begin address_w = load_mr1[ROW_WIDTH-1:0]; end else begin // second set of lm commands address_w = load_mr1[ROW_WIDTH-1:0]; address_w[9:7] = 3'b111; //OCD default state end end INIT_CNT_MR0: begin if(~ddr2_refresh_flag_r)begin bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; end else begin // second set of lm commands bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; if((chip_cnt_r == 2'd1) || (chip_cnt_r == 2'd3))begin // always disable odt for rank 1 and rank 3 as per SPEC address_w[2] = 'b0; address_w[6] = 'b0; end //OCD exit end end default: begin bank_w = {BANK_WIDTH{1'bx}}; address_w = {ROW_WIDTH{1'bx}}; end endcase end end else if ((init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_READ)) begin // Writing and reading PRBS pattern for read leveling stage 1 // Need to support burst length 4 or 8. 
PRBS pattern will be // written to entire row and read back from the same row repeatedly bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w[ROW_WIDTH-1:COL_WIDTH] = {ROW_WIDTH-COL_WIDTH{1'b0}}; if (((stg1_wr_rd_cnt == NUM_STG1_WR_RD) && ~rdlvl_stg1_done) || (stg1_wr_rd_cnt == 'd128)) address_w[COL_WIDTH-1:0] = {COL_WIDTH{1'b0}}; else if (phy_data_full_r || (!new_burst_r)) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0]; else if ((stg1_wr_rd_cnt >= 9'd0) && new_burst_r && ~phy_data_full_r) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0] + ADDR_INC; end else if ((init_state_r == INIT_OCLKDELAY_WRITE) || (init_state_r == INIT_OCLKDELAY_READ)) begin bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w[ROW_WIDTH-1:COL_WIDTH] = {ROW_WIDTH-COL_WIDTH{1'b0}}; if (oclk_wr_cnt == NUM_STG1_WR_RD) address_w[COL_WIDTH-1:0] = {COL_WIDTH{1'b0}}; else if (phy_data_full_r || (!new_burst_r)) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0]; else if ((oclk_wr_cnt >= 4'd0) && new_burst_r && ~phy_data_full_r) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0] + ADDR_INC; end else if ((init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_READ)) begin bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w[ROW_WIDTH-1:COL_WIDTH] = {ROW_WIDTH-COL_WIDTH{1'b0}}; if (wrcal_wr_cnt == NUM_STG1_WR_RD) address_w[COL_WIDTH-1:0] = {COL_WIDTH{1'b0}}; else if (phy_data_full_r || (!new_burst_r)) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0]; else if ((wrcal_wr_cnt >= 4'd0) && new_burst_r && ~phy_data_full_r) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0] + ADDR_INC; end else if ((init_state_r == INIT_WRCAL_MULT_READS) || (init_state_r == INIT_RDLVL_STG2_READ)) begin // when writing or reading back training pattern for read leveling stage2 // need to support burst length of 4 or 8. This may mean issuing // multiple commands to cover the entire range of addresses accessed // during read leveling. // Hard coding A[12] to 1 so that it will always be burst length of 8 // for DDR3. Does not have any effect on DDR2. bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w[ROW_WIDTH-1:COL_WIDTH] = {ROW_WIDTH-COL_WIDTH{1'b0}}; address_w[COL_WIDTH-1:0] = {CALIB_COL_ADD[COL_WIDTH-1:3],burst_addr_r, 3'b000}; address_w[12] = 1'b1; end else if ((init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT)) begin bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w = CALIB_ROW_ADD[ROW_WIDTH-1:0]; end else begin bank_w = {BANK_WIDTH{1'bx}}; address_w = {ROW_WIDTH{1'bx}}; end end // registring before sending out generate genvar r,s; if ((DRAM_TYPE != "DDR3") || (CA_MIRROR != "ON")) begin: gen_no_mirror for (r = 0; r < nCK_PER_CLK; r = r + 1) begin: div_clk_loop always @(posedge clk) begin phy_address[(r*ROW_WIDTH) +: ROW_WIDTH] <= #TCQ address_w; phy_bank[(r*BANK_WIDTH) +: BANK_WIDTH] <= #TCQ bank_w; end end end else begin: gen_mirror // Control/addressing mirroring (optional for DDR3 dual rank DIMMs) // Mirror for the 2nd rank only. Logic needs to be enhanced to account // for multiple slots, currently only supports one slot, 2-rank config for (r = 0; r < nCK_PER_CLK; r = r + 1) begin: gen_ba_div_clk_loop for (s = 0; s < BANK_WIDTH; s = s + 1) begin: gen_ba always @(posedge clk) if (chip_cnt_r == 2'b00) begin phy_bank[(r*BANK_WIDTH) + s] <= #TCQ bank_w[s]; end else begin phy_bank[(r*BANK_WIDTH) + s] <= #TCQ bank_w[(s == 0) ? 1 : ((s == 1) ? 
0 : s)]; end end end for (r = 0; r < nCK_PER_CLK; r = r + 1) begin: gen_addr_div_clk_loop for (s = 0; s < ROW_WIDTH; s = s + 1) begin: gen_addr always @(posedge clk) if (chip_cnt_r == 2'b00) begin phy_address[(r*ROW_WIDTH) + s] <= #TCQ address_w[s]; end else begin phy_address[(r*ROW_WIDTH) + s] <= #TCQ address_w[ (s == 3) ? 4 : ((s == 4) ? 3 : ((s == 5) ? 6 : ((s == 6) ? 5 : ((s == 7) ? 8 : ((s == 8) ? 7 : s)))))]; end end end end endgenerate endmodule
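// Note on the mirrored rank above: when chip_cnt_r != 0 the generate block
// swaps BA0<->BA1 and address bits A3<->A4, A5<->A6 and A7<->A8 before
// registering, i.e. the DDR3 control/address mirroring scheme commonly used
// on the second rank of dual-rank DIMMs (only one slot, 2-rank supported).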
//***************************************************************************** // (c) Copyright 2009 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor: Xilinx // \ \ \/ Version: %version // \ \ Application: MIG // / / Filename: ddr_phy_init.v // /___/ /\ Date Last Modified: $Date: 2011/06/02 08:35:09 $ // \ \ / \ Date Created: // \___\/\___\ // //Device: 7 Series //Design Name: DDR3 SDRAM //Purpose: // Memory initialization and overall master state control during // initialization and calibration. Specifically, the following functions // are performed: // 1. Memory initialization (initial AR, mode register programming, etc.) // 2. Initiating write leveling // 3. Generate training pattern writes for read leveling. Generate // memory readback for read leveling. // This module has an interface for providing control/address and write // data to the PHY Control Block during initialization/calibration. // Once initialization and calibration are complete, control is passed to the MC. 
// //Reference: //Revision History: // //***************************************************************************** /****************************************************************************** **$Id: ddr_phy_init.v,v 1.1 2011/06/02 08:35:09 mishra Exp $ **$Date: 2011/06/02 08:35:09 $ **$Author: mishra $ **$Revision: 1.1 $ **$Source: /devl/xcs/repo/env/Databases/ip/src2/O/mig_7series_v1_3/data/dlib/7series/ddr3_sdram/verilog/rtl/phy/ddr_phy_init.v,v $ ******************************************************************************/ `timescale 1ps/1ps module mig_7series_v1_9_ddr_phy_init # ( parameter TCQ = 100, parameter nCK_PER_CLK = 4, // # of memory clocks per CLK parameter CLK_PERIOD = 3000, // Logic (internal) clk period (in ps) parameter USE_ODT_PORT = 0, // 0 - No ODT output from FPGA // 1 - ODT output from FPGA parameter PRBS_WIDTH = 8, // PRBS sequence = 2^PRBS_WIDTH parameter BANK_WIDTH = 2, parameter CA_MIRROR = "OFF", // C/A mirror opt for DDR3 dual rank parameter COL_WIDTH = 10, parameter nCS_PER_RANK = 1, // # of CS bits per rank e.g. for // component I/F with CS_WIDTH=1, // nCS_PER_RANK=# of components parameter DQ_WIDTH = 64, parameter DQS_WIDTH = 8, parameter DQS_CNT_WIDTH = 3, // = ceil(log2(DQS_WIDTH)) parameter ROW_WIDTH = 14, parameter CS_WIDTH = 1, parameter RANKS = 1, // # of memory ranks in the interface parameter CKE_WIDTH = 1, // # of cke outputs parameter DRAM_TYPE = "DDR3", parameter REG_CTRL = "ON", parameter ADDR_CMD_MODE= "1T", // calibration Address parameter CALIB_ROW_ADD = 16'h0000,// Calibration row address parameter CALIB_COL_ADD = 12'h000, // Calibration column address parameter CALIB_BA_ADD = 3'h0, // Calibration bank address // DRAM mode settings parameter AL = "0", // Additive Latency option parameter BURST_MODE = "8", // Burst length parameter BURST_TYPE = "SEQ", // Burst type // parameter nAL = 0, // Additive latency (in clk cyc) parameter nCL = 5, // Read CAS latency (in clk cyc) parameter nCWL = 5, // Write CAS latency (in clk cyc) parameter tRFC = 110000, // Refresh-to-command delay (in ps) parameter OUTPUT_DRV = "HIGH", // DRAM reduced output drive option parameter RTT_NOM = "60", // Nominal ODT termination value parameter RTT_WR = "60", // Write ODT termination value parameter WRLVL = "ON", // Enable write leveling // parameter PHASE_DETECT = "ON", // Enable read phase detector parameter DDR2_DQSN_ENABLE = "YES", // Enable differential DQS for DDR2 parameter nSLOTS = 1, // Number of DIMM SLOTs in the system parameter SIM_INIT_OPTION = "NONE", // "NONE", "SKIP_PU_DLY", "SKIP_INIT" parameter SIM_CAL_OPTION = "NONE", // "NONE", "FAST_CAL", "SKIP_CAL" parameter CKE_ODT_AUX = "FALSE", parameter PRE_REV3ES = "OFF", // Enable TG error detection during calibration parameter TEST_AL = "0" // Internal use for ICM verification ) ( input clk, input rst, input [2*8*nCK_PER_CLK-1:0] prbs_o, input delay_incdec_done, input ck_addr_cmd_delay_done, input pi_phase_locked_all, input pi_dqs_found_done, input dqsfound_retry, input dqs_found_prech_req, output reg pi_phaselock_start, output pi_phase_locked_err, output pi_calib_done, input phy_if_empty, // Read/write calibration interface input wrlvl_done, input wrlvl_rank_done, input wrlvl_byte_done, input wrlvl_byte_redo, input wrlvl_final, output reg wrlvl_final_if_rst, input oclkdelay_calib_done, input oclk_prech_req, input oclk_calib_resume, output reg oclkdelay_calib_start, input done_dqs_tap_inc, input [5:0] rd_data_offset_0, input [5:0] rd_data_offset_1, input [5:0] rd_data_offset_2, input [6*RANKS-1:0] 
rd_data_offset_ranks_0, input [6*RANKS-1:0] rd_data_offset_ranks_1, input [6*RANKS-1:0] rd_data_offset_ranks_2, input pi_dqs_found_rank_done, input wrcal_done, input wrcal_prech_req, input wrcal_read_req, input wrcal_act_req, input temp_wrcal_done, input [7:0] slot_0_present, input [7:0] slot_1_present, output reg wl_sm_start, output reg wr_lvl_start, output reg wrcal_start, output reg wrcal_rd_wait, output reg wrcal_sanity_chk, output reg tg_timer_done, output reg no_rst_tg_mc, input rdlvl_stg1_done, input rdlvl_stg1_rank_done, output reg rdlvl_stg1_start, output reg pi_dqs_found_start, output reg detect_pi_found_dqs, // rdlvl stage 1 precharge requested after each DQS input rdlvl_prech_req, input rdlvl_last_byte_done, input wrcal_resume, input wrcal_sanity_chk_done, // MPR read leveling input mpr_rdlvl_done, input mpr_rnk_done, input mpr_last_byte_done, output reg mpr_rdlvl_start, output reg mpr_end_if_reset, // PRBS Read Leveling input prbs_rdlvl_done, input prbs_last_byte_done, input prbs_rdlvl_prech_req, output reg prbs_rdlvl_start, output reg prbs_gen_clk_en, // Signals shared btw multiple calibration stages output reg prech_done, // Data select / status output reg init_calib_complete, // Signal to mask memory model error for Invalid latching edge output reg calib_writes, // PHY address/control // 2 commands to PHY Control Block per div 2 clock in 2:1 mode // 4 commands to PHY Control Block per div 4 clock in 4:1 mode output reg [nCK_PER_CLK*ROW_WIDTH-1:0] phy_address, output reg [nCK_PER_CLK*BANK_WIDTH-1:0]phy_bank, output reg [nCK_PER_CLK-1:0] phy_ras_n, output reg [nCK_PER_CLK-1:0] phy_cas_n, output reg [nCK_PER_CLK-1:0] phy_we_n, output reg phy_reset_n, output [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] phy_cs_n, // Hard PHY Interface signals input phy_ctl_ready, input phy_ctl_full, input phy_cmd_full, input phy_data_full, output reg calib_ctl_wren, output reg calib_cmd_wren, output reg [1:0] calib_seq, output reg write_calib, output reg read_calib, // PHY_Ctl_Wd output reg [2:0] calib_cmd, // calib_aux_out used for CKE and ODT output reg [3:0] calib_aux_out, output reg [1:0] calib_odt , output reg [nCK_PER_CLK-1:0] calib_cke , output [1:0] calib_rank_cnt, output reg [1:0] calib_cas_slot, output reg [5:0] calib_data_offset_0, output reg [5:0] calib_data_offset_1, output reg [5:0] calib_data_offset_2, // PHY OUT_FIFO output reg calib_wrdata_en, output reg [2*nCK_PER_CLK*DQ_WIDTH-1:0] phy_wrdata, // PHY Read output phy_rddata_en, output phy_rddata_valid, output [255:0] dbg_phy_init ); //***************************************************************************** // Assertions to be added //***************************************************************************** // The phy_ctl_full signal must never be asserted in synchronous mode of // operation either 4:1 or 2:1 // // The RANKS parameter must never be set to '0' by the user // valid values: 1 to 4 // //***************************************************************************** //*************************************************************************** // Number of Read level stage 1 writes limited to a SDRAM row // The address of Read Level stage 1 reads must also be limited // to a single SDRAM row // (2^COL_WIDTH)/BURST_MODE = (2^10)/8 = 128 localparam NUM_STG1_WR_RD = (BURST_MODE == "8") ? 4 : (BURST_MODE == "4") ? 8 : 4; localparam ADDR_INC = (BURST_MODE == "8") ? 8 : (BURST_MODE == "4") ? 
4 : 8;

  // In a 2-slot system with dual-rank DIMMs, the RTT_NOM values
  // for Rank2 and Rank3 default to 40 ohms
  localparam RTT_NOM2 = "40";
  localparam RTT_NOM3 = "40";

  localparam RTT_NOM_int = (USE_ODT_PORT == 1) ? RTT_NOM : RTT_WR;

  // Specifically for use with half-frequency controller (nCK_PER_CLK=2)
  // = 1 if burst length = 4, = 0 if burst length = 8. Determines how
  // often a row command needs to be issued during read-leveling
  // For DDR3 the burst length is fixed during calibration
  localparam BURST4_FLAG = (DRAM_TYPE == "DDR3") ? 1'b0 :
                           (BURST_MODE == "8") ? 1'b0 :
                           ((BURST_MODE == "4") ? 1'b1 : 1'b0);

  //***************************************************************************
  // Counter values used to determine bus timing
  // NOTE on all counter terminal counts - these can/should be one less than
  //   the actual delay, to take into account the extra clock cycle delay in
  //   generating the corresponding "done" signal
  //***************************************************************************
  localparam CLK_MEM_PERIOD = CLK_PERIOD / nCK_PER_CLK;

  // Calculate the initial delay required, in number of CLK clock cycles.
  // The counter is clocked by [CLK/1024] - which is approximately division
  // by 1000 - note that the formulas below will result in more than the
  // minimum wait time because of this approximation.
  // NOTE: For DDR3 JEDEC specifies to delay reset by 200us, and CKE by an
  //   additional 500us after power-up.
  //   For DDR2 CKE is delayed by 200us after power-up.
  localparam DDR3_RESET_DELAY_NS = 200000;
  localparam DDR3_CKE_DELAY_NS   = 500000 + DDR3_RESET_DELAY_NS;
  localparam DDR2_CKE_DELAY_NS   = 200000;
  localparam PWRON_RESET_DELAY_CNT =
               ((DDR3_RESET_DELAY_NS+CLK_PERIOD-1)/CLK_PERIOD);
  localparam PWRON_CKE_DELAY_CNT = (DRAM_TYPE == "DDR3") ?
               (((DDR3_CKE_DELAY_NS+CLK_PERIOD-1)/CLK_PERIOD)) :
               (((DDR2_CKE_DELAY_NS+CLK_PERIOD-1)/CLK_PERIOD));
  // For DDR2 the -1 term is omitted: with the -1 the counter fell short of
  // the required 200us. The equation needs to be reworked.
  localparam DDR2_INIT_PRE_DELAY_PS = 400000;
  localparam DDR2_INIT_PRE_CNT =
               ((DDR2_INIT_PRE_DELAY_PS+CLK_PERIOD-1)/CLK_PERIOD)-1;

  // Calculate tXPR time: reset from CKE HIGH to valid command after power-up
  // tXPR = max(5 nCK, tRFC(min) + 10ns). Add a few (blah, messy) more clock
  // cycles because this counter actually starts up before CKE is asserted
  // to memory.
  localparam TXPR_DELAY_CNT =
               (5*CLK_MEM_PERIOD > tRFC+10000) ?
               (((5+nCK_PER_CLK-1)/nCK_PER_CLK)-1)+11 :
               (((tRFC+10000+CLK_PERIOD-1)/CLK_PERIOD)-1)+11;

  // tDLLK/tZQINIT time = 512*tCK = 256*tCLKDIV
  localparam TDLLK_TZQINIT_DELAY_CNT = 255;

  // tWR value of 15ns (= 15000ps), identical for DDR2 and DDR3, expressed
  // in memory clock cycles: ceil(15000ps/tCK)
  localparam TWR_CYC = ((15000) % CLK_MEM_PERIOD) ?
                         (15000/CLK_MEM_PERIOD) + 1 :
                         15000/CLK_MEM_PERIOD;

  // Time to wait between consecutive commands in PHY_INIT - this is a
  // generic number, and must be large enough to account for the worst case
  // timing parameter (tRFC - refresh-to-active) across all memory speed
  // grades and operating frequencies. Expressed in clk
  // (divided-by-4 or divided-by-2) clock cycles.
  localparam CNTNEXT_CMD = 7'b1111111;

  // Counter values to keep track of which MR register to load during init
  // Set value of INIT_CNT_MR_DONE to equal value of counter for last mode
  // register configured during initialization.
// NOTE: Reserve more bits for DDR2 - more MR accesses for DDR2 init
  localparam INIT_CNT_MR2     = 2'b00;
  localparam INIT_CNT_MR3     = 2'b01;
  localparam INIT_CNT_MR1     = 2'b10;
  localparam INIT_CNT_MR0     = 2'b11;
  localparam INIT_CNT_MR_DONE = 2'b11;

  // Register chip programmable values for DDR3
  // The register chip for the registered DIMM needs to be programmed
  // before the initialization of the registered DIMM.
  // Address for the control word is in: DBA2, DA2, DA1, DA0
  // Data for the control word is in:    DBA1, DBA0, DA4, DA3
  // The values are stored in the localparams in the following format:
  // {DBA[2:0], DA[4:0]}
  // RC0 is the global features control word. Address == 000
  localparam REG_RC0 = 8'b00000000;
  // RC1 clock driver enable control word. Enables or disables the four
  // output clocks in the register chip. For single rank and dual rank
  // two clocks will be enabled, and for quad rank all four clocks
  // will be enabled. Address == 001. Data = 0110 for single and dual rank,
  // = 0000 for quad rank
  localparam REG_RC1 = (RANKS <= 2) ? 8'b00110001 : 8'b00000001;
  // RC2 timing control word. Set in 1T timing mode
  // Address = 010. Data = 0000
  localparam REG_RC2 = 8'b00000010;
  // RC3 timing control word. Setting the data to 0000
  localparam REG_RC3 = 8'b00000011;
  // RC4 timing control word. Setting the data to 0000
  localparam REG_RC4 = 8'b00000100;
  // RC5 timing control word. Setting the data to 0000
  localparam REG_RC5 = 8'b00000101;

  // For non-zero AL values
  localparam nAL = (AL == "CL-1") ? nCL - 1 : 0;

  // Adding the registered DIMM latency to the write latency
  localparam CWL_M = (REG_CTRL == "ON") ? nCWL + nAL + 1 : nCWL + nAL;

  // Count value to generate pi_phase_locked_err signal
  localparam PHASELOCKED_TIMEOUT = (SIM_CAL_OPTION == "NONE") ? 16383 : 1000;

  // Timeout interval for detecting error with Traffic Generator
  localparam [13:0] TG_TIMER_TIMEOUT = (SIM_CAL_OPTION == "NONE") ?
14'h3FFF : 14'h0001; // Master state machine encoding localparam INIT_IDLE = 6'b000000; //0 localparam INIT_WAIT_CKE_EXIT = 6'b000001; //1 localparam INIT_LOAD_MR = 6'b000010; //2 localparam INIT_LOAD_MR_WAIT = 6'b000011; //3 localparam INIT_ZQCL = 6'b000100; //4 localparam INIT_WAIT_DLLK_ZQINIT = 6'b000101; //5 localparam INIT_WRLVL_START = 6'b000110; //6 localparam INIT_WRLVL_WAIT = 6'b000111; //7 localparam INIT_WRLVL_LOAD_MR = 6'b001000; //8 localparam INIT_WRLVL_LOAD_MR_WAIT = 6'b001001; //9 localparam INIT_WRLVL_LOAD_MR2 = 6'b001010; //A localparam INIT_WRLVL_LOAD_MR2_WAIT = 6'b001011; //B localparam INIT_RDLVL_ACT = 6'b001100; //C localparam INIT_RDLVL_ACT_WAIT = 6'b001101; //D localparam INIT_RDLVL_STG1_WRITE = 6'b001110; //E localparam INIT_RDLVL_STG1_WRITE_READ = 6'b001111; //F localparam INIT_RDLVL_STG1_READ = 6'b010000; //10 localparam INIT_RDLVL_STG2_READ = 6'b010001; //11 localparam INIT_RDLVL_STG2_READ_WAIT = 6'b010010; //12 localparam INIT_PRECHARGE_PREWAIT = 6'b010011; //13 localparam INIT_PRECHARGE = 6'b010100; //14 localparam INIT_PRECHARGE_WAIT = 6'b010101; //15 localparam INIT_DONE = 6'b010110; //16 localparam INIT_DDR2_PRECHARGE = 6'b010111; //17 localparam INIT_DDR2_PRECHARGE_WAIT = 6'b011000; //18 localparam INIT_REFRESH = 6'b011001; //19 localparam INIT_REFRESH_WAIT = 6'b011010; //1A localparam INIT_REG_WRITE = 6'b011011; //1B localparam INIT_REG_WRITE_WAIT = 6'b011100; //1C localparam INIT_DDR2_MULTI_RANK = 6'b011101; //1D localparam INIT_DDR2_MULTI_RANK_WAIT = 6'b011110; //1E localparam INIT_WRCAL_ACT = 6'b011111; //1F localparam INIT_WRCAL_ACT_WAIT = 6'b100000; //20 localparam INIT_WRCAL_WRITE = 6'b100001; //21 localparam INIT_WRCAL_WRITE_READ = 6'b100010; //22 localparam INIT_WRCAL_READ = 6'b100011; //23 localparam INIT_WRCAL_READ_WAIT = 6'b100100; //24 localparam INIT_WRCAL_MULT_READS = 6'b100101; //25 localparam INIT_PI_PHASELOCK_READS = 6'b100110; //26 localparam INIT_MPR_RDEN = 6'b100111; //27 localparam INIT_MPR_WAIT = 6'b101000; //28 localparam INIT_MPR_READ = 6'b101001; //29 localparam INIT_MPR_DISABLE_PREWAIT = 6'b101010; //2A localparam INIT_MPR_DISABLE = 6'b101011; //2B localparam INIT_MPR_DISABLE_WAIT = 6'b101100; //2C localparam INIT_OCLKDELAY_ACT = 6'b101101; //2D localparam INIT_OCLKDELAY_ACT_WAIT = 6'b101110; //2E localparam INIT_OCLKDELAY_WRITE = 6'b101111; //2F localparam INIT_OCLKDELAY_WRITE_WAIT = 6'b110000; //30 localparam INIT_OCLKDELAY_READ = 6'b110001; //31 localparam INIT_OCLKDELAY_READ_WAIT = 6'b110010; //32 localparam INIT_REFRESH_RNK2_WAIT = 6'b110011; //33 integer i, j, k, l, m, n, p, q; reg pi_dqs_found_all_r; (* ASYNC_REG = "TRUE" *) reg pi_phase_locked_all_r1; (* ASYNC_REG = "TRUE" *) reg pi_phase_locked_all_r2; (* ASYNC_REG = "TRUE" *) reg pi_phase_locked_all_r3; (* ASYNC_REG = "TRUE" *) reg pi_phase_locked_all_r4; reg pi_calib_rank_done_r; reg [13:0] pi_phaselock_timer; reg stg1_wr_done; reg rnk_ref_cnt; reg pi_dqs_found_done_r1; reg pi_dqs_found_rank_done_r; reg read_calib_int; reg read_calib_r; reg pi_calib_done_r; reg pi_calib_done_r1; reg burst_addr_r; reg [1:0] chip_cnt_r; reg [6:0] cnt_cmd_r; reg cnt_cmd_done_r; reg cnt_cmd_done_m7_r; reg [7:0] cnt_dllk_zqinit_r; reg cnt_dllk_zqinit_done_r; reg cnt_init_af_done_r; reg [1:0] cnt_init_af_r; reg [1:0] cnt_init_data_r; reg [1:0] cnt_init_mr_r; reg cnt_init_mr_done_r; reg cnt_init_pre_wait_done_r; reg [7:0] cnt_init_pre_wait_r; reg [9:0] cnt_pwron_ce_r; reg cnt_pwron_cke_done_r; reg cnt_pwron_cke_done_r1; reg [8:0] cnt_pwron_r; reg cnt_pwron_reset_done_r; reg cnt_txpr_done_r; 
reg [7:0] cnt_txpr_r; reg ddr2_pre_flag_r; reg ddr2_refresh_flag_r; reg ddr3_lm_done_r; reg [4:0] enable_wrlvl_cnt; reg init_complete_r; reg init_complete_r1; reg init_complete_r2; (* keep = "true" *) reg init_complete_r_timing; (* keep = "true" *) reg init_complete_r1_timing; reg [5:0] init_next_state; reg [5:0] init_state_r; reg [5:0] init_state_r1; wire [15:0] load_mr0; wire [15:0] load_mr1; wire [15:0] load_mr2; wire [15:0] load_mr3; reg mem_init_done_r; reg [1:0] mr2_r [0:3]; reg [2:0] mr1_r [0:3]; reg new_burst_r; reg [15:0] wrcal_start_dly_r; wire wrcal_start_pre; reg wrcal_resume_r; // Only one ODT signal per rank in PHY Control Block reg [nCK_PER_CLK-1:0] phy_tmp_odt_r; reg [nCK_PER_CLK-1:0] phy_tmp_odt_r1; reg [CS_WIDTH*nCS_PER_RANK-1:0] phy_tmp_cs1_r; reg [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] phy_int_cs_n; wire prech_done_pre; reg [15:0] prech_done_dly_r; reg prech_pending_r; reg prech_req_posedge_r; reg prech_req_r; reg pwron_ce_r; reg first_rdlvl_pat_r; reg first_wrcal_pat_r; reg phy_wrdata_en; reg phy_wrdata_en_r1; reg [1:0] wrdata_pat_cnt; reg [1:0] wrcal_pat_cnt; reg [ROW_WIDTH-1:0] address_w; reg [BANK_WIDTH-1:0] bank_w; reg rdlvl_stg1_done_r1; reg rdlvl_stg1_start_int; reg [15:0] rdlvl_start_dly0_r; reg rdlvl_start_pre; reg rdlvl_last_byte_done_r; wire rdlvl_rd; wire rdlvl_wr; reg rdlvl_wr_r; wire rdlvl_wr_rd; reg [2:0] reg_ctrl_cnt_r; reg [1:0] tmp_mr2_r [0:3]; reg [2:0] tmp_mr1_r [0:3]; reg wrlvl_done_r; reg wrlvl_done_r1; reg wrlvl_rank_done_r1; reg wrlvl_rank_done_r2; reg wrlvl_rank_done_r3; reg wrlvl_rank_done_r4; reg wrlvl_rank_done_r5; reg wrlvl_rank_done_r6; reg wrlvl_rank_done_r7; reg [2:0] wrlvl_rank_cntr; reg wrlvl_odt_ctl; reg wrlvl_odt; reg wrlvl_active; reg wrlvl_active_r1; reg [2:0] num_reads; reg temp_wrcal_done_r; reg temp_lmr_done; reg extend_cal_pat; reg [13:0] tg_timer; reg tg_timer_go; reg cnt_wrcal_rd; reg [3:0] cnt_wait; reg [7:0] wrcal_reads; reg [8:0] stg1_wr_rd_cnt; reg phy_data_full_r; reg wr_level_dqs_asrt; reg wr_level_dqs_asrt_r1; reg [1:0] dqs_asrt_cnt; reg [3:0] num_refresh; wire oclkdelay_calib_start_pre; reg [15:0] oclkdelay_start_dly_r; reg [3:0] oclk_wr_cnt; reg [3:0] wrcal_wr_cnt; reg wrlvl_final_r; reg prbs_rdlvl_done_r1; reg prbs_last_byte_done_r; reg phy_if_empty_r; reg wrcal_final_chk; //*************************************************************************** // Debug //*************************************************************************** //synthesis translate_off always @(posedge mem_init_done_r) begin if (!rst) $display ("PHY_INIT: Memory Initialization completed at %t", $time); end always @(posedge wrlvl_done) begin if (!rst && (WRLVL == "ON")) $display ("PHY_INIT: Write Leveling completed at %t", $time); end always @(posedge rdlvl_stg1_done) begin if (!rst) $display ("PHY_INIT: Read Leveling Stage 1 completed at %t", $time); end always @(posedge mpr_rdlvl_done) begin if (!rst) $display ("PHY_INIT: MPR Read Leveling completed at %t", $time); end always @(posedge oclkdelay_calib_done) begin if (!rst) $display ("PHY_INIT: OCLKDELAY calibration completed at %t", $time); end always @(posedge pi_calib_done_r1) begin if (!rst) $display ("PHY_INIT: Phaser_In Phase Locked at %t", $time); end always @(posedge pi_dqs_found_done) begin if (!rst) $display ("PHY_INIT: Phaser_In DQSFOUND completed at %t", $time); end always @(posedge wrcal_done) begin if (!rst && (WRLVL == "ON")) $display ("PHY_INIT: Write Calibration completed at %t", $time); end //synthesis translate_on assign dbg_phy_init[5:0] = init_state_r; 
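  // Optional simulation-only monitor (sketch): traces every master FSM state
  // transition via init_state_r, following the same translate_off/$display
  // convention as the milestone monitors above. Purely illustrative; not
  // required for calibration.
  //synthesis translate_off
  always @(init_state_r) begin
    if (!rst)
      $display ("PHY_INIT: init_state_r = %h at %t", init_state_r, $time);
  end
  //synthesis translate_on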
//*************************************************************************** // DQS count to be sent to hard PHY during Phaser_IN Phase Locking stage //*************************************************************************** // assign pi_phaselock_calib_cnt = dqs_cnt_r; assign pi_calib_done = pi_calib_done_r1; always @(posedge clk) begin if (rst) wrcal_final_chk <= #TCQ 1'b0; else if ((init_next_state == INIT_WRCAL_ACT) && wrcal_done && (DRAM_TYPE == "DDR3")) wrcal_final_chk <= #TCQ 1'b1; end always @(posedge clk) begin rdlvl_stg1_done_r1 <= #TCQ rdlvl_stg1_done; prbs_rdlvl_done_r1 <= #TCQ prbs_rdlvl_done; wrcal_resume_r <= #TCQ wrcal_resume; wrcal_sanity_chk <= #TCQ wrcal_final_chk; end always @(posedge clk) begin if (rst) mpr_end_if_reset <= #TCQ 1'b0; else if (mpr_last_byte_done && (num_refresh != 'd0)) mpr_end_if_reset <= #TCQ 1'b1; else mpr_end_if_reset <= #TCQ 1'b0; end // Siganl to mask memory model error for Invalid latching edge always @(posedge clk) if (rst) calib_writes <= #TCQ 1'b0; else if ((init_state_r == INIT_OCLKDELAY_WRITE) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_WRITE_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_WRITE_READ)) calib_writes <= #TCQ 1'b1; else calib_writes <= #TCQ 1'b0; always @(posedge clk) if (rst) wrcal_rd_wait <= #TCQ 1'b0; else if (init_state_r == INIT_WRCAL_READ_WAIT) wrcal_rd_wait <= #TCQ 1'b1; else wrcal_rd_wait <= #TCQ 1'b0; //*************************************************************************** // Signal PHY completion when calibration is finished // Signal assertion is delayed by four clock cycles to account for the // multi cycle path constraint to (phy_init_data_sel) signal. //*************************************************************************** always @(posedge clk) if (rst) begin init_complete_r <= #TCQ 1'b0; init_complete_r_timing <= #TCQ 1'b0; init_complete_r1 <= #TCQ 1'b0; init_complete_r1_timing <= #TCQ 1'b0; init_complete_r2 <= #TCQ 1'b0; init_calib_complete <= #TCQ 1'b0; end else begin if (init_state_r == INIT_DONE) begin init_complete_r <= #TCQ 1'b1; init_complete_r_timing <= #TCQ 1'b1; end init_complete_r1 <= #TCQ init_complete_r; init_complete_r1_timing <= #TCQ init_complete_r_timing; init_complete_r2 <= #TCQ init_complete_r1; init_calib_complete <= #TCQ init_complete_r2; end //*************************************************************************** // Instantiate FF for the phy_init_data_sel signal. A multi cycle path // constraint will be assigned to this signal. 
This signal will only be // used within the PHY //*************************************************************************** // FDRSE u_ff_phy_init_data_sel // ( // .Q (phy_init_data_sel), // .C (clk), // .CE (1'b1), // .D (init_complete_r), // .R (1'b0), // .S (1'b0) // ) /* synthesis syn_preserve=1 */ // /* synthesis syn_replicate = 0 */; //*************************************************************************** // Mode register programming //*************************************************************************** //***************************************************************** // DDR3 Load mode reg0 // Mode Register (MR0): // [15:13] - unused - 000 // [12] - Precharge Power-down DLL usage - 0 (DLL frozen, slow-exit), // 1 (DLL maintained) // [11:9] - write recovery for Auto Precharge (tWR/tCK = 6) // [8] - DLL reset - 0 or 1 // [7] - Test Mode - 0 (normal) // [6:4],[2] - CAS latency - CAS_LAT // [3] - Burst Type - BURST_TYPE // [1:0] - Burst Length - BURST_LEN // DDR2 Load mode register // Mode Register (MR): // [15:14] - unused - 00 // [13] - reserved - 0 // [12] - Power-down mode - 0 (normal) // [11:9] - write recovery - write recovery for Auto Precharge // (tWR/tCK = 6) // [8] - DLL reset - 0 or 1 // [7] - Test Mode - 0 (normal) // [6:4] - CAS latency - CAS_LAT // [3] - Burst Type - BURST_TYPE // [2:0] - Burst Length - BURST_LEN //***************************************************************** generate if(DRAM_TYPE == "DDR3") begin: gen_load_mr0_DDR3 assign load_mr0[1:0] = (BURST_MODE == "8") ? 2'b00 : (BURST_MODE == "OTF") ? 2'b01 : (BURST_MODE == "4") ? 2'b10 : 2'b11; assign load_mr0[2] = (nCL >= 12) ? 1'b1 : 1'b0; // LSb of CAS latency assign load_mr0[3] = (BURST_TYPE == "SEQ") ? 1'b0 : 1'b1; assign load_mr0[6:4] = ((nCL == 5) || (nCL == 13)) ? 3'b001 : ((nCL == 6) || (nCL == 14)) ? 3'b010 : (nCL == 7) ? 3'b011 : (nCL == 8) ? 3'b100 : (nCL == 9) ? 3'b101 : (nCL == 10) ? 3'b110 : (nCL == 11) ? 3'b111 : (nCL == 12) ? 3'b000 : 3'b111; assign load_mr0[7] = 1'b0; assign load_mr0[8] = 1'b1; // Reset DLL (init only) assign load_mr0[11:9] = (TWR_CYC == 5) ? 3'b001 : (TWR_CYC == 6) ? 3'b010 : (TWR_CYC == 7) ? 3'b011 : (TWR_CYC == 8) ? 3'b100 : (TWR_CYC == 9) ? 3'b101 : (TWR_CYC == 10) ? 3'b101 : (TWR_CYC == 11) ? 3'b110 : (TWR_CYC == 12) ? 3'b110 : (TWR_CYC == 13) ? 3'b111 : (TWR_CYC == 14) ? 3'b111 : (TWR_CYC == 15) ? 3'b000 : (TWR_CYC == 16) ? 3'b000 : 3'b010; assign load_mr0[12] = 1'b0; // Precharge Power-Down DLL 'slow-exit' assign load_mr0[15:13] = 3'b000; end else if (DRAM_TYPE == "DDR2") begin: gen_load_mr0_DDR2 // block: gen assign load_mr0[2:0] = (BURST_MODE == "8") ? 3'b011 : (BURST_MODE == "4") ? 3'b010 : 3'b111; assign load_mr0[3] = (BURST_TYPE == "SEQ") ? 1'b0 : 1'b1; assign load_mr0[6:4] = (nCL == 3) ? 3'b011 : (nCL == 4) ? 3'b100 : (nCL == 5) ? 3'b101 : (nCL == 6) ? 3'b110 : 3'b111; assign load_mr0[7] = 1'b0; assign load_mr0[8] = 1'b1; // Reset DLL (init only) assign load_mr0[11:9] = (TWR_CYC == 2) ? 3'b001 : (TWR_CYC == 3) ? 3'b010 : (TWR_CYC == 4) ? 3'b011 : (TWR_CYC == 5) ? 3'b100 : (TWR_CYC == 6) ? 
3'b101 : 3'b010; assign load_mr0[15:12]= 4'b0000; // Reserved end endgenerate //***************************************************************** // DDR3 Load mode reg1 // Mode Register (MR1): // [15:13] - unused - 00 // [12] - output enable - 0 (enabled for DQ, DQS, DQS#) // [11] - TDQS enable - 0 (TDQS disabled and DM enabled) // [10] - reserved - 0 (must be '0') // [9] - RTT[2] - 0 // [8] - reserved - 0 (must be '0') // [7] - write leveling - 0 (disabled), 1 (enabled) // [6] - RTT[1] - RTT[1:0] = 0(no ODT), 1(75), 2(150), 3(50) // [5] - Output driver impedance[1] - 0 (RZQ/6 and RZQ/7) // [4:3] - Additive CAS - ADDITIVE_CAS // [2] - RTT[0] // [1] - Output driver impedance[0] - 0(RZQ/6), or 1 (RZQ/7) // [0] - DLL enable - 0 (normal) // DDR2 ext mode register // Extended Mode Register (MR): // [15:14] - unused - 00 // [13] - reserved - 0 // [12] - output enable - 0 (enabled) // [11] - RDQS enable - 0 (disabled) // [10] - DQS# enable - 0 (enabled) // [9:7] - OCD Program - 111 or 000 (first 111, then 000 during init) // [6] - RTT[1] - RTT[1:0] = 0(no ODT), 1(75), 2(150), 3(50) // [5:3] - Additive CAS - ADDITIVE_CAS // [2] - RTT[0] // [1] - Output drive - REDUCE_DRV (= 0(full), = 1 (reduced) // [0] - DLL enable - 0 (normal) //***************************************************************** generate if(DRAM_TYPE == "DDR3") begin: gen_load_mr1_DDR3 assign load_mr1[0] = 1'b0; // DLL enabled during Imitialization assign load_mr1[1] = (OUTPUT_DRV == "LOW") ? 1'b0 : 1'b1; assign load_mr1[2] = ((RTT_NOM_int == "30") || (RTT_NOM_int == "40") || (RTT_NOM_int == "60")) ? 1'b1 : 1'b0; assign load_mr1[4:3] = (AL == "0") ? 2'b00 : (AL == "CL-1") ? 2'b01 : (AL == "CL-2") ? 2'b10 : 2'b11; assign load_mr1[5] = 1'b0; assign load_mr1[6] = ((RTT_NOM_int == "40") || (RTT_NOM_int == "120")) ? 1'b1 : 1'b0; assign load_mr1[7] = 1'b0; // Enable write lvl after init sequence assign load_mr1[8] = 1'b0; assign load_mr1[9] = ((RTT_NOM_int == "20") || (RTT_NOM_int == "30")) ? 1'b1 : 1'b0; assign load_mr1[10] = 1'b0; assign load_mr1[15:11] = 5'b00000; end else if (DRAM_TYPE == "DDR2") begin: gen_load_mr1_DDR2 assign load_mr1[0] = 1'b0; // DLL enabled during Imitialization assign load_mr1[1] = (OUTPUT_DRV == "LOW") ? 1'b1 : 1'b0; assign load_mr1[2] = ((RTT_NOM_int == "75") || (RTT_NOM_int == "50")) ? 1'b1 : 1'b0; assign load_mr1[5:3] = (AL == "0") ? 3'b000 : (AL == "1") ? 3'b001 : (AL == "2") ? 3'b010 : (AL == "3") ? 3'b011 : (AL == "4") ? 3'b100 : 3'b111; assign load_mr1[6] = ((RTT_NOM_int == "50") || (RTT_NOM_int == "150")) ? 1'b1 : 1'b0; assign load_mr1[9:7] = 3'b000; assign load_mr1[10] = (DDR2_DQSN_ENABLE == "YES") ? 1'b0 : 1'b1; assign load_mr1[15:11] = 5'b00000; end endgenerate //***************************************************************** // DDR3 Load mode reg2 // Mode Register (MR2): // [15:11] - unused - 00 // [10:9] - RTT_WR - 00 (Dynamic ODT off) // [8] - reserved - 0 (must be '0') // [7] - self-refresh temperature range - // 0 (normal), 1 (extended) // [6] - Auto Self-Refresh - 0 (manual), 1(auto) // [5:3] - CAS Write Latency (CWL) - // 000 (5 for 400 MHz device), // 001 (6 for 400 MHz to 533 MHz devices), // 010 (7 for 533 MHz to 667 MHz devices), // 011 (8 for 667 MHz to 800 MHz) // [2:0] - Partial Array Self-Refresh (Optional) - // 000 (full array) // Not used for DDR2 //***************************************************************** generate if(DRAM_TYPE == "DDR3") begin: gen_load_mr2_DDR3 assign load_mr2[2:0] = 3'b000; assign load_mr2[5:3] = (nCWL == 5) ? 3'b000 : (nCWL == 6) ? 
3'b001 : (nCWL == 7) ? 3'b010 : (nCWL == 8) ? 3'b011 : (nCWL == 9) ? 3'b100 : (nCWL == 10) ? 3'b101 : (nCWL == 11) ? 3'b110 : 3'b111; assign load_mr2[6] = 1'b0; assign load_mr2[7] = 1'b0; assign load_mr2[8] = 1'b0; // Dynamic ODT disabled assign load_mr2[10:9] = 2'b00; assign load_mr2[15:11] = 5'b00000; end else begin: gen_load_mr2_DDR2 assign load_mr2[15:0] = 16'd0; end endgenerate //***************************************************************** // DDR3 Load mode reg3 // Mode Register (MR3): // [15:3] - unused - All zeros // [2] - MPR Operation - 0(normal operation), 1(data flow from MPR) // [1:0] - MPR location - 00 (Predefined pattern) //***************************************************************** assign load_mr3[1:0] = 2'b00; assign load_mr3[2] = 1'b0; assign load_mr3[15:3] = 13'b0000000000000; // For multi-rank systems the rank being accessed during writes in // Read Leveling must be sent to phy_write for the bitslip logic assign calib_rank_cnt = chip_cnt_r; //*************************************************************************** // Logic to begin initial calibration, and to handle precharge requests // during read-leveling (to avoid tRAS violations if individual read // levelling calibration stages take more than max{tRAS) to complete). //*************************************************************************** // Assert when readback for each stage of read-leveling begins. However, // note this indicates only when the read command is issued and when // Phaser_IN has phase aligned FREQ_REF clock to read DQS. It does not // indicate when the read data is present on the bus (when this happens // after the read command is issued depends on CAS LATENCY) - there will // need to be some delay before valid data is present on the bus. // assign rdlvl_start_pre = (init_state_r == INIT_PI_PHASELOCK_READS); // Assert when read back for oclkdelay calibration begins assign oclkdelay_calib_start_pre = (init_state_r == INIT_OCLKDELAY_READ); // Assert when read back for write calibration begins assign wrcal_start_pre = (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS); // Common precharge signal done signal - pulses only when there has been // a precharge issued as a result of a PRECH_REQ pulse. Note also a common // PRECH_DONE signal is used for all blocks assign prech_done_pre = (((init_state_r == INIT_RDLVL_STG1_READ) || ((rdlvl_last_byte_done_r || prbs_last_byte_done_r) && (init_state_r == INIT_RDLVL_ACT_WAIT) && cnt_cmd_done_r) || (dqs_found_prech_req && (init_state_r == INIT_RDLVL_ACT_WAIT)) || (init_state_r == INIT_MPR_RDEN) || ((init_state_r == INIT_WRCAL_ACT_WAIT) && cnt_cmd_done_r) || ((init_state_r == INIT_OCLKDELAY_ACT_WAIT) && cnt_cmd_done_r) || (wrlvl_final && (init_state_r == INIT_REFRESH_WAIT) && cnt_cmd_done_r && ~oclkdelay_calib_done)) && prech_pending_r && !prech_req_posedge_r); always @(posedge clk) if (rst) pi_phaselock_start <= #TCQ 1'b0; else if (init_state_r == INIT_PI_PHASELOCK_READS) pi_phaselock_start <= #TCQ 1'b1; // Delay start of each calibration by 16 clock cycles to ensure that when // calibration logic begins, read data is already appearing on the bus. // Each circuit should synthesize using an SRL16. Assume that reset is // long enough to clear contents of SRL16. 
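  // The 16-bit *_dly_r shift registers below implement this delay: each
  // *_start_pre / prech_done_pre pulse is shifted in at bit 0 and advances
  // one bit per cycle; a fixed tap (bit [5], [14] or [15] depending on the
  // stage) then launches the corresponding start/done signal that many
  // cycles later.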
always @(posedge clk) begin rdlvl_last_byte_done_r <= #TCQ rdlvl_last_byte_done; prbs_last_byte_done_r <= #TCQ prbs_last_byte_done; rdlvl_start_dly0_r <= #TCQ {rdlvl_start_dly0_r[14:0], rdlvl_start_pre}; wrcal_start_dly_r <= #TCQ {wrcal_start_dly_r[14:0], wrcal_start_pre}; oclkdelay_start_dly_r <= #TCQ {oclkdelay_start_dly_r[14:0], oclkdelay_calib_start_pre}; prech_done_dly_r <= #TCQ {prech_done_dly_r[14:0], prech_done_pre}; end always @(posedge clk) prech_done <= #TCQ prech_done_dly_r[15]; always @(posedge clk) if (rst) mpr_rdlvl_start <= #TCQ 1'b0; else if (pi_dqs_found_done && (init_state_r == INIT_MPR_READ)) mpr_rdlvl_start <= #TCQ 1'b1; always @(posedge clk) phy_if_empty_r <= #TCQ phy_if_empty; always @(posedge clk) if (rst || (phy_if_empty_r && prbs_rdlvl_prech_req) || ((stg1_wr_rd_cnt == 'd1) && ~stg1_wr_done) || prbs_rdlvl_done) prbs_gen_clk_en <= #TCQ 1'b0; else if ((~phy_if_empty_r && rdlvl_stg1_done_r1 && ~prbs_rdlvl_done) || ((init_state_r == INIT_RDLVL_ACT_WAIT) && rdlvl_stg1_done_r1 && (cnt_cmd_r == 'd0))) prbs_gen_clk_en <= #TCQ 1'b1; generate if (RANKS < 2) begin always @(posedge clk) if (rst) begin rdlvl_stg1_start <= #TCQ 1'b0; rdlvl_stg1_start_int <= #TCQ 1'b0; rdlvl_start_pre <= #TCQ 1'b0; prbs_rdlvl_start <= #TCQ 1'b0; end else begin if (pi_dqs_found_done && cnt_cmd_done_r && (init_state_r == INIT_RDLVL_ACT_WAIT)) rdlvl_stg1_start_int <= #TCQ 1'b1; if (pi_dqs_found_done && (init_state_r == INIT_RDLVL_STG1_READ))begin rdlvl_start_pre <= #TCQ 1'b1; rdlvl_stg1_start <= #TCQ rdlvl_start_dly0_r[14]; end if (pi_dqs_found_done && rdlvl_stg1_done && (init_state_r == INIT_RDLVL_STG1_READ) && (WRLVL == "ON")) begin prbs_rdlvl_start <= #TCQ 1'b1; end end end else begin always @(posedge clk) if (rst || rdlvl_stg1_rank_done) begin rdlvl_stg1_start <= #TCQ 1'b0; rdlvl_stg1_start_int <= #TCQ 1'b0; rdlvl_start_pre <= #TCQ 1'b0; prbs_rdlvl_start <= #TCQ 1'b0; end else begin if (pi_dqs_found_done && cnt_cmd_done_r && (init_state_r == INIT_RDLVL_ACT_WAIT)) rdlvl_stg1_start_int <= #TCQ 1'b1; if (pi_dqs_found_done && (init_state_r == INIT_RDLVL_STG1_READ))begin rdlvl_start_pre <= #TCQ 1'b1; rdlvl_stg1_start <= #TCQ rdlvl_start_dly0_r[14]; end if (pi_dqs_found_done && rdlvl_stg1_done && (init_state_r == INIT_RDLVL_STG1_READ) && (WRLVL == "ON")) begin prbs_rdlvl_start <= #TCQ 1'b1; end end end endgenerate always @(posedge clk) begin if (rst || dqsfound_retry || wrlvl_byte_redo) begin pi_dqs_found_start <= #TCQ 1'b0; wrcal_start <= #TCQ 1'b0; end else begin if (!pi_dqs_found_done && init_state_r == INIT_RDLVL_STG2_READ) pi_dqs_found_start <= #TCQ 1'b1; if (wrcal_start_dly_r[5]) wrcal_start <= #TCQ 1'b1; end end // else: !if(rst) always @(posedge clk) if (rst) oclkdelay_calib_start <= #TCQ 1'b0; else if (oclkdelay_start_dly_r[5]) oclkdelay_calib_start <= #TCQ 1'b1; always @(posedge clk) if (rst) pi_dqs_found_done_r1 <= #TCQ 1'b0; else pi_dqs_found_done_r1 <= #TCQ pi_dqs_found_done; always @(posedge clk) wrlvl_final_r <= #TCQ wrlvl_final; // Reset IN_FIFO after final write leveling to make sure the FIFO // pointers are initialized always @(posedge clk) if (rst || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_REFRESH)) wrlvl_final_if_rst <= #TCQ 1'b0; else if (wrlvl_done_r && //(wrlvl_final_r && wrlvl_done_r && (init_state_r == INIT_WRLVL_LOAD_MR2)) wrlvl_final_if_rst <= #TCQ 1'b1; // Constantly enable DQS while write leveling is enabled in the memory // This is more to get rid of warnings in simulation, can later change // this code to only enable WRLVL_ACTIVE when WRLVL_START is 
asserted always @(posedge clk) if (rst || ((init_state_r1 != INIT_WRLVL_START) && (init_state_r == INIT_WRLVL_START))) wrlvl_odt_ctl <= #TCQ 1'b0; else if (wrlvl_rank_done && ~wrlvl_rank_done_r1) wrlvl_odt_ctl <= #TCQ 1'b1; generate if (nCK_PER_CLK == 4) begin: en_cnt_div4 always @ (posedge clk) if (rst) enable_wrlvl_cnt <= #TCQ 5'd0; else if ((init_state_r == INIT_WRLVL_START) || (wrlvl_odt && (enable_wrlvl_cnt == 5'd0))) enable_wrlvl_cnt <= #TCQ 5'd12; else if ((enable_wrlvl_cnt > 5'd0) && ~(phy_ctl_full || phy_cmd_full)) enable_wrlvl_cnt <= #TCQ enable_wrlvl_cnt - 1; // ODT stays asserted as long as write_calib // signal is asserted always @(posedge clk) if (rst || wrlvl_odt_ctl) wrlvl_odt <= #TCQ 1'b0; else if (enable_wrlvl_cnt == 5'd1) wrlvl_odt <= #TCQ 1'b1; end else begin: en_cnt_div2 always @ (posedge clk) if (rst) enable_wrlvl_cnt <= #TCQ 5'd0; else if ((init_state_r == INIT_WRLVL_START) || (wrlvl_odt && (enable_wrlvl_cnt == 5'd0))) enable_wrlvl_cnt <= #TCQ 5'd21; else if ((enable_wrlvl_cnt > 5'd0) && ~(phy_ctl_full || phy_cmd_full)) enable_wrlvl_cnt <= #TCQ enable_wrlvl_cnt - 1; // ODT stays asserted as long as write_calib // signal is asserted always @(posedge clk) if (rst || wrlvl_odt_ctl) wrlvl_odt <= #TCQ 1'b0; else if (enable_wrlvl_cnt == 5'd1) wrlvl_odt <= #TCQ 1'b1; end endgenerate always @(posedge clk) if (rst || wrlvl_rank_done || done_dqs_tap_inc) wrlvl_active <= #TCQ 1'b0; else if ((enable_wrlvl_cnt == 5'd1) && wrlvl_odt && !wrlvl_active) wrlvl_active <= #TCQ 1'b1; // signal used to assert DQS for write leveling. // the DQS will be asserted once every 16 clock cycles. always @(posedge clk)begin if(rst || (enable_wrlvl_cnt != 5'd1)) begin wr_level_dqs_asrt <= #TCQ 1'd0; end else if ((enable_wrlvl_cnt == 5'd1) && (wrlvl_active_r1)) begin wr_level_dqs_asrt <= #TCQ 1'd1; end end always @ (posedge clk) begin if (rst || (wrlvl_done_r && ~wrlvl_done_r1)) dqs_asrt_cnt <= #TCQ 2'd0; else if (wr_level_dqs_asrt && dqs_asrt_cnt != 2'd3) dqs_asrt_cnt <= #TCQ (dqs_asrt_cnt + 1); end always @ (posedge clk) begin if (rst || ~wrlvl_active) wr_lvl_start <= #TCQ 1'd0; else if (dqs_asrt_cnt == 2'd3) wr_lvl_start <= #TCQ 1'd1; end always @(posedge clk) begin if (rst) wl_sm_start <= #TCQ 1'b0; else wl_sm_start <= #TCQ wr_level_dqs_asrt_r1; end always @(posedge clk) begin wrlvl_active_r1 <= #TCQ wrlvl_active; wr_level_dqs_asrt_r1 <= #TCQ wr_level_dqs_asrt; wrlvl_done_r <= #TCQ wrlvl_done; wrlvl_done_r1 <= #TCQ wrlvl_done_r; wrlvl_rank_done_r1 <= #TCQ wrlvl_rank_done; wrlvl_rank_done_r2 <= #TCQ wrlvl_rank_done_r1; wrlvl_rank_done_r3 <= #TCQ wrlvl_rank_done_r2; wrlvl_rank_done_r4 <= #TCQ wrlvl_rank_done_r3; wrlvl_rank_done_r5 <= #TCQ wrlvl_rank_done_r4; wrlvl_rank_done_r6 <= #TCQ wrlvl_rank_done_r5; wrlvl_rank_done_r7 <= #TCQ wrlvl_rank_done_r6; end always @ (posedge clk) begin //if (rst) wrlvl_rank_cntr <= #TCQ 3'd0; //else if (wrlvl_rank_done) // wrlvl_rank_cntr <= #TCQ wrlvl_rank_cntr + 1'b1; end //***************************************************************** // Precharge request logic - those calibration logic blocks // that require greater than tRAS(max) to finish must break up // their calibration into smaller units of time, with precharges // issued in between. 
This is done using the XXX_PRECH_REQ and // PRECH_DONE handshaking between PHY_INIT and those blocks //***************************************************************** // Shared request from multiple sources assign prech_req = oclk_prech_req | rdlvl_prech_req | wrcal_prech_req | prbs_rdlvl_prech_req | (dqs_found_prech_req & (init_state_r == INIT_RDLVL_STG2_READ_WAIT)); // Handshaking logic to force precharge during read leveling, and to // notify read leveling logic when precharge has been initiated and // it's okay to proceed with leveling again always @(posedge clk) if (rst) begin prech_req_r <= #TCQ 1'b0; prech_req_posedge_r <= #TCQ 1'b0; prech_pending_r <= #TCQ 1'b0; end else begin prech_req_r <= #TCQ prech_req; prech_req_posedge_r <= #TCQ prech_req & ~prech_req_r; if (prech_req_posedge_r) prech_pending_r <= #TCQ 1'b1; // Clear after we've finished with the precharge and have // returned to issuing read leveling calibration reads else if (prech_done_pre) prech_pending_r <= #TCQ 1'b0; end //*************************************************************************** // Various timing counters //*************************************************************************** //***************************************************************** // Generic delay for various states that require it (e.g. for turnaround // between read and write). Make this a sufficiently large number of clock // cycles to cover all possible frequencies and memory components) // Requirements for this counter: // 1. Greater than tMRD // 2. tRFC (refresh-active) for DDR2 // 3. (list the other requirements, slacker...) //***************************************************************** always @(posedge clk) begin case (init_state_r) INIT_LOAD_MR_WAIT, INIT_WRLVL_LOAD_MR_WAIT, INIT_WRLVL_LOAD_MR2_WAIT, INIT_MPR_WAIT, INIT_MPR_DISABLE_PREWAIT, INIT_MPR_DISABLE_WAIT, INIT_OCLKDELAY_ACT_WAIT, INIT_OCLKDELAY_WRITE_WAIT, INIT_RDLVL_ACT_WAIT, INIT_RDLVL_STG1_WRITE_READ, INIT_RDLVL_STG2_READ_WAIT, INIT_WRCAL_ACT_WAIT, INIT_WRCAL_WRITE_READ, INIT_WRCAL_READ_WAIT, INIT_PRECHARGE_PREWAIT, INIT_PRECHARGE_WAIT, INIT_DDR2_PRECHARGE_WAIT, INIT_REG_WRITE_WAIT, INIT_REFRESH_WAIT, INIT_REFRESH_RNK2_WAIT: begin if (phy_ctl_full || phy_cmd_full) cnt_cmd_r <= #TCQ cnt_cmd_r; else cnt_cmd_r <= #TCQ cnt_cmd_r + 1; end INIT_WRLVL_WAIT: cnt_cmd_r <= #TCQ 'b0; default: cnt_cmd_r <= #TCQ 'b0; endcase end // pulse when count reaches terminal count always @(posedge clk) cnt_cmd_done_r <= #TCQ (cnt_cmd_r == CNTNEXT_CMD); // For ODT deassertion - hold throughout post read/write wait stage, but // deassert before next command. The post read/write stage is very long, so // we simply address the longest case here plus some margin. 
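  // With CNTNEXT_CMD = 7'b1111111 (127), cnt_cmd_done_m7_r below pulses when
  // cnt_cmd_r reaches 120, i.e. seven cycles before cnt_cmd_done_r, which
  // gives ODT time to deassert before the next command is issued.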
always @(posedge clk) cnt_cmd_done_m7_r <= #TCQ (cnt_cmd_r == (CNTNEXT_CMD - 7)); //************************************************************************ // Added to support PO fine delay inc when TG errors always @(posedge clk) begin case (init_state_r) INIT_WRCAL_READ_WAIT: begin if (phy_ctl_full || phy_cmd_full) cnt_wait <= #TCQ cnt_wait; else cnt_wait <= #TCQ cnt_wait + 1; end default: cnt_wait <= #TCQ 'b0; endcase end always @(posedge clk) cnt_wrcal_rd <= #TCQ (cnt_wait == 'd4); always @(posedge clk) begin if (rst || ~temp_wrcal_done) temp_lmr_done <= #TCQ 1'b0; else if (temp_wrcal_done && (init_state_r == INIT_LOAD_MR)) temp_lmr_done <= #TCQ 1'b1; end always @(posedge clk) temp_wrcal_done_r <= #TCQ temp_wrcal_done; always @(posedge clk) if (rst) begin tg_timer_go <= #TCQ 1'b0; end else if ((PRE_REV3ES == "ON") && temp_wrcal_done && temp_lmr_done && (init_state_r == INIT_WRCAL_READ_WAIT)) begin tg_timer_go <= #TCQ 1'b1; end else begin tg_timer_go <= #TCQ 1'b0; end always @(posedge clk) begin if (rst || (temp_wrcal_done && ~temp_wrcal_done_r) || (init_state_r == INIT_PRECHARGE_PREWAIT)) tg_timer <= #TCQ 'd0; else if ((pi_phaselock_timer == PHASELOCKED_TIMEOUT) && tg_timer_go && (tg_timer != TG_TIMER_TIMEOUT)) tg_timer <= #TCQ tg_timer + 1; end always @(posedge clk) begin if (rst) tg_timer_done <= #TCQ 1'b0; else if (tg_timer == TG_TIMER_TIMEOUT) tg_timer_done <= #TCQ 1'b1; else tg_timer_done <= #TCQ 1'b0; end always @(posedge clk) begin if (rst) no_rst_tg_mc <= #TCQ 1'b0; else if ((init_state_r == INIT_WRCAL_ACT) && wrcal_read_req) no_rst_tg_mc <= #TCQ 1'b1; else no_rst_tg_mc <= #TCQ 1'b0; end //************************************************************************ always @(posedge clk) begin if (rst) detect_pi_found_dqs <= #TCQ 1'b0; else if ((cnt_cmd_r == 7'b0111111) && (init_state_r == INIT_RDLVL_STG2_READ_WAIT)) detect_pi_found_dqs <= #TCQ 1'b1; else detect_pi_found_dqs <= #TCQ 1'b0; end //***************************************************************** // Initial delay after power-on for RESET, CKE // NOTE: Could reduce power consumption by turning off these counters // after initial power-up (at expense of more logic) // NOTE: Likely can combine multiple counters into single counter //***************************************************************** // Create divided by 1024 version of clock always @(posedge clk) if (rst) begin cnt_pwron_ce_r <= #TCQ 10'h000; pwron_ce_r <= #TCQ 1'b0; end else begin cnt_pwron_ce_r <= #TCQ cnt_pwron_ce_r + 1; pwron_ce_r <= #TCQ (cnt_pwron_ce_r == 10'h3FF); end // "Main" power-on counter - ticks every CLKDIV/1024 cycles always @(posedge clk) if (rst) cnt_pwron_r <= #TCQ 'b0; else if (pwron_ce_r) cnt_pwron_r <= #TCQ cnt_pwron_r + 1; always @(posedge clk) if (rst || ~phy_ctl_ready) begin cnt_pwron_reset_done_r <= #TCQ 1'b0; cnt_pwron_cke_done_r <= #TCQ 1'b0; end else begin // skip power-up count for simulation purposes only if ((SIM_INIT_OPTION == "SKIP_PU_DLY") || (SIM_INIT_OPTION == "SKIP_INIT")) begin cnt_pwron_reset_done_r <= #TCQ 1'b1; cnt_pwron_cke_done_r <= #TCQ 1'b1; end else begin // otherwise, create latched version of done signal for RESET, CKE if (DRAM_TYPE == "DDR3") begin if (!cnt_pwron_reset_done_r) cnt_pwron_reset_done_r <= #TCQ (cnt_pwron_r == PWRON_RESET_DELAY_CNT); if (!cnt_pwron_cke_done_r) cnt_pwron_cke_done_r <= #TCQ (cnt_pwron_r == PWRON_CKE_DELAY_CNT); end else begin // DDR2 cnt_pwron_reset_done_r <= #TCQ 1'b1; // not needed if (!cnt_pwron_cke_done_r) cnt_pwron_cke_done_r <= #TCQ (cnt_pwron_r == PWRON_CKE_DELAY_CNT); end 
      end
    end // else: !if(rst || ~phy_ctl_ready)

  always @(posedge clk)
    cnt_pwron_cke_done_r1 <= #TCQ cnt_pwron_cke_done_r;

  // Keep RESET asserted and CKE deasserted until after power-on delay
  always @(posedge clk or posedge rst) begin
    if (rst)
      phy_reset_n <= #TCQ 1'b0;
    else
      phy_reset_n <= #TCQ cnt_pwron_reset_done_r;
    // phy_cke <= #TCQ {CKE_WIDTH{cnt_pwron_cke_done_r}};
  end

  //*****************************************************************
  // Counter for tXPR (pronounced "Tax-Payer") - wait time after
  // CKE assertion before the first MRS command can be issued
  //*****************************************************************
  always @(posedge clk)
    if (!cnt_pwron_cke_done_r) begin
      cnt_txpr_r      <= #TCQ 'b0;
      cnt_txpr_done_r <= #TCQ 1'b0;
    end else begin
      cnt_txpr_r <= #TCQ cnt_txpr_r + 1;
      if (!cnt_txpr_done_r)
        cnt_txpr_done_r <= #TCQ (cnt_txpr_r == TXPR_DELAY_CNT);
    end

  //*****************************************************************
  // Counter for the initial 400ns wait for issuing the precharge all
  // command after CKE assertion. Only for DDR2.
  //*****************************************************************
  always @(posedge clk)
    if (!cnt_pwron_cke_done_r) begin
      cnt_init_pre_wait_r      <= #TCQ 'b0;
      cnt_init_pre_wait_done_r <= #TCQ 1'b0;
    end else begin
      cnt_init_pre_wait_r <= #TCQ cnt_init_pre_wait_r + 1;
      if (!cnt_init_pre_wait_done_r)
        cnt_init_pre_wait_done_r
          <= #TCQ (cnt_init_pre_wait_r >= DDR2_INIT_PRE_CNT);
    end

  //*****************************************************************
  // Wait for both the DLL to lock (tDLLK) and ZQ calibration to finish
  // (tZQINIT). Both take the same amount of time (512*tCK)
  //*****************************************************************
  always @(posedge clk)
    if (init_state_r == INIT_ZQCL) begin
      cnt_dllk_zqinit_r      <= #TCQ 'b0;
      cnt_dllk_zqinit_done_r <= #TCQ 1'b0;
    end else if (~(phy_ctl_full || phy_cmd_full)) begin
      cnt_dllk_zqinit_r <= #TCQ cnt_dllk_zqinit_r + 1;
      if (!cnt_dllk_zqinit_done_r)
        cnt_dllk_zqinit_done_r
          <= #TCQ (cnt_dllk_zqinit_r == TDLLK_TZQINIT_DELAY_CNT);
    end

  //*****************************************************************
  // Keep track of which MR register needs to be programmed during
  // memory initialization.
  // The counter and the done signal are reset an additional time
  // for DDR2. The same signals are used for the additional DDR2
  // initialization sequence.
  //*****************************************************************

  always @(posedge clk)
    if ((init_state_r == INIT_IDLE) ||
        ((init_state_r == INIT_REFRESH) && (~mem_init_done_r))) begin
      cnt_init_mr_r      <= #TCQ 'b0;
      cnt_init_mr_done_r <= #TCQ 1'b0;
    end else if (init_state_r == INIT_LOAD_MR) begin
      cnt_init_mr_r      <= #TCQ cnt_init_mr_r + 1;
      cnt_init_mr_done_r <= #TCQ (cnt_init_mr_r == INIT_CNT_MR_DONE);
    end

  //*****************************************************************
  // Flag to tell if the first precharge for the DDR2 init sequence is
  // done
  //*****************************************************************

  always @(posedge clk)
    if (init_state_r == INIT_IDLE)
      ddr2_pre_flag_r <= #TCQ 'b0;
    else if (init_state_r == INIT_LOAD_MR)
      ddr2_pre_flag_r <= #TCQ 1'b1;
    // reset the flag for multi rank case
    else if ((ddr2_refresh_flag_r) && (init_state_r == INIT_LOAD_MR_WAIT) &&
             (cnt_cmd_done_r) && (cnt_init_mr_done_r))
      ddr2_pre_flag_r <= #TCQ 'b0;

  //*****************************************************************
  // Flag to tell if the refresh state for the DDR2 init sequence has
  // been reached
  //*****************************************************************

  always @(posedge clk)
    if (init_state_r == INIT_IDLE)
      ddr2_refresh_flag_r <= #TCQ 'b0;
    else if ((init_state_r == INIT_REFRESH) && (~mem_init_done_r))
      // reset the flag for multi rank case
      ddr2_refresh_flag_r <= #TCQ 1'b1;
    else if ((ddr2_refresh_flag_r) && (init_state_r == INIT_LOAD_MR_WAIT) &&
             (cnt_cmd_done_r) && (cnt_init_mr_done_r))
      ddr2_refresh_flag_r <= #TCQ 'b0;

  //*****************************************************************
  // Keep track of the number of auto refreshes for DDR2
  // initialization. The spec asks for a minimum of two refreshes.
  // Four refreshes are performed here. The two extra refreshes are to
  // account for the 200 clock cycle wait between step h and l.
  // Without the two extra refreshes we would have to have a
  // wait state.
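  // (cnt_init_af_done_r below asserts when cnt_init_af_r reaches 2'b11,
  // i.e. on the fourth pass through INIT_REFRESH, matching the four
  // refreshes described above even though JEDEC only mandates two.)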
//***************************************************************** always @(posedge clk) if (init_state_r == INIT_IDLE) begin cnt_init_af_r <= #TCQ 'b0; cnt_init_af_done_r <= #TCQ 1'b0; end else if ((init_state_r == INIT_REFRESH) && (~mem_init_done_r))begin cnt_init_af_r <= #TCQ cnt_init_af_r + 1; cnt_init_af_done_r <= #TCQ (cnt_init_af_r == 2'b11); end //***************************************************************** // Keep track of the register control word programming for // DDR3 RDIMM //***************************************************************** always @(posedge clk) if (init_state_r == INIT_IDLE) reg_ctrl_cnt_r <= #TCQ 'b0; else if (init_state_r == INIT_REG_WRITE) reg_ctrl_cnt_r <= #TCQ reg_ctrl_cnt_r + 1; generate if (RANKS < 2) begin: one_rank always @(posedge clk) if ((init_state_r == INIT_IDLE) || rdlvl_last_byte_done) stg1_wr_done <= #TCQ 1'b0; else if (init_state_r == INIT_RDLVL_STG1_WRITE_READ) stg1_wr_done <= #TCQ 1'b1; end else begin: two_ranks always @(posedge clk) if ((init_state_r == INIT_IDLE) || rdlvl_last_byte_done || (rdlvl_stg1_rank_done )) stg1_wr_done <= #TCQ 1'b0; else if (init_state_r == INIT_RDLVL_STG1_WRITE_READ) stg1_wr_done <= #TCQ 1'b1; end endgenerate always @(posedge clk) if (rst) rnk_ref_cnt <= #TCQ 1'b0; else if (stg1_wr_done && (init_state_r == INIT_REFRESH_WAIT) && cnt_cmd_done_r) rnk_ref_cnt <= #TCQ ~rnk_ref_cnt; always @(posedge clk) if (rst || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_RDLVL_ACT)) num_refresh <= #TCQ 'd0; else if ((init_state_r == INIT_REFRESH) && (~pi_dqs_found_done || ((DRAM_TYPE == "DDR3") && ~oclkdelay_calib_done) || (rdlvl_stg1_done && ~prbs_rdlvl_done) || ((CLK_PERIOD/nCK_PER_CLK <= 2500) && wrcal_done && ~rdlvl_stg1_done) || ((CLK_PERIOD/nCK_PER_CLK > 2500) && wrlvl_done_r1 && ~rdlvl_stg1_done))) num_refresh <= #TCQ num_refresh + 1; //*************************************************************************** // Initialization state machine //*************************************************************************** //***************************************************************** // Next-state logic //***************************************************************** always @(posedge clk) if (rst)begin init_state_r <= #TCQ INIT_IDLE; init_state_r1 <= #TCQ INIT_IDLE; end else begin init_state_r <= #TCQ init_next_state; init_state_r1 <= #TCQ init_state_r; end always @(burst_addr_r or chip_cnt_r or cnt_cmd_done_r or cnt_dllk_zqinit_done_r or cnt_init_af_done_r or cnt_init_mr_done_r or phy_ctl_ready or phy_ctl_full or stg1_wr_done or rdlvl_last_byte_done or phy_cmd_full or num_reads or rnk_ref_cnt or mpr_last_byte_done or oclk_wr_cnt or mpr_rdlvl_done or mpr_rnk_done or num_refresh or oclkdelay_calib_done or oclk_prech_req or oclk_calib_resume or wrlvl_byte_redo or wrlvl_byte_done or wrlvl_final or wrlvl_final_r or cnt_init_pre_wait_done_r or cnt_pwron_cke_done_r or delay_incdec_done or wrcal_wr_cnt or ck_addr_cmd_delay_done or wrcal_read_req or wrcal_reads or cnt_wrcal_rd or wrcal_act_req or temp_wrcal_done or temp_lmr_done or cnt_txpr_done_r or ddr2_pre_flag_r or ddr2_refresh_flag_r or ddr3_lm_done_r or init_state_r or mem_init_done_r or dqsfound_retry or dqs_found_prech_req or prech_req_posedge_r or prech_req_r or wrcal_done or wrcal_resume_r or rdlvl_stg1_done or rdlvl_stg1_done_r1 or rdlvl_stg1_rank_done or rdlvl_stg1_start_int or prbs_rdlvl_done or prbs_last_byte_done or prbs_rdlvl_done_r1 or stg1_wr_rd_cnt or rdlvl_prech_req or wrcal_prech_req or 
read_calib_int or read_calib_r or pi_calib_done_r1 or pi_phase_locked_all_r3 or pi_phase_locked_all_r4 or pi_dqs_found_done or pi_dqs_found_rank_done or pi_dqs_found_start or reg_ctrl_cnt_r or wrlvl_done_r1 or wrlvl_rank_done_r7 or wrcal_final_chk or wrcal_sanity_chk_done) begin init_next_state = init_state_r; (* full_case, parallel_case *) case (init_state_r) //******************************************************* // DRAM initialization //******************************************************* // Initial state - wait for: // 1. Power-on delays to pass // 2. PHY Control Block to assert phy_ctl_ready // 3. PHY Control FIFO must not be FULL // 4. Read path initialization to finish INIT_IDLE: if (cnt_pwron_cke_done_r && phy_ctl_ready && ck_addr_cmd_delay_done && delay_incdec_done && ~(phy_ctl_full || phy_cmd_full) ) begin // If skipping memory initialization (simulation only) if (SIM_INIT_OPTION == "SKIP_INIT") //if (WRLVL == "ON") // Proceed to write leveling // init_next_state = INIT_WRLVL_START; //else //if (SIM_CAL_OPTION != "SKIP_CAL") // Proceed to Phaser_In phase lock init_next_state = INIT_RDLVL_ACT; // else // Skip read leveling //init_next_state = INIT_DONE; else init_next_state = INIT_WAIT_CKE_EXIT; end // Wait minimum of Reset CKE exit time (tXPR = max(tXS, INIT_WAIT_CKE_EXIT: if ((cnt_txpr_done_r) && (DRAM_TYPE == "DDR3") && ~(phy_ctl_full || phy_cmd_full)) begin if((REG_CTRL == "ON") && ((nCS_PER_RANK > 1) || (RANKS > 1))) //register write for reg dimm. Some register chips // have the register chip in a pre-programmed state // in that case the nCS_PER_RANK == 1 && RANKS == 1 init_next_state = INIT_REG_WRITE; else // Load mode register - this state is repeated multiple times init_next_state = INIT_LOAD_MR; end else if ((cnt_init_pre_wait_done_r) && (DRAM_TYPE == "DDR2") && ~(phy_ctl_full || phy_cmd_full)) // DDR2 start with a precharge all command init_next_state = INIT_DDR2_PRECHARGE; INIT_REG_WRITE: init_next_state = INIT_REG_WRITE_WAIT; INIT_REG_WRITE_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin if(reg_ctrl_cnt_r == 3'd5) init_next_state = INIT_LOAD_MR; else init_next_state = INIT_REG_WRITE; end INIT_LOAD_MR: init_next_state = INIT_LOAD_MR_WAIT; // After loading MR, wait at least tMRD INIT_LOAD_MR_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin // If finished loading all mode registers, proceed to next step if (prbs_rdlvl_done && pi_dqs_found_done && rdlvl_stg1_done) // for ddr3 when the correct burst length is writtern at end init_next_state = INIT_PRECHARGE; else if (~wrcal_done && temp_lmr_done) init_next_state = INIT_PRECHARGE_PREWAIT; else if (cnt_init_mr_done_r)begin if(DRAM_TYPE == "DDR3") init_next_state = INIT_ZQCL; else begin //DDR2 if(ddr2_refresh_flag_r)begin // memory initialization per rank for multi-rank case if (!mem_init_done_r && (chip_cnt_r <= RANKS-1)) init_next_state = INIT_DDR2_MULTI_RANK; else init_next_state = INIT_RDLVL_ACT; // ddr2 initialization done.load mode state after refresh end else init_next_state = INIT_DDR2_PRECHARGE; end end else init_next_state = INIT_LOAD_MR; end // DDR2 multi rank transition state INIT_DDR2_MULTI_RANK: init_next_state = INIT_DDR2_MULTI_RANK_WAIT; INIT_DDR2_MULTI_RANK_WAIT: init_next_state = INIT_DDR2_PRECHARGE; // Initial ZQ calibration INIT_ZQCL: init_next_state = INIT_WAIT_DLLK_ZQINIT; // Wait until both DLL have locked, and ZQ calibration done INIT_WAIT_DLLK_ZQINIT: if (cnt_dllk_zqinit_done_r && ~(phy_ctl_full || phy_cmd_full)) // memory initialization per rank for multi-rank 
case if (!mem_init_done_r && (chip_cnt_r <= RANKS-1)) init_next_state = INIT_LOAD_MR; //else if (WRLVL == "ON") // init_next_state = INIT_WRLVL_START; else // skip write-leveling (e.g. for DDR2 interface) init_next_state = INIT_RDLVL_ACT; // Initial precharge for DDR2 INIT_DDR2_PRECHARGE: init_next_state = INIT_DDR2_PRECHARGE_WAIT; INIT_DDR2_PRECHARGE_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin if (ddr2_pre_flag_r) init_next_state = INIT_REFRESH; else // from precharge state initially go to load mode init_next_state = INIT_LOAD_MR; end INIT_REFRESH: if ((RANKS == 2) && (chip_cnt_r == RANKS - 1)) init_next_state = INIT_REFRESH_RNK2_WAIT; else init_next_state = INIT_REFRESH_WAIT; INIT_REFRESH_RNK2_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_PRECHARGE; INIT_REFRESH_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full))begin if(cnt_init_af_done_r && (~mem_init_done_r)) // go to lm state as part of DDR2 init sequence init_next_state = INIT_LOAD_MR; else if (pi_dqs_found_done && ~wrlvl_done_r1 && ~wrlvl_final && ~wrlvl_byte_redo && (WRLVL == "ON")) init_next_state = INIT_WRLVL_START; else if (~pi_dqs_found_done || (rdlvl_stg1_done && ~prbs_rdlvl_done) || ((CLK_PERIOD/nCK_PER_CLK <= 2500) && wrcal_done && ~rdlvl_stg1_done) || ((CLK_PERIOD/nCK_PER_CLK > 2500) && wrlvl_done_r1 && ~rdlvl_stg1_done)) begin if (num_refresh == 'd8) init_next_state = INIT_RDLVL_ACT; else init_next_state = INIT_REFRESH; end else if ((~wrcal_done && wrlvl_byte_redo)&& (DRAM_TYPE == "DDR3") && (CLK_PERIOD/nCK_PER_CLK > 2500)) init_next_state = INIT_WRLVL_LOAD_MR2; else if (((prbs_rdlvl_done && rdlvl_stg1_done && pi_dqs_found_done) && (WRLVL == "ON")) && mem_init_done_r && (CLK_PERIOD/nCK_PER_CLK > 2500)) init_next_state = INIT_WRCAL_ACT; else if (pi_dqs_found_done && (DRAM_TYPE == "DDR3") && ~(mpr_last_byte_done || mpr_rdlvl_done)) begin if (num_refresh == 'd8) init_next_state = INIT_MPR_RDEN; else init_next_state = INIT_REFRESH; end else if (((~oclkdelay_calib_done && wrlvl_final) || (~wrcal_done && wrlvl_byte_redo)) && (DRAM_TYPE == "DDR3")) init_next_state = INIT_WRLVL_LOAD_MR2; else if (~oclkdelay_calib_done && (mpr_last_byte_done || mpr_rdlvl_done) && (DRAM_TYPE == "DDR3")) begin if (num_refresh == 'd8) init_next_state = INIT_OCLKDELAY_ACT; else init_next_state = INIT_REFRESH; end else if ((~wrcal_done && (WRLVL == "ON") && (CLK_PERIOD/nCK_PER_CLK <= 2500)) && pi_dqs_found_done) init_next_state = INIT_WRCAL_ACT; else if (mem_init_done_r) begin if (RANKS < 2) init_next_state = INIT_RDLVL_ACT; else if (stg1_wr_done && ~rnk_ref_cnt && ~rdlvl_stg1_done) init_next_state = INIT_PRECHARGE; else init_next_state = INIT_RDLVL_ACT; end else // to DDR2 init state as part of DDR2 init sequence init_next_state = INIT_REFRESH; end //****************************************************** // Write Leveling //******************************************************* // Enable write leveling in MR1 and start write leveling // for current rank INIT_WRLVL_START: init_next_state = INIT_WRLVL_WAIT; // Wait for both MR load and write leveling to complete // (write leveling should take much longer than MR load..) 
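      // (Write-leveling overview, for reference: INIT_WRLVL_START reloads
      // MR1 with the leveling-enable bit set, the PHY then drives DQS
      // pulses and the DRAM feeds the sampled CK level back on DQ until
      // wrlvl_rank_done_r7 asserts; MR1/MR2 are reloaded afterwards to
      // disable leveling and restore the normal ODT settings.)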
INIT_WRLVL_WAIT: if (wrlvl_rank_done_r7 && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_WRLVL_LOAD_MR; // Disable write leveling in MR1 for current rank INIT_WRLVL_LOAD_MR: init_next_state = INIT_WRLVL_LOAD_MR_WAIT; INIT_WRLVL_LOAD_MR_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_WRLVL_LOAD_MR2; // Load MR2 to set ODT: Dynamic ODT for single rank case // And ODTs for multi-rank case as well INIT_WRLVL_LOAD_MR2: init_next_state = INIT_WRLVL_LOAD_MR2_WAIT; // Wait tMRD before proceeding INIT_WRLVL_LOAD_MR2_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin //if (wrlvl_byte_done) // init_next_state = INIT_PRECHARGE_PREWAIT; // else if ((RANKS == 2) && wrlvl_rank_done_r2) // init_next_state = INIT_WRLVL_LOAD_MR2_WAIT; if (~wrlvl_done_r1) init_next_state = INIT_WRLVL_START; else if (SIM_CAL_OPTION == "SKIP_CAL") // If skip rdlvl, then we're done init_next_state = INIT_DONE; else // Otherwise, proceed to read leveling //init_next_state = INIT_RDLVL_ACT; init_next_state = INIT_PRECHARGE_PREWAIT; end //******************************************************* // Read Leveling //******************************************************* // single row activate. All subsequent read leveling writes and // read will take place in this row INIT_RDLVL_ACT: init_next_state = INIT_RDLVL_ACT_WAIT; // hang out for awhile before issuing subsequent column commands // it's also possible to reach this state at various points // during read leveling - determine what the current stage is INIT_RDLVL_ACT_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin // Just finished an activate. Now either write, read, or precharge // depending on where we are in the training sequence if (!pi_calib_done_r1) init_next_state = INIT_PI_PHASELOCK_READS; else if (!pi_dqs_found_done) // (!pi_dqs_found_start || pi_dqs_found_rank_done)) init_next_state = INIT_RDLVL_STG2_READ; else if (~wrcal_done && (WRLVL == "ON") && (CLK_PERIOD/nCK_PER_CLK <= 2500)) init_next_state = INIT_WRCAL_ACT_WAIT; else if ((!rdlvl_stg1_done && ~stg1_wr_done && ~rdlvl_last_byte_done) || (!prbs_rdlvl_done && ~stg1_wr_done && ~prbs_last_byte_done)) begin // Added to avoid rdlvl_stg1 write data pattern at the start of PRBS rdlvl if (!prbs_rdlvl_done && ~stg1_wr_done && rdlvl_last_byte_done) init_next_state = INIT_RDLVL_ACT_WAIT; else init_next_state = INIT_RDLVL_STG1_WRITE; end else if ((!rdlvl_stg1_done && rdlvl_stg1_start_int) || !prbs_rdlvl_done) begin if (rdlvl_last_byte_done || prbs_last_byte_done) // Added to avoid extra reads at the end of read leveling init_next_state = INIT_RDLVL_ACT_WAIT; else // Case 2: If in stage 1, and just precharged after training // previous byte, then continue reading init_next_state = INIT_RDLVL_STG1_READ; end else if ((prbs_rdlvl_done && rdlvl_stg1_done && (RANKS == 1)) && (WRLVL == "ON") && (CLK_PERIOD/nCK_PER_CLK > 2500)) init_next_state = INIT_WRCAL_ACT_WAIT; else // Otherwise, if we're finished with calibration, then precharge // the row - silly, because we just opened it - possible to take // this out by adding logic to avoid the ACT in first place. 
Make // sure that cnt_cmd_done will handle tRAS(min) init_next_state = INIT_PRECHARGE_PREWAIT; end //************************************************** // Back-to-back reads for Phaser_IN Phase locking // DQS to FREQ_REF clock //************************************************** INIT_PI_PHASELOCK_READS: if (pi_phase_locked_all_r3 && ~pi_phase_locked_all_r4) init_next_state = INIT_PRECHARGE_PREWAIT; //********************************************* // Stage 1 read-leveling (write and continuous read) //********************************************* // Write training pattern for stage 1 // PRBS pattern of TBD length INIT_RDLVL_STG1_WRITE: // 4:1 DDR3 BL8 will require all 8 words in 1 DIV4 clock cycle // 2:1 DDR2/DDR3 BL8 will require 2 DIV2 clock cycles for 8 words // 2:1 DDR2 BL4 will require 1 DIV2 clock cycle for 4 words // An entire row worth of writes issued before proceeding to reads // The number of write is (2^column width)/burst length to accomodate // PRBS pattern for window detection. if (stg1_wr_rd_cnt == 9'd1) init_next_state = INIT_RDLVL_STG1_WRITE_READ; // Write-read turnaround INIT_RDLVL_STG1_WRITE_READ: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_RDLVL_STG1_READ; // Continuous read, where interruptible by precharge request from // calibration logic. Also precharges when stage 1 is complete // No precharges when reads provided to Phaser_IN for phase locking // FREQ_REF to read DQS since data integrity is not important. INIT_RDLVL_STG1_READ: if (rdlvl_stg1_rank_done || (rdlvl_stg1_done && ~rdlvl_stg1_done_r1) || prech_req_posedge_r || (prbs_rdlvl_done && ~prbs_rdlvl_done_r1)) init_next_state = INIT_PRECHARGE_PREWAIT; //********************************************* // DQSFOUND calibration (set of 4 reads with gaps) //********************************************* // Read of training data. Note that Stage 2 is not a constant read, // instead there is a large gap between each set of back-to-back reads INIT_RDLVL_STG2_READ: // 4 read commands issued back-to-back if (num_reads == 'b1) init_next_state = INIT_RDLVL_STG2_READ_WAIT; // Wait before issuing the next set of reads. If a precharge request // comes in then handle - this can occur after stage 2 calibration is // completed for a DQS group INIT_RDLVL_STG2_READ_WAIT: if (~(phy_ctl_full || phy_cmd_full)) begin if (pi_dqs_found_rank_done || pi_dqs_found_done || prech_req_posedge_r) init_next_state = INIT_PRECHARGE_PREWAIT; else if (cnt_cmd_done_r) init_next_state = INIT_RDLVL_STG2_READ; end //****************************************************************** // MPR Read Leveling for DDR3 OCLK_DELAYED calibration //****************************************************************** // Issue Load Mode Register 3 command with A[2]=1, A[1:0]=2'b00 // to enable Multi Purpose Register (MPR) Read INIT_MPR_RDEN: init_next_state = INIT_MPR_WAIT; //Wait tMRD, tMOD INIT_MPR_WAIT: if (cnt_cmd_done_r) begin init_next_state = INIT_MPR_READ; end // Issue back-to-back read commands to read from MPR with // Address bus 0x0000 for BL=8. DQ[0] will output the pre-defined // MPR pattern of 01010101 (Rise0 = 1'b0, Fall0 = 1'b1 ...) 
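      // (These MPR reads give the read path a known data pattern without
      // depending on a previously calibrated write path, which is what the
      // OCLKDELAY calibration stage that follows relies on. The state exits
      // on mpr_rdlvl_done/mpr_rnk_done or on a precharge request from the
      // calibration logic, as coded below.)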
INIT_MPR_READ: if (mpr_rdlvl_done || mpr_rnk_done || rdlvl_prech_req) init_next_state = INIT_MPR_DISABLE_PREWAIT; INIT_MPR_DISABLE_PREWAIT: if (cnt_cmd_done_r) init_next_state = INIT_MPR_DISABLE; // Issue Load Mode Register 3 command with A[2]=0 to disable // MPR read INIT_MPR_DISABLE: init_next_state = INIT_MPR_DISABLE_WAIT; INIT_MPR_DISABLE_WAIT: init_next_state = INIT_PRECHARGE_PREWAIT; //*********************************************************************** // OCLKDELAY Calibration //*********************************************************************** // This calibration requires single write followed by single read to // determine the Phaser_Out stage 3 delay required to center write DQS // in write DQ valid window. // Single Row Activate command before issuing Write command INIT_OCLKDELAY_ACT: init_next_state = INIT_OCLKDELAY_ACT_WAIT; INIT_OCLKDELAY_ACT_WAIT: if (cnt_cmd_done_r && ~oclk_prech_req) init_next_state = INIT_OCLKDELAY_WRITE; else if (oclkdelay_calib_done || prech_req_posedge_r) init_next_state = INIT_PRECHARGE_PREWAIT; INIT_OCLKDELAY_WRITE: if (oclk_wr_cnt == 4'd1) init_next_state = INIT_OCLKDELAY_WRITE_WAIT; INIT_OCLKDELAY_WRITE_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_OCLKDELAY_READ; INIT_OCLKDELAY_READ: init_next_state = INIT_OCLKDELAY_READ_WAIT; INIT_OCLKDELAY_READ_WAIT: if (~(phy_ctl_full || phy_cmd_full)) begin if (oclk_calib_resume) init_next_state = INIT_OCLKDELAY_WRITE; else if (oclkdelay_calib_done || prech_req_posedge_r || wrlvl_final) init_next_state = INIT_PRECHARGE_PREWAIT; end //********************************************* // Write calibration //********************************************* // single row activate INIT_WRCAL_ACT: init_next_state = INIT_WRCAL_ACT_WAIT; // hang out for awhile before issuing subsequent column command INIT_WRCAL_ACT_WAIT: if (cnt_cmd_done_r && ~wrcal_prech_req) init_next_state = INIT_WRCAL_WRITE; else if (wrcal_done || prech_req_posedge_r) init_next_state = INIT_PRECHARGE_PREWAIT; // Write training pattern for write calibration INIT_WRCAL_WRITE: // Once we've issued enough commands for 8 words - proceed to reads //if (burst_addr_r == 1'b1) if (wrcal_wr_cnt == 4'd1) init_next_state = INIT_WRCAL_WRITE_READ; // Write-read turnaround INIT_WRCAL_WRITE_READ: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) init_next_state = INIT_WRCAL_READ; else if (dqsfound_retry) init_next_state = INIT_RDLVL_STG2_READ_WAIT; INIT_WRCAL_READ: if (burst_addr_r == 1'b1) init_next_state = INIT_WRCAL_READ_WAIT; INIT_WRCAL_READ_WAIT: if (~(phy_ctl_full || phy_cmd_full)) begin if (wrcal_resume_r) begin if (wrcal_final_chk) init_next_state = INIT_WRCAL_READ; else init_next_state = INIT_WRCAL_WRITE; end else if (wrcal_done || prech_req_posedge_r || wrcal_act_req || // Added to support PO fine delay inc when TG errors wrlvl_byte_redo || (temp_wrcal_done && ~temp_lmr_done)) init_next_state = INIT_PRECHARGE_PREWAIT; else if (dqsfound_retry) init_next_state = INIT_RDLVL_STG2_READ_WAIT; else if (wrcal_read_req && cnt_wrcal_rd) init_next_state = INIT_WRCAL_MULT_READS; end INIT_WRCAL_MULT_READS: // multiple read commands issued back-to-back if (wrcal_reads == 'b1) init_next_state = INIT_WRCAL_READ_WAIT; //********************************************* // Handling of precharge during and in between read-level stages //********************************************* // Make sure we aren't violating any timing specs by precharging // immediately INIT_PRECHARGE_PREWAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || 
phy_cmd_full)) init_next_state = INIT_PRECHARGE; // Initiate precharge INIT_PRECHARGE: init_next_state = INIT_PRECHARGE_WAIT; INIT_PRECHARGE_WAIT: if (cnt_cmd_done_r && ~(phy_ctl_full || phy_cmd_full)) begin if ((wrcal_sanity_chk_done && (DRAM_TYPE == "DDR3")) || (rdlvl_stg1_done && prbs_rdlvl_done && pi_dqs_found_done && (DRAM_TYPE == "DDR2"))) init_next_state = INIT_DONE; else if ((wrcal_done || (WRLVL == "OFF")) && rdlvl_stg1_done && prbs_rdlvl_done && pi_dqs_found_done && ((ddr3_lm_done_r) || (DRAM_TYPE == "DDR2"))) // If read leveling and phase detection calibration complete, // and programing the correct burst length then we're finished init_next_state = INIT_WRCAL_ACT; else if ((wrcal_done || (WRLVL == "OFF") || (~wrcal_done && temp_wrcal_done && ~temp_lmr_done)) && (rdlvl_stg1_done || (~wrcal_done && temp_wrcal_done && ~temp_lmr_done)) && prbs_rdlvl_done && rdlvl_stg1_done && pi_dqs_found_done) begin // after all calibration program the correct burst length init_next_state = INIT_LOAD_MR; // Added to support PO fine delay inc when TG errors end else if (~wrcal_done && temp_wrcal_done && temp_lmr_done) init_next_state = INIT_WRCAL_READ_WAIT; else if (rdlvl_stg1_done && pi_dqs_found_done && (WRLVL == "ON")) // If read leveling finished, proceed to write calibration init_next_state = INIT_REFRESH; else // Otherwise, open row for read-leveling purposes init_next_state = INIT_REFRESH; end //******************************************************* // Initialization/Calibration done. Take a long rest, relax //******************************************************* INIT_DONE: init_next_state = INIT_DONE; endcase end //***************************************************************** // Initialization done signal - asserted before leveling starts //***************************************************************** always @(posedge clk) if (rst) mem_init_done_r <= #TCQ 1'b0; else if ((!cnt_dllk_zqinit_done_r && (cnt_dllk_zqinit_r == TDLLK_TZQINIT_DELAY_CNT) && (chip_cnt_r == RANKS-1) && (DRAM_TYPE == "DDR3")) || ( (init_state_r == INIT_LOAD_MR_WAIT) && (ddr2_refresh_flag_r) && (chip_cnt_r == RANKS-1) && (cnt_init_mr_done_r) && (DRAM_TYPE == "DDR2"))) mem_init_done_r <= #TCQ 1'b1; //***************************************************************** // Write Calibration signal to PHY Control Block - asserted before // Write Leveling starts //***************************************************************** //generate //if (RANKS < 2) begin: ranks_one always @(posedge clk) begin if (rst || (done_dqs_tap_inc && (init_state_r == INIT_WRLVL_LOAD_MR2))) write_calib <= #TCQ 1'b0; else if (wrlvl_active_r1) write_calib <= #TCQ 1'b1; end //end else begin: ranks_two // always @(posedge clk) begin // if (rst || // ((init_state_r1 == INIT_WRLVL_LOAD_MR_WAIT) && // ((wrlvl_rank_done_r2 && (chip_cnt_r == RANKS-1)) || // (SIM_CAL_OPTION == "FAST_CAL")))) // write_calib <= #TCQ 1'b0; // else if (wrlvl_active_r1) // write_calib <= #TCQ 1'b1; // end //end //endgenerate //***************************************************************** // Read Calibration signal to PHY Control Block - asserted after // Write Leveling during PHASER_IN phase locking stage. 
// Must be de-asserted before Read Leveling //***************************************************************** always @(posedge clk) begin if (rst || pi_calib_done_r1) read_calib_int <= #TCQ 1'b0; else if (~pi_calib_done_r1 && (init_state_r == INIT_RDLVL_ACT_WAIT) && (cnt_cmd_r == CNTNEXT_CMD)) read_calib_int <= #TCQ 1'b1; end always @(posedge clk) read_calib_r <= #TCQ read_calib_int; always @(posedge clk) begin if (rst || pi_calib_done_r1) read_calib <= #TCQ 1'b0; else if (~pi_calib_done_r1 && (init_state_r == INIT_PI_PHASELOCK_READS)) read_calib <= #TCQ 1'b1; end always @(posedge clk) if (rst) pi_calib_done_r <= #TCQ 1'b0; else if (pi_calib_rank_done_r)// && (chip_cnt_r == RANKS-1)) pi_calib_done_r <= #TCQ 1'b1; always @(posedge clk) if (rst) pi_calib_rank_done_r <= #TCQ 1'b0; else if (pi_phase_locked_all_r3 && ~pi_phase_locked_all_r4) pi_calib_rank_done_r <= #TCQ 1'b1; else pi_calib_rank_done_r <= #TCQ 1'b0; always @(posedge clk) begin if (rst || ((PRE_REV3ES == "ON") && temp_wrcal_done && ~temp_wrcal_done_r)) pi_phaselock_timer <= #TCQ 'd0; else if (((init_state_r == INIT_PI_PHASELOCK_READS) && (pi_phaselock_timer != PHASELOCKED_TIMEOUT)) || tg_timer_go) pi_phaselock_timer <= #TCQ pi_phaselock_timer + 1; else pi_phaselock_timer <= #TCQ pi_phaselock_timer; end assign pi_phase_locked_err = (pi_phaselock_timer == PHASELOCKED_TIMEOUT) ? 1'b1 : 1'b0; //***************************************************************** // DDR3 final burst length programming done. For DDR3 during // calibration the burst length is fixed to BL8. After calibration // the correct burst length is programmed. //***************************************************************** always @(posedge clk) if (rst) ddr3_lm_done_r <= #TCQ 1'b0; else if ((init_state_r == INIT_LOAD_MR_WAIT) && (chip_cnt_r == RANKS-1) && wrcal_done) ddr3_lm_done_r <= #TCQ 1'b1; always @(posedge clk) begin pi_dqs_found_rank_done_r <= #TCQ pi_dqs_found_rank_done; pi_phase_locked_all_r1 <= #TCQ pi_phase_locked_all; pi_phase_locked_all_r2 <= #TCQ pi_phase_locked_all_r1; pi_phase_locked_all_r3 <= #TCQ pi_phase_locked_all_r2; pi_phase_locked_all_r4 <= #TCQ pi_phase_locked_all_r3; pi_dqs_found_all_r <= #TCQ pi_dqs_found_done; pi_calib_done_r1 <= #TCQ pi_calib_done_r; end //*************************************************************************** // Logic for deep memory (multi-rank) configurations //*************************************************************************** // For DDR3 asserted when generate if (RANKS < 2) begin: single_rank always @(posedge clk) chip_cnt_r <= #TCQ 2'b00; end else begin: dual_rank always @(posedge clk) if (rst || // Set chip_cnt_r to 2'b00 after both Ranks are read leveled (rdlvl_stg1_done && prbs_rdlvl_done && ~wrcal_done) || // Set chip_cnt_r to 2'b00 after both Ranks are write leveled (wrlvl_done_r && (init_state_r==INIT_WRLVL_LOAD_MR2_WAIT)))begin chip_cnt_r <= #TCQ 2'b00; end else if ((((init_state_r == INIT_WAIT_DLLK_ZQINIT) && (cnt_dllk_zqinit_r == TDLLK_TZQINIT_DELAY_CNT)) && (DRAM_TYPE == "DDR3")) || ((init_state_r==INIT_REFRESH_RNK2_WAIT) && (cnt_cmd_r=='d36)) || //mpr_rnk_done || //(rdlvl_stg1_rank_done && ~rdlvl_last_byte_done) || //(stg1_wr_done && (init_state_r == INIT_REFRESH) && //~(rnk_ref_cnt && rdlvl_last_byte_done)) || // Increment chip_cnt_r to issue Refresh to second rank (~pi_dqs_found_all_r && (init_state_r==INIT_PRECHARGE_PREWAIT) && (cnt_cmd_r=='d36)) || // Increment chip_cnt_r when DQSFOUND done for the Rank (pi_dqs_found_rank_done && ~pi_dqs_found_rank_done_r) || ((init_state_r == 
INIT_LOAD_MR_WAIT)&& cnt_cmd_done_r && wrcal_done) || ((init_state_r == INIT_DDR2_MULTI_RANK) && (DRAM_TYPE == "DDR2"))) begin if ((~mem_init_done_r || ~rdlvl_stg1_done || ~pi_dqs_found_done || // condition to increment chip_cnt during // final burst length programming for DDR3 ~pi_calib_done_r || wrcal_done) //~mpr_rdlvl_done || && (chip_cnt_r != RANKS-1)) chip_cnt_r <= #TCQ chip_cnt_r + 1; else chip_cnt_r <= #TCQ 2'b00; end end endgenerate generate if ((REG_CTRL == "ON") && (RANKS == 1)) begin: DDR3_RDIMM_1rank always @(posedge clk) begin if (rst) phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; else if (init_state_r == INIT_REG_WRITE) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if(!(CWL_M%2)) begin phy_int_cs_n[0%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[1%nCK_PER_CLK] <= #TCQ 1'b0; end else begin phy_int_cs_n[2%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[3%nCK_PER_CLK] <= #TCQ 1'b0; end end else if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) //even CWL phy_int_cs_n[0] <= #TCQ 1'b0; else // odd CWL phy_int_cs_n[1*nCS_PER_RANK] <= #TCQ 1'b0; end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; end end else if (RANKS == 1) begin: DDR3_1rank always @(posedge clk) begin if (rst) phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; else if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) begin //even CWL for (n = 0; n < nCS_PER_RANK; n = n + 1) begin phy_int_cs_n[n] <= #TCQ 1'b0; end end else begin //odd CWL for (p = nCS_PER_RANK; p < 2*nCS_PER_RANK; p = p + 1) begin phy_int_cs_n[p] <= #TCQ 1'b0; end end end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; end end else if ((REG_CTRL == "ON") && (RANKS == 2)) begin: DDR3_2rank always @(posedge clk) begin if (rst) phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; else if (init_state_r == INIT_REG_WRITE) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if(!(CWL_M%2)) begin phy_int_cs_n[0%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[1%nCK_PER_CLK] <= #TCQ 1'b0; end else begin phy_int_cs_n[2%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[3%nCK_PER_CLK] <= #TCQ 1'b0; end end else begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; case (chip_cnt_r) 2'b00:begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == 
INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) //even CWL phy_int_cs_n[0] <= #TCQ 1'b0; else // odd CWL phy_int_cs_n[1*CS_WIDTH*nCS_PER_RANK] <= #TCQ 1'b0; end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; //for (n = 0; n < nCS_PER_RANK*nCK_PER_CLK*2; n = n + (nCS_PER_RANK*2)) begin // // phy_int_cs_n[n+:nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; //end end 2'b01:begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) //even CWL phy_int_cs_n[1] <= #TCQ 1'b0; else // odd CWL phy_int_cs_n[1+1*CS_WIDTH*nCS_PER_RANK] <= #TCQ 1'b0; end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; //for (p = nCS_PER_RANK; p < nCS_PER_RANK*nCK_PER_CLK*2; p = p + (nCS_PER_RANK*2)) begin // // phy_int_cs_n[p+:nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; //end end endcase end end end else if (RANKS == 2) begin: DDR3_2rank always @(posedge clk) begin if (rst) phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; else if (init_state_r == INIT_REG_WRITE) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if(!(CWL_M%2)) begin phy_int_cs_n[0%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[1%nCK_PER_CLK] <= #TCQ 1'b0; end else begin phy_int_cs_n[2%nCK_PER_CLK] <= #TCQ 1'b0; phy_int_cs_n[3%nCK_PER_CLK] <= #TCQ 1'b0; end end else begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; case (chip_cnt_r) 2'b00:begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) begin //even CWL for (n = 0; n < nCS_PER_RANK; n = n + 1) begin phy_int_cs_n[n] <= #TCQ 1'b0; end end else begin // odd CWL for (p = CS_WIDTH*nCS_PER_RANK; p < (CS_WIDTH*nCS_PER_RANK + nCS_PER_RANK); p = p + 1) begin phy_int_cs_n[p] <= #TCQ 1'b0; end end end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; //for (n = 0; n < nCS_PER_RANK*nCK_PER_CLK*2; n = n + (nCS_PER_RANK*2)) begin // // phy_int_cs_n[n+:nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; //end end 2'b01:begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == 
INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r)) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; if (!(CWL_M % 2)) begin //even CWL for (q = nCS_PER_RANK; q < (2 * nCS_PER_RANK); q = q + 1) begin phy_int_cs_n[q] <= #TCQ 1'b0; end end else begin // odd CWL for (m = (nCS_PER_RANK*CS_WIDTH + nCS_PER_RANK); m < (nCS_PER_RANK*CS_WIDTH + 2*nCS_PER_RANK); m = m + 1) begin phy_int_cs_n[m] <= #TCQ 1'b0; end end end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; //for (p = nCS_PER_RANK; p < nCS_PER_RANK*nCK_PER_CLK*2; p = p + (nCS_PER_RANK*2)) begin // // phy_int_cs_n[p+:nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; //end end endcase end end // always @ (posedge clk) end // commented out for now. Need it for DDR2 2T timing /* end else begin: DDR2 always @(posedge clk) if (rst) begin phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; end else begin if (init_state_r == INIT_REG_WRITE) begin // All ranks selected simultaneously phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b0}}; end else if ((wrlvl_odt) || (init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH)) begin phy_int_cs_n[0] <= #TCQ 1'b0; end else phy_int_cs_n <= #TCQ {CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK{1'b1}}; end // else: !if(rst) end // block: DDR2 */ endgenerate assign phy_cs_n = phy_int_cs_n; //*************************************************************************** // Write/read burst logic for calibration //*************************************************************************** assign rdlvl_wr = (init_state_r == INIT_OCLKDELAY_WRITE) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE); assign rdlvl_rd = (init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_MPR_READ) || (init_state_r == INIT_WRCAL_MULT_READS); assign rdlvl_wr_rd = rdlvl_wr | rdlvl_rd; //*************************************************************************** // Address generation and logic to count # of writes/reads issued during // certain stages of calibration //*************************************************************************** // Column address generation logic: // Keep track of the current column address - since all bursts are in // increments of 8 only during calibration, we need to keep track of // addresses [COL_WIDTH-1:3], lower order address bits will always = 0 always @(posedge clk) if (rst || wrcal_done) burst_addr_r <= #TCQ 1'b0; else if ((init_state_r == INIT_WRCAL_ACT_WAIT) || (init_state_r == 
INIT_OCLKDELAY_ACT_WAIT) || (init_state_r == INIT_OCLKDELAY_WRITE) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_WRITE_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS) || (init_state_r == INIT_WRCAL_READ_WAIT)) burst_addr_r <= #TCQ 1'b1; else if (rdlvl_wr_rd && new_burst_r) burst_addr_r <= #TCQ ~burst_addr_r; else burst_addr_r <= #TCQ 1'b0; // Read Level Stage 1 requires writes to the entire row since // a PRBS pattern is being written. This counter keeps track // of the number of writes which depends on the column width // The (stg1_wr_rd_cnt==9'd0) condition was added so the col // address wraps around during stage1 reads always @(posedge clk) if (rst || ((init_state_r == INIT_RDLVL_STG1_WRITE_READ) && ~rdlvl_stg1_done)) stg1_wr_rd_cnt <= #TCQ NUM_STG1_WR_RD; else if (rdlvl_last_byte_done || (stg1_wr_rd_cnt == 9'd1) || (prbs_rdlvl_prech_req && (init_state_r == INIT_RDLVL_ACT_WAIT))) stg1_wr_rd_cnt <= #TCQ 'd128; else if (((init_state_r == INIT_RDLVL_STG1_WRITE) && new_burst_r && ~phy_data_full) ||((init_state_r == INIT_RDLVL_STG1_READ) && rdlvl_stg1_done)) stg1_wr_rd_cnt <= #TCQ stg1_wr_rd_cnt - 1; // OCLKDELAY calibration requires multiple writes because // write can be up to 2 cycles early since OCLKDELAY tap // can go down to 0 always @(posedge clk) if (rst || (init_state_r == INIT_OCLKDELAY_WRITE_WAIT) || (oclk_wr_cnt == 4'd0)) oclk_wr_cnt <= #TCQ NUM_STG1_WR_RD; else if ((init_state_r == INIT_OCLKDELAY_WRITE) && new_burst_r && ~phy_data_full) oclk_wr_cnt <= #TCQ oclk_wr_cnt - 1; // Write calibration requires multiple writes because // write can be up to 2 cycles early due to new write // leveling algorithm to avoid late writes always @(posedge clk) if (rst || (init_state_r == INIT_WRCAL_WRITE_READ) || (wrcal_wr_cnt == 4'd0)) wrcal_wr_cnt <= #TCQ NUM_STG1_WR_RD; else if ((init_state_r == INIT_WRCAL_WRITE) && new_burst_r && ~phy_data_full) wrcal_wr_cnt <= #TCQ wrcal_wr_cnt - 1; generate if(nCK_PER_CLK == 4) begin:back_to_back_reads_4_1 // 4 back-to-back reads with gaps for // read data_offset calibration (rdlvl stage 2) always @(posedge clk) if (rst || (init_state_r == INIT_RDLVL_STG2_READ_WAIT)) num_reads <= #TCQ 3'b000; else if ((num_reads > 3'b000) && ~(phy_ctl_full || phy_cmd_full)) num_reads <= #TCQ num_reads - 1; else if ((init_state_r == INIT_RDLVL_STG2_READ) || phy_ctl_full || phy_cmd_full && new_burst_r) num_reads <= #TCQ 3'b011; end else if(nCK_PER_CLK == 2) begin: back_to_back_reads_2_1 // 4 back-to-back reads with gaps for // read data_offset calibration (rdlvl stage 2) always @(posedge clk) if (rst || (init_state_r == INIT_RDLVL_STG2_READ_WAIT)) num_reads <= #TCQ 3'b000; else if ((num_reads > 3'b000) && ~(phy_ctl_full || phy_cmd_full)) num_reads <= #TCQ num_reads - 1; else if ((init_state_r == INIT_RDLVL_STG2_READ) || phy_ctl_full || phy_cmd_full && new_burst_r) num_reads <= #TCQ 3'b111; end endgenerate // back-to-back reads during write calibration always @(posedge clk) if (rst ||(init_state_r == INIT_WRCAL_READ_WAIT)) wrcal_reads <= #TCQ 2'b00; else if ((wrcal_reads > 2'b00) && ~(phy_ctl_full || phy_cmd_full)) wrcal_reads <= #TCQ wrcal_reads - 1; else if ((init_state_r == INIT_WRCAL_MULT_READS) || phy_ctl_full || phy_cmd_full && new_burst_r) wrcal_reads <= #TCQ 'd255; // determine how often to issue row command during read leveling writes // and reads always @(posedge clk) if (rdlvl_wr_rd) begin // 2:1 mode - every other command issued is a data command // 4:1 mode - 
every command issued is a data command if (nCK_PER_CLK == 2) begin if (!phy_ctl_full) new_burst_r <= #TCQ ~new_burst_r; end else new_burst_r <= #TCQ 1'b1; end else new_burst_r <= #TCQ 1'b1; // indicate when a write is occurring. PHY_WRDATA_EN must be asserted // simultaneous with the corresponding command/address for CWL = 5,6 always @(posedge clk) begin rdlvl_wr_r <= #TCQ rdlvl_wr; calib_wrdata_en <= #TCQ phy_wrdata_en; end always @(posedge clk) begin if (rst || wrcal_done) extend_cal_pat <= #TCQ 1'b0; else if (temp_lmr_done && (PRE_REV3ES == "ON")) extend_cal_pat <= #TCQ 1'b1; end generate if ((nCK_PER_CLK == 4) || (BURST_MODE == "4")) begin: wrdqen_div4 // Write data enable asserted for one DIV4 clock cycle // Only BL8 supported with DIV4. DDR2 BL4 will use DIV2. always @(rst or phy_data_full or init_state_r) begin if (~phy_data_full && ((init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE) || (init_state_r == INIT_WRCAL_WRITE))) phy_wrdata_en = 1'b1; else phy_wrdata_en = 1'b0; end end else begin: wrdqen_div2 // block: wrdqen_div4 always @(rdlvl_wr or phy_ctl_full or new_burst_r or phy_wrdata_en_r1 or phy_data_full) if((rdlvl_wr & ~phy_ctl_full & new_burst_r & ~phy_data_full) | phy_wrdata_en_r1) phy_wrdata_en = 1'b1; else phy_wrdata_en = 1'b0; always @(posedge clk) phy_wrdata_en_r1 <= #TCQ rdlvl_wr & ~phy_ctl_full & new_burst_r & ~phy_data_full; always @(posedge clk) begin if (!phy_wrdata_en & first_rdlvl_pat_r) wrdata_pat_cnt <= #TCQ 2'b00; else if (wrdata_pat_cnt == 2'b11) wrdata_pat_cnt <= #TCQ 2'b10; else wrdata_pat_cnt <= #TCQ wrdata_pat_cnt + 1; end always @(posedge clk) begin if (!phy_wrdata_en & first_wrcal_pat_r) wrcal_pat_cnt <= #TCQ 2'b00; else if (extend_cal_pat && (wrcal_pat_cnt == 2'b01)) wrcal_pat_cnt <= #TCQ 2'b00; else if (wrcal_pat_cnt == 2'b11) wrcal_pat_cnt <= #TCQ 2'b10; else wrcal_pat_cnt <= #TCQ wrcal_pat_cnt + 1; end end endgenerate // indicate when a write is occurring. PHY_RDDATA_EN must be asserted // simultaneous with the corresponding command/address. PHY_RDDATA_EN // is used during read-leveling to determine read latency assign phy_rddata_en = ~phy_if_empty; // Read data valid generation for MC and User Interface after calibration is // complete assign phy_rddata_valid = init_complete_r1_timing ? phy_rddata_en : 1'b0; //*************************************************************************** // Generate training data written at start of each read-leveling stage // For every stage of read leveling, 8 words are written into memory // The format is as follows (shown as {rise,fall}): // Stage 1: 0xF, 0x0, 0xF, 0x0, 0xF, 0x0, 0xF, 0x0 // Stage 2: 0xF, 0x0, 0xA, 0x5, 0x5, 0xA, 0x9, 0x6 //*************************************************************************** always @(posedge clk) if ((init_state_r == INIT_IDLE) || (init_state_r == INIT_RDLVL_STG1_WRITE)) cnt_init_data_r <= #TCQ 2'b00; else if (phy_wrdata_en) cnt_init_data_r <= #TCQ cnt_init_data_r + 1; else if (init_state_r == INIT_WRCAL_WRITE) cnt_init_data_r <= #TCQ 2'b10; // write different sequence for very // first write to memory only. 
Used to help us differentiate // if the writes are "early" or "on-time" during read leveling always @(posedge clk) if (rst || rdlvl_stg1_rank_done) first_rdlvl_pat_r <= #TCQ 1'b1; else if (phy_wrdata_en && (init_state_r == INIT_RDLVL_STG1_WRITE)) first_rdlvl_pat_r <= #TCQ 1'b0; always @(posedge clk) if (rst || wrcal_resume || (init_state_r == INIT_WRCAL_ACT_WAIT)) first_wrcal_pat_r <= #TCQ 1'b1; else if (phy_wrdata_en && (init_state_r == INIT_WRCAL_WRITE)) first_wrcal_pat_r <= #TCQ 1'b0; generate if ((CLK_PERIOD/nCK_PER_CLK > 2500) && (nCK_PER_CLK == 2)) begin: wrdq_div2_2to1_rdlvl_first always @(posedge clk) if (~oclkdelay_calib_done) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hF}}, {DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}}, {DQ_WIDTH/4{4'h0}}}; else if (!rdlvl_stg1_done) begin // The 16 words for stage 1 write data in 2:1 mode is written // over 4 consecutive controller clock cycles. Note that write // data follows phy_wrdata_en by one clock cycle case (wrdata_pat_cnt) 2'b00: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h3}}, {DQ_WIDTH/4{4'h9}}}; end 2'b01: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hC}}}; end 2'b10: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h1}}, {DQ_WIDTH/4{4'hB}}}; end 2'b11: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hC}}}; end endcase end else if (!prbs_rdlvl_done && ~phy_data_full) begin // prbs_o is 8-bits wide hence {DQ_WIDTH/8{prbs_o}} results in // prbs_o being concatenated 8 times resulting in DQ_WIDTH phy_wrdata <= #TCQ {{DQ_WIDTH/8{prbs_o[4*8-1:3*8]}}, {DQ_WIDTH/8{prbs_o[3*8-1:2*8]}}, {DQ_WIDTH/8{prbs_o[2*8-1:8]}}, {DQ_WIDTH/8{prbs_o[8-1:0]}}}; end else if (!wrcal_done) begin case (wrcal_pat_cnt) 2'b00: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}}}; end 2'b01: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h5}}}; end 2'b10: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h1}}, {DQ_WIDTH/4{4'hB}}}; end 2'b11: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h8}}, {DQ_WIDTH/4{4'hD}}, {DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h4}}}; end endcase end end else if ((CLK_PERIOD/nCK_PER_CLK > 2500) && (nCK_PER_CLK == 4)) begin: wrdq_div2_4to1_rdlvl_first always @(posedge clk) if (~oclkdelay_calib_done) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}}; else if (!rdlvl_stg1_done && ~phy_data_full) // write different sequence for very // first write to memory only. 
Used to help us differentiate // if the writes are "early" or "on-time" during read leveling if (first_rdlvl_pat_r) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}},{DQ_WIDTH/4{4'hC}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h3}},{DQ_WIDTH/4{4'h9}}}; else // For all others, change the first two words written in order // to differentiate the "early write" and "on-time write" // readback patterns during read leveling phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}},{DQ_WIDTH/4{4'hC}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h1}},{DQ_WIDTH/4{4'hB}}}; else if (!prbs_rdlvl_done && ~phy_data_full) // prbs_o is 8-bits wide hence {DQ_WIDTH/8{prbs_o}} results in // prbs_o being concatenated 8 times resulting in DQ_WIDTH phy_wrdata <= #TCQ {{DQ_WIDTH/8{prbs_o[8*8-1:7*8]}},{DQ_WIDTH/8{prbs_o[7*8-1:6*8]}}, {DQ_WIDTH/8{prbs_o[6*8-1:5*8]}},{DQ_WIDTH/8{prbs_o[5*8-1:4*8]}}, {DQ_WIDTH/8{prbs_o[4*8-1:3*8]}},{DQ_WIDTH/8{prbs_o[3*8-1:2*8]}}, {DQ_WIDTH/8{prbs_o[2*8-1:8]}},{DQ_WIDTH/8{prbs_o[8-1:0]}}}; else if (!wrcal_done) if (first_wrcal_pat_r) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}},{DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}},{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'h5}},{DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}},{DQ_WIDTH/4{4'hF}}}; else phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h8}},{DQ_WIDTH/4{4'hD}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h1}},{DQ_WIDTH/4{4'hB}}}; end else if (nCK_PER_CLK == 4) begin: wrdq_div1_4to1_wrcal_first always @(posedge clk) if ((~oclkdelay_calib_done) && (DRAM_TYPE == "DDR3")) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}},{DQ_WIDTH/4{4'h0}}}; else if ((!wrcal_done)&& (DRAM_TYPE == "DDR3")) begin if (extend_cal_pat) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}},{DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}},{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'h5}},{DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}},{DQ_WIDTH/4{4'hF}}}; else if (first_wrcal_pat_r) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}},{DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}},{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'h5}},{DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}},{DQ_WIDTH/4{4'hF}}}; else phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h8}},{DQ_WIDTH/4{4'hD}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h1}},{DQ_WIDTH/4{4'hB}}}; end else if (!rdlvl_stg1_done && ~phy_data_full) begin // write different sequence for very // first write to memory only. 
Used to help us differentiate // if the writes are "early" or "on-time" during read leveling if (first_rdlvl_pat_r) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}},{DQ_WIDTH/4{4'hC}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h3}},{DQ_WIDTH/4{4'h9}}}; else // For all others, change the first two words written in order // to differentiate the "early write" and "on-time write" // readback patterns during read leveling phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}},{DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}},{DQ_WIDTH/4{4'hC}}, {DQ_WIDTH/4{4'hE}},{DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h1}},{DQ_WIDTH/4{4'hB}}}; end else if (!prbs_rdlvl_done && ~phy_data_full) // prbs_o is 8-bits wide hence {DQ_WIDTH/8{prbs_o}} results in // prbs_o being concatenated 8 times resulting in DQ_WIDTH phy_wrdata <= #TCQ {{DQ_WIDTH/8{prbs_o[8*8-1:7*8]}},{DQ_WIDTH/8{prbs_o[7*8-1:6*8]}}, {DQ_WIDTH/8{prbs_o[6*8-1:5*8]}},{DQ_WIDTH/8{prbs_o[5*8-1:4*8]}}, {DQ_WIDTH/8{prbs_o[4*8-1:3*8]}},{DQ_WIDTH/8{prbs_o[3*8-1:2*8]}}, {DQ_WIDTH/8{prbs_o[2*8-1:8]}},{DQ_WIDTH/8{prbs_o[8-1:0]}}}; end else begin: wrdq_div1_2to1_wrcal_first always @(posedge clk) if ((~oclkdelay_calib_done)&& (DRAM_TYPE == "DDR3")) phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hF}}, {DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}}, {DQ_WIDTH/4{4'h0}}}; else if ((!wrcal_done) && (DRAM_TYPE == "DDR3"))begin case (wrcal_pat_cnt) 2'b00: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h5}}, {DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h0}}, {DQ_WIDTH/4{4'hF}}}; end 2'b01: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h6}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hA}}, {DQ_WIDTH/4{4'h5}}}; end 2'b10: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h1}}, {DQ_WIDTH/4{4'hB}}}; end 2'b11: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h8}}, {DQ_WIDTH/4{4'hD}}, {DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h4}}}; end endcase end else if (!rdlvl_stg1_done) begin // The 16 words for stage 1 write data in 2:1 mode is written // over 4 consecutive controller clock cycles. 
Note that write // data follows phy_wrdata_en by one clock cycle case (wrdata_pat_cnt) 2'b00: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h3}}, {DQ_WIDTH/4{4'h9}}}; end 2'b01: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hC}}}; end 2'b10: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'hE}}, {DQ_WIDTH/4{4'h7}}, {DQ_WIDTH/4{4'h1}}, {DQ_WIDTH/4{4'hB}}}; end 2'b11: begin phy_wrdata <= #TCQ {{DQ_WIDTH/4{4'h4}}, {DQ_WIDTH/4{4'h2}}, {DQ_WIDTH/4{4'h9}}, {DQ_WIDTH/4{4'hC}}}; end endcase end else if (!prbs_rdlvl_done && ~phy_data_full) begin // prbs_o is 8-bits wide hence {DQ_WIDTH/8{prbs_o}} results in // prbs_o being concatenated 8 times resulting in DQ_WIDTH phy_wrdata <= #TCQ {{DQ_WIDTH/8{prbs_o[4*8-1:3*8]}}, {DQ_WIDTH/8{prbs_o[3*8-1:2*8]}}, {DQ_WIDTH/8{prbs_o[2*8-1:8]}}, {DQ_WIDTH/8{prbs_o[8-1:0]}}}; end end endgenerate //*************************************************************************** // Memory control/address //*************************************************************************** // Phases [2] and [3] are always deasserted for 4:1 mode generate if (nCK_PER_CLK == 4) begin: gen_div4_ca_tieoff always @(posedge clk) begin phy_ras_n[3:2] <= #TCQ 3'b11; phy_cas_n[3:2] <= #TCQ 3'b11; phy_we_n[3:2] <= #TCQ 3'b11; end end endgenerate // Assert RAS when: (1) Loading MRS, (2) Activating Row, (3) Precharging // (4) auto refresh generate if (!(CWL_M % 2)) begin: even_cwl always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH))begin phy_ras_n[0] <= #TCQ 1'b0; phy_ras_n[1] <= #TCQ 1'b1; end else begin phy_ras_n[0] <= #TCQ 1'b1; phy_ras_n[1] <= #TCQ 1'b1; end end // Assert CAS when: (1) Loading MRS, (2) Issuing Read/Write command // (3) auto refresh always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r))begin phy_cas_n[0] <= #TCQ 1'b0; phy_cas_n[1] <= #TCQ 1'b1; end else begin phy_cas_n[0] <= #TCQ 1'b1; phy_cas_n[1] <= #TCQ 1'b1; end end // Assert WE when: (1) Loading MRS, (2) Issuing Write command (only // occur during read leveling), (3) Issuing ZQ Long Calib command, // (4) Precharge always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE)|| (rdlvl_wr && new_burst_r))begin phy_we_n[0] <= #TCQ 1'b0; phy_we_n[1] <= #TCQ 1'b1; end else begin phy_we_n[0] <= #TCQ 1'b1; phy_we_n[1] <= #TCQ 1'b1; end end end else begin: odd_cwl always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || 
(init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE) || (init_state_r == INIT_REFRESH))begin phy_ras_n[0] <= #TCQ 1'b1; phy_ras_n[1] <= #TCQ 1'b0; end else begin phy_ras_n[0] <= #TCQ 1'b1; phy_ras_n[1] <= #TCQ 1'b1; end end // Assert CAS when: (1) Loading MRS, (2) Issuing Read/Write command // (3) auto refresh always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_REFRESH) || (rdlvl_wr_rd && new_burst_r))begin phy_cas_n[0] <= #TCQ 1'b1; phy_cas_n[1] <= #TCQ 1'b0; end else begin phy_cas_n[0] <= #TCQ 1'b1; phy_cas_n[1] <= #TCQ 1'b1; end end // Assert WE when: (1) Loading MRS, (2) Issuing Write command (only // occur during read leveling), (3) Issuing ZQ Long Calib command, // (4) Precharge always @(posedge clk) begin if ((init_state_r == INIT_LOAD_MR) || (init_state_r == INIT_MPR_RDEN) || (init_state_r == INIT_MPR_DISABLE) || (init_state_r == INIT_REG_WRITE) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_WRLVL_START) || (init_state_r == INIT_WRLVL_LOAD_MR) || (init_state_r == INIT_WRLVL_LOAD_MR2) || (init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_DDR2_PRECHARGE)|| (rdlvl_wr && new_burst_r))begin phy_we_n[0] <= #TCQ 1'b1; phy_we_n[1] <= #TCQ 1'b0; end else begin phy_we_n[0] <= #TCQ 1'b1; phy_we_n[1] <= #TCQ 1'b1; end end end endgenerate // Assign calib_cmd for the command field in PHY_Ctl_Word always @(posedge clk) begin if (wr_level_dqs_asrt) begin // Request to toggle DQS during write leveling calib_cmd <= #TCQ 3'b001; if (CWL_M % 2) begin // odd write latency calib_data_offset_0 <= #TCQ CWL_M + 3; calib_data_offset_1 <= #TCQ CWL_M + 3; calib_data_offset_2 <= #TCQ CWL_M + 3; calib_cas_slot <= #TCQ 2'b01; end else begin // even write latency calib_data_offset_0 <= #TCQ CWL_M + 2; calib_data_offset_1 <= #TCQ CWL_M + 2; calib_data_offset_2 <= #TCQ CWL_M + 2; calib_cas_slot <= #TCQ 2'b00; end end else if (rdlvl_wr && new_burst_r) begin // Write Command calib_cmd <= #TCQ 3'b001; if (CWL_M % 2) begin // odd write latency calib_data_offset_0 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 3 : CWL_M - 1; calib_data_offset_1 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 3 : CWL_M - 1; calib_data_offset_2 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 3 : CWL_M - 1; calib_cas_slot <= #TCQ 2'b01; end else begin // even write latency calib_data_offset_0 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 2 : CWL_M - 2 ; calib_data_offset_1 <= #TCQ (nCK_PER_CLK == 4) ? CWL_M + 2 : CWL_M - 2 ; calib_data_offset_2 <= #TCQ (nCK_PER_CLK == 4) ? 
CWL_M + 2 : CWL_M - 2 ; calib_cas_slot <= #TCQ 2'b00; end end else if (rdlvl_rd && new_burst_r) begin // Read Command calib_cmd <= #TCQ 3'b011; if (CWL_M % 2) calib_cas_slot <= #TCQ 2'b01; else calib_cas_slot <= #TCQ 2'b00; if (~pi_calib_done_r1) begin calib_data_offset_0 <= #TCQ 6'd0; calib_data_offset_1 <= #TCQ 6'd0; calib_data_offset_2 <= #TCQ 6'd0; end else if (~pi_dqs_found_done_r1) begin calib_data_offset_0 <= #TCQ rd_data_offset_0; calib_data_offset_1 <= #TCQ rd_data_offset_1; calib_data_offset_2 <= #TCQ rd_data_offset_2; end else begin calib_data_offset_0 <= #TCQ rd_data_offset_ranks_0[6*chip_cnt_r+:6]; calib_data_offset_1 <= #TCQ rd_data_offset_ranks_1[6*chip_cnt_r+:6]; calib_data_offset_2 <= #TCQ rd_data_offset_ranks_2[6*chip_cnt_r+:6]; end end else begin // Non-Data Commands like NOP, MRS, ZQ Long Cal, Precharge, // Active, Refresh calib_cmd <= #TCQ 3'b100; calib_data_offset_0 <= #TCQ 6'd0; calib_data_offset_1 <= #TCQ 6'd0; calib_data_offset_2 <= #TCQ 6'd0; if (CWL_M % 2) calib_cas_slot <= #TCQ 2'b01; else calib_cas_slot <= #TCQ 2'b00; end end // Write Enable to PHY_Control FIFO always asserted // No danger of this FIFO being Full with 4:1 sync clock ratio // This is also the write enable to the command OUT_FIFO always @(posedge clk) begin if (rst) begin calib_ctl_wren <= #TCQ 1'b0; calib_cmd_wren <= #TCQ 1'b0; calib_seq <= #TCQ 2'b00; end else if (cnt_pwron_cke_done_r && phy_ctl_ready && ~(phy_ctl_full || phy_cmd_full )) begin calib_ctl_wren <= #TCQ 1'b1; calib_cmd_wren <= #TCQ 1'b1; calib_seq <= #TCQ calib_seq + 1; end else begin calib_ctl_wren <= #TCQ 1'b0; calib_cmd_wren <= #TCQ 1'b0; calib_seq <= #TCQ calib_seq; end end generate genvar rnk_i; for (rnk_i = 0; rnk_i < 4; rnk_i = rnk_i + 1) begin: gen_rnk always @(posedge clk) begin if (rst) begin mr2_r[rnk_i] <= #TCQ 2'b00; mr1_r[rnk_i] <= #TCQ 3'b000; end else begin mr2_r[rnk_i] <= #TCQ tmp_mr2_r[rnk_i]; mr1_r[rnk_i] <= #TCQ tmp_mr1_r[rnk_i]; end end end endgenerate // ODT assignment based on slot config and slot present // For single slot systems slot_1_present input will be ignored // Assuming component interfaces to be single slot systems generate if (nSLOTS == 1) begin: gen_single_slot_odt always @(posedge clk) begin if (rst) begin tmp_mr2_r[1] <= #TCQ 2'b00; tmp_mr2_r[2] <= #TCQ 2'b00; tmp_mr2_r[3] <= #TCQ 2'b00; tmp_mr1_r[1] <= #TCQ 3'b000; tmp_mr1_r[2] <= #TCQ 3'b000; tmp_mr1_r[3] <= #TCQ 3'b000; phy_tmp_cs1_r <= #TCQ {CS_WIDTH*nCS_PER_RANK{1'b1}}; phy_tmp_odt_r <= #TCQ 4'b0000; phy_tmp_odt_r1 <= #TCQ phy_tmp_odt_r; end else begin case ({slot_0_present[0],slot_0_present[1], slot_0_present[2],slot_0_present[3]}) // Single slot configuration with quad rank // Assuming same behavior as single slot dual rank for now // DDR2 does not have quad rank parts 4'b1111: begin if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 RTT_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 
2'b01 : 2'b10; //Rank0 RTT_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end phy_tmp_odt_r <= #TCQ 4'b0001; // Chip Select assignments phy_tmp_cs1_r[((chip_cnt_r*nCS_PER_RANK) ) +: nCS_PER_RANK] <= #TCQ 'b0; end // Single slot configuration with single rank 4'b1000: begin phy_tmp_odt_r <= #TCQ 4'b0001; if ((REG_CTRL == "ON") && (nCS_PER_RANK > 1)) begin phy_tmp_cs1_r[chip_cnt_r] <= #TCQ 1'b0; end else begin phy_tmp_cs1_r <= #TCQ {CS_WIDTH*nCS_PER_RANK{1'b0}}; end if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && ((cnt_init_mr_r == 2'd0) || (USE_ODT_PORT == 1)))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 RTT_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 RTT_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end // Single slot configuration with dual rank 4'b1100: begin phy_tmp_odt_r <= #TCQ 4'b0001; // Chip Select assignments phy_tmp_cs1_r[((chip_cnt_r*nCS_PER_RANK) ) +: nCS_PER_RANK] <= #TCQ 'b0; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end default: begin phy_tmp_odt_r <= #TCQ 4'b0001; phy_tmp_cs1_r <= #TCQ {CS_WIDTH*nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done)) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end endcase end end end else if (nSLOTS == 2) begin: gen_dual_slot_odt always @ (posedge clk) begin if (rst) begin tmp_mr2_r[1] <= #TCQ 2'b00; tmp_mr2_r[2] <= #TCQ 2'b00; tmp_mr2_r[3] <= #TCQ 2'b00; tmp_mr1_r[1] <= #TCQ 3'b000; tmp_mr1_r[2] <= #TCQ 3'b000; tmp_mr1_r[3] <= #TCQ 3'b000; phy_tmp_odt_r <= #TCQ 4'b0000; phy_tmp_cs1_r <= #TCQ {CS_WIDTH*nCS_PER_RANK{1'b1}}; phy_tmp_odt_r1 <= #TCQ phy_tmp_odt_r; end else begin case ({slot_0_present[0],slot_0_present[1], slot_1_present[0],slot_1_present[1]}) // Two slot configuration, one slot present, single rank 4'b10_00: begin if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // odt turned on only during write phy_tmp_odt_r <= #TCQ 4'b0001; end phy_tmp_cs1_r <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done)) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 
2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end 4'b00_10: begin //Rank1 ODT enabled if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // odt turned on only during write phy_tmp_odt_r <= #TCQ 4'b0001; end phy_tmp_cs1_r <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done)) begin //Rank1 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank1 Rtt_NOM defaults to 120 ohms tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end // Two slot configuration, one slot present, dual rank 4'b00_11: begin if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // odt turned on only during write phy_tmp_odt_r <= #TCQ 4'b0001; end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end 4'b11_00: begin if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // odt turned on only during write phy_tmp_odt_r <= #TCQ 4'b0001; end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank1 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank1 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 
2'b01 : 2'b10; //Rank1 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end end // Two slot configuration, one rank per slot 4'b10_10: begin if(DRAM_TYPE == "DDR2")begin if(chip_cnt_r == 2'b00)begin phy_tmp_odt_r <= #TCQ 4'b0010; //bit0 for rank0 end else begin phy_tmp_odt_r <= #TCQ 4'b0001; //bit0 for rank0 end end else begin if(init_state_r == INIT_WRLVL_WAIT) phy_tmp_odt_r <= #TCQ 4'b0011; // rank 0/1 odt0 else if((init_next_state == INIT_RDLVL_STG1_WRITE) || (init_next_state == INIT_WRCAL_WRITE) || (init_next_state == INIT_OCLKDELAY_WRITE)) phy_tmp_odt_r <= #TCQ 4'b0011; // bit0 for rank0/1 (write) else if ((init_next_state == INIT_PI_PHASELOCK_READS) || (init_next_state == INIT_MPR_READ) || (init_next_state == INIT_RDLVL_STG1_READ) || (init_next_state == INIT_RDLVL_STG2_READ) || (init_next_state == INIT_OCLKDELAY_READ) || (init_next_state == INIT_WRCAL_READ) || (init_next_state == INIT_WRCAL_MULT_READS)) phy_tmp_odt_r <= #TCQ 4'b0010; // bit0 for rank1 (rank 0 rd) end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_WR == "60") ? 3'b001 : (RTT_WR == "120") ? 3'b010 : 3'b000; //Rank1 Dynamic ODT disabled tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000; //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000; end end // Two Slots - One slot with dual rank and other with single rank 4'b10_11: begin //Rank3 Rtt_NOM tmp_mr1_r[2] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000; tmp_mr2_r[2] <= #TCQ 2'b00; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; //Rank1 Dynamic ODT disabled tmp_mr2_r[1] <= #TCQ 2'b00; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000; //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 
2'b01 : 2'b10; //Rank1 Rtt_NOM after write leveling completes tmp_mr1_r[1] <= #TCQ 3'b000; end //Slot1 Rank1 or Rank3 is being written if(DRAM_TYPE == "DDR2")begin if(chip_cnt_r == 2'b00)begin phy_tmp_odt_r <= #TCQ 4'b0010; end else begin phy_tmp_odt_r <= #TCQ 4'b0001; end end else begin if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin if (chip_cnt_r[0] == 1'b1) begin phy_tmp_odt_r <= #TCQ 4'b0011; //Slot0 Rank0 is being written end else begin phy_tmp_odt_r <= #TCQ 4'b0101; // ODT for ranks 0 and 2 aserted end end else if ((init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS))begin if (chip_cnt_r == 2'b00) begin phy_tmp_odt_r <= #TCQ 4'b0100; end else begin phy_tmp_odt_r <= #TCQ 4'b0001; end end end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; end // Two Slots - One slot with dual rank and other with single rank 4'b11_10: begin //Rank2 Rtt_NOM tmp_mr1_r[2] <= #TCQ (RTT_NOM2 == "60") ? 3'b001 : (RTT_NOM2 == "120") ? 3'b010 : (RTT_NOM2 == "20") ? 3'b100 : (RTT_NOM2 == "30") ? 3'b101 : (RTT_NOM2 == "40") ? 3'b011: 3'b000; tmp_mr2_r[2] <= #TCQ 2'b00; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; //Rank1 Dynamic ODT disabled tmp_mr2_r[1] <= #TCQ 2'b00; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011: 3'b000; //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end if(DRAM_TYPE == "DDR2")begin if(chip_cnt_r[1] == 1'b1)begin phy_tmp_odt_r <= #TCQ 4'b0001; end else begin phy_tmp_odt_r <= #TCQ 4'b0100; // rank 2 ODT asserted end end else begin if (// wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin if (chip_cnt_r[1] == 1'b1) begin phy_tmp_odt_r <= #TCQ 4'b0110; end else begin phy_tmp_odt_r <= #TCQ 4'b0101; end end else if ((init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS)) begin if (chip_cnt_r[1] == 1'b1) begin phy_tmp_odt_r[(1*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ 4'b0010; end else begin phy_tmp_odt_r <= #TCQ 4'b0100; end end end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; end // Two Slots - two ranks per slot 4'b11_11: begin //Rank2 Rtt_NOM tmp_mr1_r[2] <= #TCQ (RTT_NOM2 == "60") ? 3'b001 : (RTT_NOM2 == "120") ? 
3'b010 : (RTT_NOM2 == "20") ? 3'b100 : (RTT_NOM2 == "30") ? 3'b101 : (RTT_NOM2 == "40") ? 3'b011 : 3'b000; //Rank3 Rtt_NOM tmp_mr1_r[3] <= #TCQ (RTT_NOM3 == "60") ? 3'b001 : (RTT_NOM3 == "120") ? 3'b010 : (RTT_NOM3 == "20") ? 3'b100 : (RTT_NOM3 == "30") ? 3'b101 : (RTT_NOM3 == "40") ? 3'b011 : 3'b000; tmp_mr2_r[2] <= #TCQ 2'b00; tmp_mr2_r[3] <= #TCQ 2'b00; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done && (wrlvl_rank_cntr==3'd0))) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; //Rank1 Dynamic ODT disabled tmp_mr2_r[1] <= #TCQ 2'b00; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; end else begin //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM after write leveling completes tmp_mr1_r[1] <= #TCQ 3'b000; //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM after write leveling completes tmp_mr1_r[0] <= #TCQ 3'b000; end if(DRAM_TYPE == "DDR2")begin if(chip_cnt_r[1] == 1'b1)begin phy_tmp_odt_r <= #TCQ 4'b0001; end else begin phy_tmp_odt_r <= #TCQ 4'b0100; end end else begin if (//wrlvl_odt || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin //Slot1 Rank1 or Rank3 is being written if (chip_cnt_r[0] == 1'b1) begin phy_tmp_odt_r <= #TCQ 4'b0110; //Slot0 Rank0 or Rank2 is being written end else begin phy_tmp_odt_r <= #TCQ 4'b1001; end end else if ((init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS))begin //Slot1 Rank1 or Rank3 is being read if (chip_cnt_r[0] == 1'b1) begin phy_tmp_odt_r <= #TCQ 4'b0100; //Slot0 Rank0 or Rank2 is being read end else begin phy_tmp_odt_r <= #TCQ 4'b1000; end end end // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; end default: begin phy_tmp_odt_r <= #TCQ 4'b1111; // Chip Select assignments phy_tmp_cs1_r[(chip_cnt_r*nCS_PER_RANK) +: nCS_PER_RANK] <= #TCQ {nCS_PER_RANK{1'b0}}; if ((RTT_WR == "OFF") || ((WRLVL=="ON") && ~wrlvl_done)) begin //Rank0 Dynamic ODT disabled tmp_mr2_r[0] <= #TCQ 2'b00; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : 3'b000; //Rank1 Dynamic ODT disabled tmp_mr2_r[1] <= #TCQ 2'b00; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "40") ? 3'b011 : (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "60") ? 3'b010 : 3'b000; end else begin //Rank0 Dynamic ODT defaults to 120 ohms tmp_mr2_r[0] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank0 Rtt_NOM tmp_mr1_r[0] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 3'b011 : 3'b000; //Rank1 Dynamic ODT defaults to 120 ohms tmp_mr2_r[1] <= #TCQ (RTT_WR == "60") ? 2'b01 : 2'b10; //Rank1 Rtt_NOM tmp_mr1_r[1] <= #TCQ (RTT_NOM_int == "60") ? 3'b001 : (RTT_NOM_int == "120") ? 3'b010 : (RTT_NOM_int == "20") ? 3'b100 : (RTT_NOM_int == "30") ? 3'b101 : (RTT_NOM_int == "40") ? 
3'b011 : 3'b000; end end endcase end end end endgenerate // PHY only supports two ranks. // calib_aux_out[0] is CKE for rank 0 and calib_aux_out[1] is ODT for rank 0 // calib_aux_out[2] is CKE for rank 1 and calib_aux_out[3] is ODT for rank 1 generate if(CKE_ODT_AUX == "FALSE") begin if ((nSLOTS == 1) && (RANKS < 2)) begin always @(posedge clk) if (rst) begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}} ; calib_odt <= 2'b00 ; end else begin if (cnt_pwron_cke_done_r /*&& ~cnt_pwron_cke_done_r1*/)begin calib_cke <= #TCQ {nCK_PER_CLK{1'b1}}; end else begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}}; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF"))/* || wrlvl_rank_done || wrlvl_rank_done_r1 || (wrlvl_done && !wrlvl_done_r)*/) && (DRAM_TYPE == "DDR3")) begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt ) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_WRITE_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_WRITE_READ) || (init_state_r == INIT_OCLKDELAY_WRITE)|| (init_state_r == INIT_OCLKDELAY_WRITE_WAIT))) begin // Quad rank in a single slot calib_odt[0] <= #TCQ phy_tmp_odt_r[0]; calib_odt[1] <= #TCQ phy_tmp_odt_r[1]; end else begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end end end else if ((nSLOTS == 1) && (RANKS <= 2)) begin always @(posedge clk) if (rst) begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}} ; calib_odt <= 2'b00 ; end else begin if (cnt_pwron_cke_done_r /*&& ~cnt_pwron_cke_done_r1*/)begin calib_cke <= #TCQ {nCK_PER_CLK{1'b1}}; end else begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}}; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF"))/* || wrlvl_rank_done_r2 || (wrlvl_done && !wrlvl_done_r)*/) && (DRAM_TYPE == "DDR3")) begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt)|| (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_WRITE_READ) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_WRITE_READ) || (init_state_r == INIT_OCLKDELAY_WRITE)|| (init_state_r == INIT_OCLKDELAY_WRITE_WAIT))) begin // Dual rank in a single slot calib_odt[0] <= #TCQ phy_tmp_odt_r[0]; calib_odt[1] <= #TCQ phy_tmp_odt_r[1]; end else begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end end end else if ((nSLOTS == 2) && (RANKS == 2)) begin always @(posedge clk) if (rst)begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}} ; calib_odt <= 2'b00 ; end else begin if (cnt_pwron_cke_done_r /*&& ~cnt_pwron_cke_done_r1*/)begin calib_cke <= #TCQ {nCK_PER_CLK{1'b1}}; end else begin calib_cke <= #TCQ {nCK_PER_CLK{1'b0}}; end if (((DRAM_TYPE == "DDR2") && (RTT_NOM == "DISABLED")) || ((DRAM_TYPE == "DDR3") && (RTT_NOM == "DISABLED") && (RTT_WR == "OFF"))) begin calib_odt[0] <= #TCQ 1'b0; calib_odt[1] <= #TCQ 1'b0; end else if (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE)) begin // Quad rank in a single slot if (nCK_PER_CLK == 2) begin calib_odt[0] <= #TCQ (!calib_odt[0]) ? phy_tmp_odt_r[0] : 1'b0; calib_odt[1] <= #TCQ (!calib_odt[1]) ? 
phy_tmp_odt_r[1] : 1'b0; end else begin calib_odt[0] <= #TCQ phy_tmp_odt_r[0]; calib_odt[1] <= #TCQ phy_tmp_odt_r[1]; end // Turn on for idle rank during read if dynamic ODT is enabled in DDR3 end else if(((DRAM_TYPE == "DDR3") && (RTT_WR != "OFF")) && ((init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_MPR_READ) || (init_state_r == INIT_RDLVL_STG1_READ) || (init_state_r == INIT_RDLVL_STG2_READ) || (init_state_r == INIT_OCLKDELAY_READ) || (init_state_r == INIT_WRCAL_READ) || (init_state_r == INIT_WRCAL_MULT_READS))) begin if (nCK_PER_CLK == 2) begin calib_odt[0] <= #TCQ (!calib_odt[0]) ? phy_tmp_odt_r[0] : 1'b0; calib_odt[1] <= #TCQ (!calib_odt[1]) ? phy_tmp_odt_r[1] : 1'b0; end else begin calib_odt[0] <= #TCQ phy_tmp_odt_r[0]; calib_odt[1] <= #TCQ phy_tmp_odt_r[1]; end // disable well before next command and before disabling write leveling end else if(cnt_cmd_done_m7_r || (init_state_r == INIT_WRLVL_WAIT && ~wrlvl_odt)) calib_odt <= #TCQ 2'b00; end end end else begin//USE AUX OUTPUT for routing CKE and ODT. if ((nSLOTS == 1) && (RANKS < 2)) begin always @(posedge clk) if (rst) begin calib_aux_out <= #TCQ 4'b0000; end else begin if (cnt_pwron_cke_done_r && ~cnt_pwron_cke_done_r1)begin calib_aux_out[0] <= #TCQ 1'b1; calib_aux_out[2] <= #TCQ 1'b1; end else begin calib_aux_out[0] <= #TCQ 1'b0; calib_aux_out[2] <= #TCQ 1'b0; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF")) || wrlvl_rank_done || wrlvl_rank_done_r1 || (wrlvl_done && !wrlvl_done_r)) && (DRAM_TYPE == "DDR3")) begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE))) begin // Quad rank in a single slot calib_aux_out[1] <= #TCQ phy_tmp_odt_r[0]; calib_aux_out[3] <= #TCQ phy_tmp_odt_r[1]; end else begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end end end else if ((nSLOTS == 1) && (RANKS <= 2)) begin always @(posedge clk) if (rst) begin calib_aux_out <= #TCQ 4'b0000; end else begin if (cnt_pwron_cke_done_r && ~cnt_pwron_cke_done_r1)begin calib_aux_out[0] <= #TCQ 1'b1; calib_aux_out[2] <= #TCQ 1'b1; end else begin calib_aux_out[0] <= #TCQ 1'b0; calib_aux_out[2] <= #TCQ 1'b0; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF")) || wrlvl_rank_done_r2 || (wrlvl_done && !wrlvl_done_r)) && (DRAM_TYPE == "DDR3")) begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE))) begin // Dual rank in a single slot calib_aux_out[1] <= #TCQ phy_tmp_odt_r[0]; calib_aux_out[3] <= #TCQ phy_tmp_odt_r[1]; end else begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end end end else if ((nSLOTS == 2) && (RANKS == 2)) begin always @(posedge clk) if (rst) calib_aux_out <= #TCQ 4'b0000; else begin if (cnt_pwron_cke_done_r && ~cnt_pwron_cke_done_r1)begin calib_aux_out[0] <= #TCQ 1'b1; calib_aux_out[2] <= #TCQ 1'b1; end else begin calib_aux_out[0] <= #TCQ 1'b0; calib_aux_out[2] <= #TCQ 1'b0; end if ((((RTT_NOM == "DISABLED") && (RTT_WR == "OFF")) || wrlvl_rank_done_r2 || (wrlvl_done && !wrlvl_done_r)) && (DRAM_TYPE == "DDR3")) begin calib_aux_out[1] <= 
#TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end else if (((DRAM_TYPE == "DDR3") ||((RTT_NOM != "DISABLED") && (DRAM_TYPE == "DDR2"))) && (((init_state_r == INIT_WRLVL_WAIT) && wrlvl_odt) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_OCLKDELAY_WRITE))) begin // Quad rank in a single slot if (nCK_PER_CLK == 2) begin calib_aux_out[1] <= #TCQ (!calib_aux_out[1]) ? phy_tmp_odt_r[0] : 1'b0; calib_aux_out[3] <= #TCQ (!calib_aux_out[3]) ? phy_tmp_odt_r[1] : 1'b0; end else begin calib_aux_out[1] <= #TCQ phy_tmp_odt_r[0]; calib_aux_out[3] <= #TCQ phy_tmp_odt_r[1]; end end else begin calib_aux_out[1] <= #TCQ 1'b0; calib_aux_out[3] <= #TCQ 1'b0; end end end end endgenerate //***************************************************************** // memory address during init //***************************************************************** always @(posedge clk) phy_data_full_r <= #TCQ phy_data_full; always @(burst_addr_r or cnt_init_mr_r or chip_cnt_r or wrcal_wr_cnt or ddr2_refresh_flag_r or init_state_r or load_mr0 or phy_data_full_r or load_mr1 or load_mr2 or load_mr3 or new_burst_r or phy_address or mr1_r[0][0] or mr1_r[0][1] or mr1_r[0][2] or mr1_r[1][0] or mr1_r[1][1] or mr1_r[1][2] or mr1_r[2][0] or mr1_r[2][1] or mr1_r[2][2] or mr1_r[3][0] or mr1_r[3][1] or mr1_r[3][2] or mr2_r[chip_cnt_r] or reg_ctrl_cnt_r or stg1_wr_rd_cnt or oclk_wr_cnt or rdlvl_stg1_done or prbs_rdlvl_done or pi_dqs_found_done or rdlvl_wr_rd)begin // Bus 0 for address/bank never used address_w = 'b0; bank_w = 'b0; if ((init_state_r == INIT_PRECHARGE) || (init_state_r == INIT_ZQCL) || (init_state_r == INIT_DDR2_PRECHARGE)) begin // Set A10=1 for ZQ long calibration or Precharge All address_w = 'b0; address_w[10] = 1'b1; bank_w = 'b0; end else if (init_state_r == INIT_WRLVL_START) begin // Enable wrlvl in MR1 bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; address_w[2] = mr1_r[chip_cnt_r][0]; address_w[6] = mr1_r[chip_cnt_r][1]; address_w[9] = mr1_r[chip_cnt_r][2]; address_w[7] = 1'b1; end else if (init_state_r == INIT_WRLVL_LOAD_MR) begin // Finished with write leveling, disable wrlvl in MR1 // For single rank disable Rtt_Nom bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; address_w[2] = mr1_r[chip_cnt_r][0]; address_w[6] = mr1_r[chip_cnt_r][1]; address_w[9] = mr1_r[chip_cnt_r][2]; end else if (init_state_r == INIT_WRLVL_LOAD_MR2) begin // Set RTT_WR in MR2 after write leveling disabled bank_w[1:0] = 2'b10; address_w = load_mr2[ROW_WIDTH-1:0]; address_w[10:9] = mr2_r[chip_cnt_r]; end else if (init_state_r == INIT_MPR_READ) begin address_w = 'b0; bank_w = 'b0; end else if (init_state_r == INIT_MPR_RDEN) begin // Enable MPR read with LMR3 and A2=1 bank_w[BANK_WIDTH-1:0] = 'd3; address_w = {ROW_WIDTH{1'b0}}; address_w[2] = 1'b1; end else if (init_state_r == INIT_MPR_DISABLE) begin // Disable MPR read with LMR3 and A2=0 bank_w[BANK_WIDTH-1:0] = 'd3; address_w = {ROW_WIDTH{1'b0}}; end else if ((init_state_r == INIT_REG_WRITE)& (DRAM_TYPE == "DDR3"))begin // bank_w is assigned a 3 bit value. In some // DDR2 cases there will be only two bank bits. 
//Qualifying the condition with DDR3 bank_w = 'b0; address_w = 'b0; case (reg_ctrl_cnt_r) REG_RC0[2:0]: address_w[4:0] = REG_RC0[4:0]; REG_RC1[2:0]:begin address_w[4:0] = REG_RC1[4:0]; bank_w = REG_RC1[7:5]; end REG_RC2[2:0]: address_w[4:0] = REG_RC2[4:0]; REG_RC3[2:0]: address_w[4:0] = REG_RC3[4:0]; REG_RC4[2:0]: address_w[4:0] = REG_RC4[4:0]; REG_RC5[2:0]: address_w[4:0] = REG_RC5[4:0]; endcase end else if (init_state_r == INIT_LOAD_MR) begin // If loading mode register, look at cnt_init_mr to determine // which MR is currently being programmed address_w = 'b0; bank_w = 'b0; if(DRAM_TYPE == "DDR3")begin if(rdlvl_stg1_done && prbs_rdlvl_done && pi_dqs_found_done)begin // end of the calibration programming correct // burst length if (TEST_AL == "0") begin bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; address_w[8]= 1'b0; //Don't reset DLL end else begin // programming correct AL value bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; if (TEST_AL == "CL-1") address_w[4:3]= 2'b01; // AL="CL-1" else address_w[4:3]= 2'b10; // AL="CL-2" end end else begin case (cnt_init_mr_r) INIT_CNT_MR2: begin bank_w[1:0] = 2'b10; address_w = load_mr2[ROW_WIDTH-1:0]; address_w[10:9] = mr2_r[chip_cnt_r]; end INIT_CNT_MR3: begin bank_w[1:0] = 2'b11; address_w = load_mr3[ROW_WIDTH-1:0]; end INIT_CNT_MR1: begin bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; address_w[2] = mr1_r[chip_cnt_r][0]; address_w[6] = mr1_r[chip_cnt_r][1]; address_w[9] = mr1_r[chip_cnt_r][2]; end INIT_CNT_MR0: begin bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; // fixing it to BL8 for calibration address_w[1:0] = 2'b00; end default: begin bank_w = {BANK_WIDTH{1'bx}}; address_w = {ROW_WIDTH{1'bx}}; end endcase end end else begin // DDR2 case (cnt_init_mr_r) INIT_CNT_MR2: begin if(~ddr2_refresh_flag_r)begin bank_w[1:0] = 2'b10; address_w = load_mr2[ROW_WIDTH-1:0]; end else begin // second set of lm commands bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; address_w[8]= 1'b0; //MRS command without resetting DLL end end INIT_CNT_MR3: begin if(~ddr2_refresh_flag_r)begin bank_w[1:0] = 2'b11; address_w = load_mr3[ROW_WIDTH-1:0]; end else begin // second set of lm commands bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; address_w[8]= 1'b0; //MRS command without resetting DLL. Repeted again // because there is an extra state. end end INIT_CNT_MR1: begin bank_w[1:0] = 2'b01; if(~ddr2_refresh_flag_r)begin address_w = load_mr1[ROW_WIDTH-1:0]; end else begin // second set of lm commands address_w = load_mr1[ROW_WIDTH-1:0]; address_w[9:7] = 3'b111; //OCD default state end end INIT_CNT_MR0: begin if(~ddr2_refresh_flag_r)begin bank_w[1:0] = 2'b00; address_w = load_mr0[ROW_WIDTH-1:0]; end else begin // second set of lm commands bank_w[1:0] = 2'b01; address_w = load_mr1[ROW_WIDTH-1:0]; if((chip_cnt_r == 2'd1) || (chip_cnt_r == 2'd3))begin // always disable odt for rank 1 and rank 3 as per SPEC address_w[2] = 'b0; address_w[6] = 'b0; end //OCD exit end end default: begin bank_w = {BANK_WIDTH{1'bx}}; address_w = {ROW_WIDTH{1'bx}}; end endcase end end else if ((init_state_r == INIT_PI_PHASELOCK_READS) || (init_state_r == INIT_RDLVL_STG1_WRITE) || (init_state_r == INIT_RDLVL_STG1_READ)) begin // Writing and reading PRBS pattern for read leveling stage 1 // Need to support burst length 4 or 8. 
PRBS pattern will be // written to entire row and read back from the same row repeatedly bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w[ROW_WIDTH-1:COL_WIDTH] = {ROW_WIDTH-COL_WIDTH{1'b0}}; if (((stg1_wr_rd_cnt == NUM_STG1_WR_RD) && ~rdlvl_stg1_done) || (stg1_wr_rd_cnt == 'd128)) address_w[COL_WIDTH-1:0] = {COL_WIDTH{1'b0}}; else if (phy_data_full_r || (!new_burst_r)) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0]; else if ((stg1_wr_rd_cnt >= 9'd0) && new_burst_r && ~phy_data_full_r) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0] + ADDR_INC; end else if ((init_state_r == INIT_OCLKDELAY_WRITE) || (init_state_r == INIT_OCLKDELAY_READ)) begin bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w[ROW_WIDTH-1:COL_WIDTH] = {ROW_WIDTH-COL_WIDTH{1'b0}}; if (oclk_wr_cnt == NUM_STG1_WR_RD) address_w[COL_WIDTH-1:0] = {COL_WIDTH{1'b0}}; else if (phy_data_full_r || (!new_burst_r)) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0]; else if ((oclk_wr_cnt >= 4'd0) && new_burst_r && ~phy_data_full_r) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0] + ADDR_INC; end else if ((init_state_r == INIT_WRCAL_WRITE) || (init_state_r == INIT_WRCAL_READ)) begin bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w[ROW_WIDTH-1:COL_WIDTH] = {ROW_WIDTH-COL_WIDTH{1'b0}}; if (wrcal_wr_cnt == NUM_STG1_WR_RD) address_w[COL_WIDTH-1:0] = {COL_WIDTH{1'b0}}; else if (phy_data_full_r || (!new_burst_r)) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0]; else if ((wrcal_wr_cnt >= 4'd0) && new_burst_r && ~phy_data_full_r) address_w[COL_WIDTH-1:0] = phy_address[COL_WIDTH-1:0] + ADDR_INC; end else if ((init_state_r == INIT_WRCAL_MULT_READS) || (init_state_r == INIT_RDLVL_STG2_READ)) begin // when writing or reading back training pattern for read leveling stage2 // need to support burst length of 4 or 8. This may mean issuing // multiple commands to cover the entire range of addresses accessed // during read leveling. // Hard coding A[12] to 1 so that it will always be burst length of 8 // for DDR3. Does not have any effect on DDR2. bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w[ROW_WIDTH-1:COL_WIDTH] = {ROW_WIDTH-COL_WIDTH{1'b0}}; address_w[COL_WIDTH-1:0] = {CALIB_COL_ADD[COL_WIDTH-1:3],burst_addr_r, 3'b000}; address_w[12] = 1'b1; end else if ((init_state_r == INIT_RDLVL_ACT) || (init_state_r == INIT_WRCAL_ACT) || (init_state_r == INIT_OCLKDELAY_ACT)) begin bank_w = CALIB_BA_ADD[BANK_WIDTH-1:0]; address_w = CALIB_ROW_ADD[ROW_WIDTH-1:0]; end else begin bank_w = {BANK_WIDTH{1'bx}}; address_w = {ROW_WIDTH{1'bx}}; end end // registring before sending out generate genvar r,s; if ((DRAM_TYPE != "DDR3") || (CA_MIRROR != "ON")) begin: gen_no_mirror for (r = 0; r < nCK_PER_CLK; r = r + 1) begin: div_clk_loop always @(posedge clk) begin phy_address[(r*ROW_WIDTH) +: ROW_WIDTH] <= #TCQ address_w; phy_bank[(r*BANK_WIDTH) +: BANK_WIDTH] <= #TCQ bank_w; end end end else begin: gen_mirror // Control/addressing mirroring (optional for DDR3 dual rank DIMMs) // Mirror for the 2nd rank only. Logic needs to be enhanced to account // for multiple slots, currently only supports one slot, 2-rank config for (r = 0; r < nCK_PER_CLK; r = r + 1) begin: gen_ba_div_clk_loop for (s = 0; s < BANK_WIDTH; s = s + 1) begin: gen_ba always @(posedge clk) if (chip_cnt_r == 2'b00) begin phy_bank[(r*BANK_WIDTH) + s] <= #TCQ bank_w[s]; end else begin phy_bank[(r*BANK_WIDTH) + s] <= #TCQ bank_w[(s == 0) ? 1 : ((s == 1) ? 
0 : s)]; end end end for (r = 0; r < nCK_PER_CLK; r = r + 1) begin: gen_addr_div_clk_loop for (s = 0; s < ROW_WIDTH; s = s + 1) begin: gen_addr always @(posedge clk) if (chip_cnt_r == 2'b00) begin phy_address[(r*ROW_WIDTH) + s] <= #TCQ address_w[s]; end else begin phy_address[(r*ROW_WIDTH) + s] <= #TCQ address_w[ (s == 3) ? 4 : ((s == 4) ? 3 : ((s == 5) ? 6 : ((s == 6) ? 5 : ((s == 7) ? 8 : ((s == 8) ? 7 : s)))))]; end end end end endgenerate endmodule
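//*****************************************************************************
// Standalone illustration (not part of the MIG deliverable above): the
// address/bank mirroring performed in gen_mirror, rewritten as a small
// combinational helper so the bit swap is easier to see. The widths and the
// module/port names here are assumptions chosen for the example (16-bit row
// address, 3-bit bank address); the generate loops above apply the same swap
// per clock phase and only for the second (mirrored) rank.
//*****************************************************************************
module ddr3_addr_mirror_sketch (
  input  [15:0] addr_in,    // row/column address as driven to rank 0
  input  [2:0]  ba_in,      // bank address as driven to rank 0
  input         mirror_en,  // 1 = drive the mirrored (second) rank
  output [15:0] addr_out,
  output [2:0]  ba_out
);

  // DDR3 rank mirroring swaps address pairs A3/A4, A5/A6, A7/A8 and bank
  // bits BA0/BA1; every other bit passes through unchanged. This matches
  // the ternary index expressions used in gen_addr/gen_ba above.
  assign addr_out = !mirror_en ? addr_in :
                    {addr_in[15:9],
                     addr_in[7], addr_in[8],   // A8 <-> A7
                     addr_in[5], addr_in[6],   // A6 <-> A5
                     addr_in[3], addr_in[4],   // A4 <-> A3
                     addr_in[2:0]};

  assign ba_out = !mirror_en ? ba_in : {ba_in[2], ba_in[0], ba_in[1]};

endmodule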
////////////////////////////////////////////////////////////////////////////// // // Xilinx, Inc. 2008 www.xilinx.com // ////////////////////////////////////////////////////////////////////////////// // // File name : encode.v // // Description : TMDS encoder // // Date - revision : Jan. 2008 - v 1.0 // // Author : Bob Feng // // Disclaimer: LIMITED WARRANTY AND DISCLAMER. These designs are // provided to you "as is". Xilinx and its licensors make and you // receive no warranties or conditions, express, implied, // statutory or otherwise, and Xilinx specifically disclaims any // implied warranties of merchantability, non-infringement,or // fitness for a particular purpose. Xilinx does not warrant that // the functions contained in these designs will meet your // requirements, or that the operation of these designs will be // uninterrupted or error free, or that defects in the Designs // will be corrected. Furthermore, Xilinx does not warrantor // make any representations regarding use or the results of the // use of the designs in terms of correctness, accuracy, // reliability, or otherwise. // // LIMITATION OF LIABILITY. In no event will Xilinx or its // licensors be liable for any loss of data, lost profits,cost // or procurement of substitute goods or services, or for any // special, incidental, consequential, or indirect damages // arising from the use or operation of the designs or // accompanying documentation, however caused and on any theory // of liability. This limitation will apply even if Xilinx // has been advised of the possibility of such damage. This // limitation shall apply not-withstanding the failure of the // essential purpose of any limited remedies herein. // // Copyright © 2006 Xilinx, Inc. // All rights reserved // ////////////////////////////////////////////////////////////////////////////// `timescale 1 ps / 1ps module encode ( input clkin, // pixel clock input input rstin, // async. reset input (active high) input [7:0] din, // data inputs: expect registered input c0, // c0 input input c1, // c1 input input de, // de input output reg [9:0] dout // data outputs ); //////////////////////////////////////////////////////////// // Counting number of 1s and 0s for each incoming pixel // component. Pipe line the result. // Register Data Input so it matches the pipe lined adder // output //////////////////////////////////////////////////////////// reg [3:0] n1d; //number of 1s in din reg [7:0] din_q; always @ (posedge clkin) begin n1d <=#1 din[0] + din[1] + din[2] + din[3] + din[4] + din[5] + din[6] + din[7]; din_q <=#1 din; end /////////////////////////////////////////////////////// // Stage 1: 8 bit -> 9 bit // Refer to DVI 1.0 Specification, page 29, Figure 3-5 /////////////////////////////////////////////////////// wire decision1; assign decision1 = (n1d > 4'h4) | ((n1d == 4'h4) & (din_q[0] == 1'b0)); /* reg [8:0] q_m; always @ (posedge clkin) begin q_m[0] <=#1 din_q[0]; q_m[1] <=#1 (decision1) ? (q_m[0] ^~ din_q[1]) : (q_m[0] ^ din_q[1]); q_m[2] <=#1 (decision1) ? (q_m[1] ^~ din_q[2]) : (q_m[1] ^ din_q[2]); q_m[3] <=#1 (decision1) ? (q_m[2] ^~ din_q[3]) : (q_m[2] ^ din_q[3]); q_m[4] <=#1 (decision1) ? (q_m[3] ^~ din_q[4]) : (q_m[3] ^ din_q[4]); q_m[5] <=#1 (decision1) ? (q_m[4] ^~ din_q[5]) : (q_m[4] ^ din_q[5]); q_m[6] <=#1 (decision1) ? (q_m[5] ^~ din_q[6]) : (q_m[5] ^ din_q[6]); q_m[7] <=#1 (decision1) ? (q_m[6] ^~ din_q[7]) : (q_m[6] ^ din_q[7]); q_m[8] <=#1 (decision1) ? 
1'b0 : 1'b1; end */ wire [8:0] q_m; assign q_m[0] = din_q[0]; assign q_m[1] = (decision1) ? (q_m[0] ^~ din_q[1]) : (q_m[0] ^ din_q[1]); assign q_m[2] = (decision1) ? (q_m[1] ^~ din_q[2]) : (q_m[1] ^ din_q[2]); assign q_m[3] = (decision1) ? (q_m[2] ^~ din_q[3]) : (q_m[2] ^ din_q[3]); assign q_m[4] = (decision1) ? (q_m[3] ^~ din_q[4]) : (q_m[3] ^ din_q[4]); assign q_m[5] = (decision1) ? (q_m[4] ^~ din_q[5]) : (q_m[4] ^ din_q[5]); assign q_m[6] = (decision1) ? (q_m[5] ^~ din_q[6]) : (q_m[5] ^ din_q[6]); assign q_m[7] = (decision1) ? (q_m[6] ^~ din_q[7]) : (q_m[6] ^ din_q[7]); assign q_m[8] = (decision1) ? 1'b0 : 1'b1; ///////////////////////////////////////////////////////// // Stage 2: 9 bit -> 10 bit // Refer to DVI 1.0 Specification, page 29, Figure 3-5 ///////////////////////////////////////////////////////// reg [3:0] n1q_m, n0q_m; // number of 1s and 0s for q_m always @ (posedge clkin) begin n1q_m <=#1 q_m[0] + q_m[1] + q_m[2] + q_m[3] + q_m[4] + q_m[5] + q_m[6] + q_m[7]; n0q_m <=#1 4'h8 - (q_m[0] + q_m[1] + q_m[2] + q_m[3] + q_m[4] + q_m[5] + q_m[6] + q_m[7]); end parameter CTRLTOKEN0 = 10'b1101010100; parameter CTRLTOKEN1 = 10'b0010101011; parameter CTRLTOKEN2 = 10'b0101010100; parameter CTRLTOKEN3 = 10'b1010101011; reg [4:0] cnt; //disparity counter, MSB is the sign bit wire decision2, decision3; assign decision2 = (cnt == 5'h0) | (n1q_m == n0q_m); ///////////////////////////////////////////////////////////////////////// // [(cnt > 0) and (N1q_m > N0q_m)] or [(cnt < 0) and (N0q_m > N1q_m)] ///////////////////////////////////////////////////////////////////////// assign decision3 = (~cnt[4] & (n1q_m > n0q_m)) | (cnt[4] & (n0q_m > n1q_m)); //////////////////////////////////// // pipe line alignment //////////////////////////////////// reg de_q, de_reg; reg c0_q, c1_q; reg c0_reg, c1_reg; reg [8:0] q_m_reg; always @ (posedge clkin) begin de_q <=#1 de; de_reg <=#1 de_q; c0_q <=#1 c0; c0_reg <=#1 c0_q; c1_q <=#1 c1; c1_reg <=#1 c1_q; q_m_reg <=#1 q_m; end /////////////////////////////// // 10-bit out // disparity counter /////////////////////////////// always @ (posedge clkin or posedge rstin) begin if(rstin) begin dout <= 10'h0; cnt <= 5'h0; end else begin if (de_reg) begin if(decision2) begin dout[9] <=#1 ~q_m_reg[8]; dout[8] <=#1 q_m_reg[8]; dout[7:0] <=#1 (q_m_reg[8]) ? q_m_reg[7:0] : ~q_m_reg[7:0]; cnt <=#1 (~q_m_reg[8]) ? (cnt + n0q_m - n1q_m) : (cnt + n1q_m - n0q_m); end else begin if(decision3) begin dout[9] <=#1 1'b1; dout[8] <=#1 q_m_reg[8]; dout[7:0] <=#1 ~q_m_reg[7:0]; cnt <=#1 cnt + {q_m_reg[8], 1'b0} + (n0q_m - n1q_m); end else begin dout[9] <=#1 1'b0; dout[8] <=#1 q_m_reg[8]; dout[7:0] <=#1 q_m_reg[7:0]; cnt <=#1 cnt - {~q_m_reg[8], 1'b0} + (n1q_m - n0q_m); end end end else begin case ({c1_reg, c0_reg}) 2'b00: dout <=#1 CTRLTOKEN0; 2'b01: dout <=#1 CTRLTOKEN1; 2'b10: dout <=#1 CTRLTOKEN2; default: dout <=#1 CTRLTOKEN3; endcase cnt <=#1 5'h0; end end end endmodule
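//////////////////////////////////////////////////////////////////////////////
// Minimal simulation sketch for the encoder above -- not part of the original
// Xilinx deliverable. It pulses reset, lets a control token propagate, then
// feeds a few pixel values. The 10 ns clock period and the sample data values
// are arbitrary choices for illustration only.
//////////////////////////////////////////////////////////////////////////////
`timescale 1 ps / 1ps
module tb_encode;

  reg        clkin = 1'b0;
  reg        rstin = 1'b1;
  reg  [7:0] din   = 8'h00;
  reg        c0    = 1'b0;
  reg        c1    = 1'b0;
  reg        de    = 1'b0;
  wire [9:0] dout;

  // Device under test
  encode uut (
    .clkin (clkin),
    .rstin (rstin),
    .din   (din),
    .c0    (c0),
    .c1    (c1),
    .de    (de),
    .dout  (dout)
  );

  // 100 MHz pixel clock (10 ns period at the 1 ps timescale)
  always #5000 clkin = ~clkin;

  initial begin
    // Hold reset for a few cycles, then release it
    repeat (4) @(posedge clkin);
    rstin = 1'b0;

    // Blanking: {c1,c0} = 00 should appear on dout as CTRLTOKEN0 once the
    // encoder's pipeline latency has elapsed
    repeat (4) @(posedge clkin);

    // A short burst of active video
    de  = 1'b1;
    din = 8'h10; @(posedge clkin);
    din = 8'hEB; @(posedge clkin);
    din = 8'h80; @(posedge clkin);
    de  = 1'b0;

    repeat (8) @(posedge clkin);
    $finish;
  end

  initial
    $monitor("%t de=%b din=%h dout=%b", $time, de, din, dout);

endmodule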
//***************************************************************************** // (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor : Xilinx // \ \ \/ Version : %version // \ \ Application : MIG // / / Filename : arb_row_col.v // /___/ /\ Date Last Modified : $date$ // \ \ / \ Date Created : Tue Jun 30 2009 // \___\/\___\ // //Device : 7-Series //Design Name : DDR3 SDRAM //Purpose : //Reference : //Revision History : //***************************************************************************** // This block receives request to send row and column commands. These requests // come the individual bank machines. The arbitration winner is selected // and driven back to the bank machines. // // The CS enables are generated. For 2:1 mode, row commands are sent // in the "0" phase, and column commands are sent in the "1" phase. // // In 2T mode, a further arbitration is performed between the row // and column commands. The winner of this arbitration inhibits // arbitration by the loser. The winner is allowed to arbitrate, the loser is // blocked until the next state. The winning address command // is repeated on both the "0" and the "1" phases and the CS // is asserted for just the "1" phase. 
`timescale 1 ps / 1 ps module mig_7series_v1_9_arb_row_col # ( parameter TCQ = 100, parameter ADDR_CMD_MODE = "1T", parameter CWL = 5, parameter EARLY_WR_DATA_ADDR = "OFF", parameter nBANK_MACHS = 4, parameter nCK_PER_CLK = 2, parameter nRAS = 37500, // ACT->PRE cmd period (CKs) parameter nRCD = 12500, // ACT->R/W delay (CKs) parameter nWR = 6 // Write recovery (CKs) ) (/*AUTOARG*/ // Outputs grant_row_r, grant_pre_r, sent_row, sending_row, sending_pre, grant_config_r, rnk_config_strobe, rnk_config_valid_r, grant_col_r, sending_col, sent_col, sent_col_r, grant_col_wr, send_cmd0_row, send_cmd0_col, send_cmd1_row, send_cmd1_col, send_cmd2_row, send_cmd2_col, send_cmd2_pre, send_cmd3_col, col_channel_offset, cs_en0, cs_en1, cs_en2, cs_en3, insert_maint_r1, rnk_config_kill_rts_col, // Inputs clk, rst, rts_row, rts_pre, insert_maint_r, rts_col, rtc, col_rdy_wr ); // Create a delay when switching ranks localparam RNK2RNK_DLY = 12; localparam RNK2RNK_DLY_CLKS = (RNK2RNK_DLY / nCK_PER_CLK) + (RNK2RNK_DLY % nCK_PER_CLK ? 1 : 0); input clk; input rst; input [nBANK_MACHS-1:0] rts_row; input insert_maint_r; input [nBANK_MACHS-1:0] rts_col; reg [RNK2RNK_DLY_CLKS-1:0] rnk_config_strobe_r; wire block_grant_row; wire block_grant_col; wire rnk_config_kill_rts_col_lcl = RNK2RNK_DLY_CLKS > 0 ? |rnk_config_strobe_r : 1'b0; output rnk_config_kill_rts_col; assign rnk_config_kill_rts_col = rnk_config_kill_rts_col_lcl; wire [nBANK_MACHS-1:0] col_request; wire granted_col_ns = |col_request; wire [nBANK_MACHS-1:0] row_request = rts_row & {nBANK_MACHS{~insert_maint_r}}; wire granted_row_ns = |row_request; generate if (ADDR_CMD_MODE == "2T" && nCK_PER_CLK != 4) begin : row_col_2T_arb assign col_request = rts_col & {nBANK_MACHS{~(rnk_config_kill_rts_col_lcl || insert_maint_r)}}; // Give column command priority whenever previous state has no row request. wire [1:0] row_col_grant; wire [1:0] current_master = ~granted_row_ns ? 2'b10 : row_col_grant; wire upd_last_master = ~granted_row_ns || |row_col_grant; mig_7series_v1_9_round_robin_arb # (.WIDTH (2)) row_col_arb0 (.grant_ns (), .grant_r (row_col_grant), .upd_last_master (upd_last_master), .current_master (current_master), .clk (clk), .rst (rst), .req ({granted_row_ns, granted_col_ns}), .disable_grant (1'b0)); assign {block_grant_col, block_grant_row} = row_col_grant; end else begin : row_col_1T_arb assign col_request = rts_col & {nBANK_MACHS{~rnk_config_kill_rts_col_lcl}}; assign block_grant_row = 1'b0; assign block_grant_col = 1'b0; end endgenerate // Row address/command arbitration. 
wire[nBANK_MACHS-1:0] grant_row_r_lcl; output wire[nBANK_MACHS-1:0] grant_row_r; assign grant_row_r = grant_row_r_lcl; reg granted_row_r; always @(posedge clk) granted_row_r <= #TCQ granted_row_ns; wire sent_row_lcl = granted_row_r && ~block_grant_row; output wire sent_row; assign sent_row = sent_row_lcl; mig_7series_v1_9_round_robin_arb # (.WIDTH (nBANK_MACHS)) row_arb0 (.grant_ns (), .grant_r (grant_row_r_lcl[nBANK_MACHS-1:0]), .upd_last_master (sent_row_lcl), .current_master (grant_row_r_lcl[nBANK_MACHS-1:0]), .clk (clk), .rst (rst), .req (row_request), .disable_grant (1'b0)); output wire [nBANK_MACHS-1:0] sending_row; assign sending_row = grant_row_r_lcl & {nBANK_MACHS{~block_grant_row}}; // Precharge arbitration for 4:1 mode input [nBANK_MACHS-1:0] rts_pre; output wire[nBANK_MACHS-1:0] grant_pre_r; output wire [nBANK_MACHS-1:0] sending_pre; wire sent_pre_lcl; generate if((nCK_PER_CLK == 4) && (ADDR_CMD_MODE != "2T")) begin : pre_4_1_1T_arb reg granted_pre_r; wire[nBANK_MACHS-1:0] grant_pre_r_lcl; wire granted_pre_ns = |rts_pre; assign grant_pre_r = grant_pre_r_lcl; always @(posedge clk) granted_pre_r <= #TCQ granted_pre_ns; assign sent_pre_lcl = granted_pre_r; assign sending_pre = grant_pre_r_lcl; mig_7series_v1_9_round_robin_arb # (.WIDTH (nBANK_MACHS)) pre_arb0 (.grant_ns (), .grant_r (grant_pre_r_lcl[nBANK_MACHS-1:0]), .upd_last_master (sent_pre_lcl), .current_master (grant_pre_r_lcl[nBANK_MACHS-1:0]), .clk (clk), .rst (rst), .req (rts_pre), .disable_grant (1'b0)); end endgenerate `ifdef MC_SVA all_bank_machines_row_arb: cover property (@(posedge clk) (~rst && &rts_row)); `endif // Rank config arbitration. input [nBANK_MACHS-1:0] rtc; wire [nBANK_MACHS-1:0] grant_config_r_lcl; output wire [nBANK_MACHS-1:0] grant_config_r; assign grant_config_r = grant_config_r_lcl; wire upd_rnk_config_last_master; mig_7series_v1_9_round_robin_arb # (.WIDTH (nBANK_MACHS)) config_arb0 (.grant_ns (), .grant_r (grant_config_r_lcl[nBANK_MACHS-1:0]), .upd_last_master (upd_rnk_config_last_master), .current_master (grant_config_r_lcl[nBANK_MACHS-1:0]), .clk (clk), .rst (rst), .req (rtc[nBANK_MACHS-1:0]), .disable_grant (1'b0)); `ifdef MC_SVA all_bank_machines_config_arb: cover property (@(posedge clk) (~rst && &rtc)); `endif wire rnk_config_strobe_ns = ~rnk_config_strobe_r[0] && |rtc && ~granted_col_ns; always @(posedge clk) rnk_config_strobe_r[0] <= #TCQ rnk_config_strobe_ns; genvar i; generate for(i = 1; i < RNK2RNK_DLY_CLKS; i = i + 1) always @(posedge clk) rnk_config_strobe_r[i] <= #TCQ rnk_config_strobe_r[i-1]; endgenerate output wire rnk_config_strobe; assign rnk_config_strobe = rnk_config_strobe_r[0]; assign upd_rnk_config_last_master = rnk_config_strobe_r[0]; // Generate rnk_config_valid. reg rnk_config_valid_r_lcl; wire rnk_config_valid_ns; assign rnk_config_valid_ns = ~rst && (rnk_config_valid_r_lcl || rnk_config_strobe_ns); always @(posedge clk) rnk_config_valid_r_lcl <= #TCQ rnk_config_valid_ns; output wire rnk_config_valid_r; assign rnk_config_valid_r = rnk_config_valid_r_lcl; // Column address/command arbitration. 
wire [nBANK_MACHS-1:0] grant_col_r_lcl; output wire [nBANK_MACHS-1:0] grant_col_r; assign grant_col_r = grant_col_r_lcl; reg granted_col_r; always @(posedge clk) granted_col_r <= #TCQ granted_col_ns; wire sent_col_lcl; mig_7series_v1_9_round_robin_arb # (.WIDTH (nBANK_MACHS)) col_arb0 (.grant_ns (), .grant_r (grant_col_r_lcl[nBANK_MACHS-1:0]), .upd_last_master (sent_col_lcl), .current_master (grant_col_r_lcl[nBANK_MACHS-1:0]), .clk (clk), .rst (rst), .req (col_request), .disable_grant (1'b0)); `ifdef MC_SVA all_bank_machines_col_arb: cover property (@(posedge clk) (~rst && &rts_col)); `endif output wire [nBANK_MACHS-1:0] sending_col; assign sending_col = grant_col_r_lcl & {nBANK_MACHS{~block_grant_col}}; assign sent_col_lcl = granted_col_r && ~block_grant_col; reg sent_col_lcl_r = 1'b0; always @(posedge clk) sent_col_lcl_r <= #TCQ sent_col_lcl; output wire sent_col; assign sent_col = sent_col_lcl; output wire sent_col_r; assign sent_col_r = sent_col_lcl_r; // If we need early wr_data_addr because ECC is on, arbitrate // to see which bank machine might sent the next wr_data_addr; input [nBANK_MACHS-1:0] col_rdy_wr; output wire [nBANK_MACHS-1:0] grant_col_wr; generate if (EARLY_WR_DATA_ADDR == "OFF") begin : early_wr_addr_arb_off assign grant_col_wr = {nBANK_MACHS{1'b0}}; end else begin : early_wr_addr_arb_on wire [nBANK_MACHS-1:0] grant_col_wr_raw; mig_7series_v1_9_round_robin_arb # (.WIDTH (nBANK_MACHS)) col_arb0 (.grant_ns (grant_col_wr_raw), .grant_r (), .upd_last_master (sent_col_lcl), .current_master (grant_col_r_lcl[nBANK_MACHS-1:0]), .clk (clk), .rst (rst), .req (col_rdy_wr), .disable_grant (1'b0)); reg [nBANK_MACHS-1:0] grant_col_wr_r; wire [nBANK_MACHS-1:0] grant_col_wr_ns = granted_col_ns ? grant_col_wr_raw : grant_col_wr_r; always @(posedge clk) grant_col_wr_r <= #TCQ grant_col_wr_ns; assign grant_col_wr = grant_col_wr_ns; end // block: early_wr_addr_arb_on endgenerate output reg send_cmd0_row = 1'b0; output reg send_cmd0_col = 1'b0; output reg send_cmd1_row = 1'b0; output reg send_cmd1_col = 1'b0; output reg send_cmd2_row = 1'b0; output reg send_cmd2_col = 1'b0; output reg send_cmd2_pre = 1'b0; output reg send_cmd3_col = 1'b0; output reg cs_en0 = 1'b0; output reg cs_en1 = 1'b0; output reg cs_en2 = 1'b0; output reg cs_en3 = 1'b0; output wire [5:0] col_channel_offset; reg insert_maint_r1_lcl; always @(posedge clk) insert_maint_r1_lcl <= #TCQ insert_maint_r; output wire insert_maint_r1; assign insert_maint_r1 = insert_maint_r1_lcl; wire sent_row_or_maint = sent_row_lcl || insert_maint_r1_lcl; reg sent_row_or_maint_r = 1'b0; always @(posedge clk) sent_row_or_maint_r <= #TCQ sent_row_or_maint; generate case ({(nCK_PER_CLK == 4), (nCK_PER_CLK == 2), (ADDR_CMD_MODE == "2T")}) 3'b000 : begin : one_one_not2T end 3'b001 : begin : one_one_2T end 3'b010 : begin : two_one_not2T if(!(CWL % 2)) begin // Place column commands on slot 0 for even CWL always @(sent_col_lcl) begin cs_en0 = sent_col_lcl; send_cmd0_col = sent_col_lcl; end always @(sent_row_or_maint) begin cs_en1 = sent_row_or_maint; send_cmd1_row = sent_row_or_maint; end assign col_channel_offset = 0; end else begin // Place column commands on slot 1 for odd CWL always @(sent_row_or_maint) begin cs_en0 = sent_row_or_maint; send_cmd0_row = sent_row_or_maint; end always @(sent_col_lcl) begin cs_en1 = sent_col_lcl; send_cmd1_col = sent_col_lcl; end assign col_channel_offset = 1; end end 3'b011 : begin : two_one_2T if(!(CWL % 2)) begin // Place column commands on slot 1->0 for even CWL always @(sent_row_or_maint_r or sent_col_lcl_r) 
cs_en0 = sent_row_or_maint_r || sent_col_lcl_r; always @(sent_row_or_maint or sent_row_or_maint_r) begin send_cmd0_row = sent_row_or_maint_r; send_cmd1_row = sent_row_or_maint; end always @(sent_col_lcl or sent_col_lcl_r) begin send_cmd0_col = sent_col_lcl_r; send_cmd1_col = sent_col_lcl; end assign col_channel_offset = 0; end else begin // Place column commands on slot 0->1 for odd CWL always @(sent_col_lcl or sent_row_or_maint) cs_en1 = sent_row_or_maint || sent_col_lcl; always @(sent_row_or_maint) begin send_cmd0_row = sent_row_or_maint; send_cmd1_row = sent_row_or_maint; end always @(sent_col_lcl) begin send_cmd0_col = sent_col_lcl; send_cmd1_col = sent_col_lcl; end assign col_channel_offset = 1; end end 3'b100 : begin : four_one_not2T if(!(CWL % 2)) begin // Place column commands on slot 0 for even CWL always @(sent_col_lcl) begin cs_en0 = sent_col_lcl; send_cmd0_col = sent_col_lcl; end always @(sent_row_or_maint) begin cs_en1 = sent_row_or_maint; send_cmd1_row = sent_row_or_maint; end assign col_channel_offset = 0; end else begin // Place column commands on slot 1 for odd CWL always @(sent_row_or_maint) begin cs_en0 = sent_row_or_maint; send_cmd0_row = sent_row_or_maint; end always @(sent_col_lcl) begin cs_en1 = sent_col_lcl; send_cmd1_col = sent_col_lcl; end assign col_channel_offset = 1; end always @(sent_pre_lcl) begin cs_en2 = sent_pre_lcl; send_cmd2_pre = sent_pre_lcl; end end 3'b101 : begin : four_one_2T if(!(CWL % 2)) begin // Place column commands on slot 3->0 for even CWL always @(sent_col_lcl or sent_col_lcl_r) begin cs_en0 = sent_col_lcl_r; send_cmd0_col = sent_col_lcl_r; send_cmd3_col = sent_col_lcl; end always @(sent_row_or_maint) begin cs_en2 = sent_row_or_maint; send_cmd1_row = sent_row_or_maint; send_cmd2_row = sent_row_or_maint; end assign col_channel_offset = 0; end else begin // Place column commands on slot 2->3 for odd CWL always @(sent_row_or_maint) begin cs_en1 = sent_row_or_maint; send_cmd0_row = sent_row_or_maint; send_cmd1_row = sent_row_or_maint; end always @(sent_col_lcl) begin cs_en3 = sent_col_lcl; send_cmd2_col = sent_col_lcl; send_cmd3_col = sent_col_lcl; end assign col_channel_offset = 3; end end endcase endgenerate endmodule
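// ----------------------------------------------------------------------------
// Illustrative addition (not part of the MIG source): a much-simplified,
// self-contained stand-in for the 2T row/column arbitration described in the
// header comments above. Column commands win outright whenever there is no
// competing row request; when both request, the grant alternates with the
// previous winner. A single "last winner" flag replaces the library
// round-robin arbiter (mig_7series_v1_9_round_robin_arb), and none of the
// per-bank arbitration, block-grant signalling, or CS-enable slotting of the
// real design is modelled here.
// ----------------------------------------------------------------------------
`timescale 1 ps / 1 ps
module row_col_2t_arb_sketch (
  input  wire clk,
  input  wire rst,
  input  wire row_req,    // any bank machine requesting a row command
  input  wire col_req,    // any bank machine requesting a column command
  output wire grant_row,
  output wire grant_col
);

  // Last winner: 0 = row won most recently, 1 = column won most recently.
  reg last_col = 1'b0;

  // Column has priority when no row request competes; otherwise alternate.
  wire col_wins = col_req & (~row_req | ~last_col);
  wire row_wins = row_req & ~col_wins;

  assign grant_col = col_wins;
  assign grant_row = row_wins;

  always @(posedge clk)
    if (rst)                      last_col <= 1'b0;
    else if (col_wins | row_wins) last_col <= col_wins;
endmodule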
// DESCRIPTION: Verilator: Verilog Test module // // This file ONLY is placed into the Public Domain, for any use, // without warranty, 2013 by Wilson Snyder. module t (/*AUTOARG*/ // Inputs clk ); input clk; `ifdef INLINE_A //verilator inline_module `else //verilator no_inline_module `endif bmod bsub3 (.clk, .n(3)); bmod bsub2 (.clk, .n(2)); bmod bsub1 (.clk, .n(1)); bmod bsub0 (.clk, .n(0)); endmodule module bmod (input clk, input [31:0] n); `ifdef INLINE_B //verilator inline_module `else //verilator no_inline_module `endif cmod csub (.clk, .n); endmodule module cmod (input clk, input [31:0] n); `ifdef INLINE_C //verilator inline_module `else //verilator no_inline_module `endif reg [31:0] clocal; always @ (posedge clk) clocal <= n; dmod dsub (.clk, .n); endmodule module dmod (input clk, input [31:0] n); `ifdef INLINE_D //verilator inline_module `else //verilator no_inline_module `endif reg [31:0] dlocal; always @ (posedge clk) dlocal <= n; int cyc; always @(posedge clk) begin cyc <= cyc+1; end always @(posedge clk) begin if (cyc>10) begin `ifdef TEST_VERBOSE $display("%m: csub.clocal=%0d dlocal=%0d", csub.clocal, dlocal); `endif if (csub.clocal !== n) $stop; if (dlocal !== n) $stop; end if (cyc==99) begin $write("*-* All Finished *-*\n"); $finish; end end endmodule
// ---------------------------------------------------------------------- // Copyright (c) 2016, The Regents of the University of California All // rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents of the University of California // nor the names of its contributors may be used to endorse or // promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE // UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS // OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // ---------------------------------------------------------------------- //---------------------------------------------------------------------------- // Filename: sg_list_reader_64.v // Version: 1.00.a // Verilog Standard: Verilog-2001 // Description: Reads data from the scatter gather list buffer. 
// Author: Matt Jacobsen // History: @mattj: Version 2.0 //----------------------------------------------------------------------------- `define S_SGR64_RD_0 2'b01 `define S_SGR64_RD_1 2'b11 `define S_SGR64_RD_WAIT 2'b10 `define S_SGR64_CAP_0 2'b00 `define S_SGR64_CAP_1 2'b01 `define S_SGR64_CAP_RDY 2'b10 `timescale 1ns/1ns module sg_list_reader_64 #( parameter C_DATA_WIDTH = 9'd64 ) ( input CLK, input RST, input [C_DATA_WIDTH-1:0] BUF_DATA, // Scatter gather buffer data input BUF_DATA_EMPTY, // Scatter gather buffer data empty output BUF_DATA_REN, // Scatter gather buffer data read enable output VALID, // Scatter gather element data is valid output EMPTY, // Scatter gather elements empty input REN, // Scatter gather element data read enable output [63:0] ADDR, // Scatter gather element address output [31:0] LEN // Scatter gather element length (in words) ); (* syn_encoding = "user" *) (* fsm_encoding = "user" *) reg [1:0] rRdState=`S_SGR64_RD_0, _rRdState=`S_SGR64_RD_0; (* syn_encoding = "user" *) (* fsm_encoding = "user" *) reg [1:0] rCapState=`S_SGR64_CAP_0, _rCapState=`S_SGR64_CAP_0; reg [C_DATA_WIDTH-1:0] rData={C_DATA_WIDTH{1'd0}}, _rData={C_DATA_WIDTH{1'd0}}; reg [63:0] rAddr=64'd0, _rAddr=64'd0; reg [31:0] rLen=0, _rLen=0; reg rFifoValid=0, _rFifoValid=0; reg rDataValid=0, _rDataValid=0; assign BUF_DATA_REN = rRdState[0]; // Not S_SGR64_RD_WAIT assign VALID = rCapState[1]; // S_SGR64_CAP_RDY assign EMPTY = (BUF_DATA_EMPTY & rRdState[0]); // Not S_SGR64_RD_WAIT assign ADDR = rAddr; assign LEN = rLen; // Capture address and length as it comes out of the FIFO always @ (posedge CLK) begin rRdState <= #1 (RST ? `S_SGR64_RD_0 : _rRdState); rCapState <= #1 (RST ? `S_SGR64_CAP_0 : _rCapState); rData <= #1 _rData; rFifoValid <= #1 (RST ? 1'd0 : _rFifoValid); rDataValid <= #1 (RST ? 1'd0 : _rDataValid); rAddr <= #1 _rAddr; rLen <= #1 _rLen; end always @ (*) begin _rRdState = rRdState; _rCapState = rCapState; _rAddr = rAddr; _rLen = rLen; _rData = BUF_DATA; _rFifoValid = (BUF_DATA_REN & !BUF_DATA_EMPTY); _rDataValid = rFifoValid; case (rCapState) `S_SGR64_CAP_0: begin if (rDataValid) begin _rAddr = rData; _rCapState = `S_SGR64_CAP_1; end end `S_SGR64_CAP_1: begin if (rDataValid) begin _rLen = rData[31:0]; _rCapState = `S_SGR64_CAP_RDY; end end `S_SGR64_CAP_RDY: begin if (REN) _rCapState = `S_SGR64_CAP_0; end default: begin _rCapState = `S_SGR64_CAP_0; end endcase case (rRdState) `S_SGR64_RD_0: begin // Read from the sg data FIFO if (!BUF_DATA_EMPTY) _rRdState = `S_SGR64_RD_1; end `S_SGR64_RD_1: begin // Read from the sg data FIFO if (!BUF_DATA_EMPTY) _rRdState = `S_SGR64_RD_WAIT; end `S_SGR64_RD_WAIT: begin // Wait for the data to be consumed if (REN) _rRdState = `S_SGR64_RD_0; end default: begin _rRdState = `S_SGR64_RD_0; end endcase end endmodule
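// ----------------------------------------------------------------------------
// Illustrative addition (not part of the original RIFFA source): a minimal
// simulation sketch for sg_list_reader_64 above. The scatter gather buffer is
// modelled as a tiny two-word memory behind a standard (non-FWFT) FIFO
// interface, i.e. data appears on BUF_DATA one cycle after BUF_DATA_REN is
// accepted; that read-latency assumption is inferred from the capture
// pipeline above. The element address and length values are arbitrary.
// ----------------------------------------------------------------------------
`timescale 1ns/1ns
module tb_sg_list_reader_64;
  reg         clk = 1'b0;
  reg         rst = 1'b1;
  reg         load = 1'b0;            // set once the element has been "written"
  reg  [63:0] buf_mem [0:1];
  reg  [1:0]  rd_ptr = 2'd0;
  reg  [63:0] buf_data = 64'd0;       // registered FIFO output (1-cycle latency)
  wire        buf_empty = !load || (rd_ptr == 2'd2);
  wire        buf_ren;
  wire        valid, empty;
  reg         ren = 1'b0;
  wire [63:0] addr;
  wire [31:0] len;

  sg_list_reader_64 uut (
    .CLK(clk), .RST(rst),
    .BUF_DATA(buf_data), .BUF_DATA_EMPTY(buf_empty), .BUF_DATA_REN(buf_ren),
    .VALID(valid), .EMPTY(empty), .REN(ren),
    .ADDR(addr), .LEN(len)
  );

  always #5 clk = ~clk;

  // Behavioral stand-in for the buffer FIFO: pop on REN, show data next cycle.
  always @(posedge clk)
    if (buf_ren && !buf_empty) begin
      buf_data <= buf_mem[rd_ptr[0]];
      rd_ptr   <= rd_ptr + 2'd1;
    end

  initial begin
    // One scatter gather element: 64-bit address word, then a length word.
    buf_mem[0] = 64'h0000_0001_2345_6780;  // element address (arbitrary)
    buf_mem[1] = 64'h0000_0000_0000_0100;  // element length in words (arbitrary)
    repeat (4) @(posedge clk);
    rst = 1'b0;
    @(posedge clk);
    load = 1'b1;                           // buffer now holds one element

    // Wait for the reader to present the element, then consume it with REN.
    wait (valid);
    @(posedge clk);
    $display("SG element: addr=%h len=%0d words", addr, len);
    ren = 1'b1;
    @(posedge clk);
    ren = 1'b0;
    repeat (4) @(posedge clk);
    $finish;
  end
endmodule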
`timescale 1ns / 1ps //////////////////////////////////////////////////////////////////////////////// // Company: // Engineer: // // Create Date: 14:40:38 05/31/2011 // Design Name: main // Module Name: /home/ikari/prj/sd2snes/verilog/sd2snes/main_tf.v // Project Name: sd2snes // Target Device: // Tool versions: // Description: // // Verilog Test Fixture created by ISE for module: main // // Dependencies: // // Revision: // Revision 0.01 - File Created // Additional Comments: // //////////////////////////////////////////////////////////////////////////////// module main_tf; // Inputs reg CLKIN; reg [23:0] SNES_ADDR; reg SNES_READ; reg SNES_WRITE; reg SNES_CS; reg SNES_CPU_CLK; reg SNES_REFRESH; reg SNES_SYSCLK; reg SPI_MOSI; reg SPI_SS; reg MCU_OVR; reg [3:0] SD_DAT; // Outputs wire SNES_DATABUS_OE; wire SNES_DATABUS_DIR; wire IRQ_DIR; wire [22:0] ROM_ADDR; wire ROM_CE; wire ROM_OE; wire ROM_WE; wire ROM_BHE; wire ROM_BLE; wire [18:0] RAM_ADDR; wire RAM_CE; wire RAM_OE; wire RAM_WE; wire DAC_MCLK; wire DAC_LRCK; wire DAC_SDOUT; // Bidirs wire [7:0] SNES_DATA; wire SNES_IRQ; wire [15:0] ROM_DATA; wire [7:0] RAM_DATA; wire SPI_MISO; wire SPI_SCK; wire SD_CMD; wire SD_CLK; // Instantiate the Unit Under Test (UUT) main uut ( .CLKIN(CLKIN), .SNES_ADDR(SNES_ADDR), .SNES_READ(SNES_READ), .SNES_WRITE(SNES_WRITE), .SNES_CS(SNES_CS), .SNES_DATA(SNES_DATA), .SNES_CPU_CLK(SNES_CPU_CLK), .SNES_REFRESH(SNES_REFRESH), .SNES_IRQ(SNES_IRQ), .SNES_DATABUS_OE(SNES_DATABUS_OE), .SNES_DATABUS_DIR(SNES_DATABUS_DIR), .IRQ_DIR(IRQ_DIR), .SNES_SYSCLK(SNES_SYSCLK), .ROM_DATA(ROM_DATA), .ROM_ADDR(ROM_ADDR), .ROM_CE(ROM_CE), .ROM_OE(ROM_OE), .ROM_WE(ROM_WE), .ROM_BHE(ROM_BHE), .ROM_BLE(ROM_BLE), .RAM_DATA(RAM_DATA), .RAM_ADDR(RAM_ADDR), .RAM_CE(RAM_CE), .RAM_OE(RAM_OE), .RAM_WE(RAM_WE), .SPI_MOSI(SPI_MOSI), .SPI_MISO(SPI_MISO), .SPI_SS(SPI_SS), .SPI_SCK(SPI_SCK), .MCU_OVR(MCU_OVR), .DAC_MCLK(DAC_MCLK), .DAC_LRCK(DAC_LRCK), .DAC_SDOUT(DAC_SDOUT), .SD_DAT(SD_DAT), .SD_CMD(SD_CMD), .SD_CLK(SD_CLK) ); integer i; reg [7:0] SNES_DATA_OUT; reg [7:0] SNES_DATA_IN; assign SNES_DATA = (!SNES_READ) ? 8'bZ : SNES_DATA_IN; initial begin // Initialize Inputs CLKIN = 0; SNES_ADDR = 0; SNES_READ = 1; SNES_WRITE = 1; SNES_CS = 0; SNES_CPU_CLK = 0; SNES_REFRESH = 0; SNES_SYSCLK = 0; SPI_MOSI = 0; SPI_SS = 0; MCU_OVR = 1; SD_DAT = 0; // Wait 100 ns for global reset to finish #500; // Add stimulus here SNES_ADDR = 24'h208000; SNES_DATA_IN = 8'h1f; SNES_WRITE = 0; #100 SNES_WRITE = 1; #100; for (i = 0; i < 4096; i = i + 1) begin #140 SNES_READ = 0; SNES_CPU_CLK = 1; #140 SNES_READ = 1; SNES_CPU_CLK = 0; end end always #24 CLKIN = ~CLKIN; endmodule
// ---------------------------------------------------------------------- // Copyright (c) 2016, The Regents of the University of California All // rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents of the University of California // nor the names of its contributors may be used to endorse or // promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE // UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS // OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // ---------------------------------------------------------------------- //---------------------------------------------------------------------------- // Filename: channel_128.v // Version: 1.00.a // Verilog Standard: Verilog-2001 // Description: Represents a RIFFA channel. Contains a RX port and a // TX port. 
// Author: Matt Jacobsen // History: @mattj: Version 2.0 //----------------------------------------------------------------------------- `timescale 1ns/1ns module channel_128 #( parameter C_DATA_WIDTH = 9'd128, parameter C_MAX_READ_REQ = 2, // Max read: 000=128B, 001=256B, 010=512B, 011=1024B, 100=2048B, 101=4096B // Local parameters parameter C_RX_FIFO_DEPTH = 1024, parameter C_TX_FIFO_DEPTH = 512, parameter C_SG_FIFO_DEPTH = 1024, parameter C_DATA_WORD_WIDTH = clog2((C_DATA_WIDTH/32)+1) ) ( input CLK, input RST, input [2:0] CONFIG_MAX_READ_REQUEST_SIZE, // Maximum read payload: 000=128B, 001=256B, 010=512B, 011=1024B, 100=2048B, 101=4096B input [2:0] CONFIG_MAX_PAYLOAD_SIZE, // Maximum write payload: 000=128B, 001=256B, 010=512B, 011=1024B input [31:0] PIO_DATA, // Single word programmed I/O data input [C_DATA_WIDTH-1:0] ENG_DATA, // Main incoming data output SG_RX_BUF_RECVD, // Scatter gather RX buffer completely read (ready for next if applicable) input SG_RX_BUF_LEN_VALID, // Scatter gather RX buffer length valid input SG_RX_BUF_ADDR_HI_VALID, // Scatter gather RX buffer high address valid input SG_RX_BUF_ADDR_LO_VALID, // Scatter gather RX buffer low address valid output SG_TX_BUF_RECVD, // Scatter gather TX buffer completely read (ready for next if applicable) input SG_TX_BUF_LEN_VALID, // Scatter gather TX buffer length valid input SG_TX_BUF_ADDR_HI_VALID, // Scatter gather TX buffer high address valid input SG_TX_BUF_ADDR_LO_VALID, // Scatter gather TX buffer low address valid input TXN_RX_LEN_VALID, // Read transaction length valid input TXN_RX_OFF_LAST_VALID, // Read transaction offset/last valid output [31:0] TXN_RX_DONE_LEN, // Read transaction actual transfer length output TXN_RX_DONE, // Read transaction done input TXN_RX_DONE_ACK, // Read transaction actual transfer length read output TXN_TX, // Write transaction notification input TXN_TX_ACK, // Write transaction acknowledged output [31:0] TXN_TX_LEN, // Write transaction length output [31:0] TXN_TX_OFF_LAST, // Write transaction offset/last output [31:0] TXN_TX_DONE_LEN, // Write transaction actual transfer length output TXN_TX_DONE, // Write transaction done input TXN_TX_DONE_ACK, // Write transaction actual transfer length read output RX_REQ, // Read request input RX_REQ_ACK, // Read request accepted output [1:0] RX_REQ_TAG, // Read request data tag output [63:0] RX_REQ_ADDR, // Read request address output [9:0] RX_REQ_LEN, // Read request length output TX_REQ, // Outgoing write request input TX_REQ_ACK, // Outgoing write request acknowledged output [63:0] TX_ADDR, // Outgoing write high address output [9:0] TX_LEN, // Outgoing write length (in 32 bit words) output [C_DATA_WIDTH-1:0] TX_DATA, // Outgoing write data input TX_DATA_REN, // Outgoing write data read enable input TX_SENT, // Outgoing write complete input [C_DATA_WORD_WIDTH-1:0] MAIN_DATA_EN, // Main incoming data enable input MAIN_DONE, // Main incoming data complete input MAIN_ERR, // Main incoming data completed with error input [C_DATA_WORD_WIDTH-1:0] SG_RX_DATA_EN, // Scatter gather for RX incoming data enable input SG_RX_DONE, // Scatter gather for RX incoming data complete input SG_RX_ERR, // Scatter gather for RX incoming data completed with error input [C_DATA_WORD_WIDTH-1:0] SG_TX_DATA_EN, // Scatter gather for TX incoming data enable input SG_TX_DONE, // Scatter gather for TX incoming data complete input SG_TX_ERR, // Scatter gather for TX incoming data completed with error input CHNL_RX_CLK, // Channel read clock output CHNL_RX, // Channel read 
receive signal input CHNL_RX_ACK, // Channle read received signal output CHNL_RX_LAST, // Channel last read output [31:0] CHNL_RX_LEN, // Channel read length output [30:0] CHNL_RX_OFF, // Channel read offset output [C_DATA_WIDTH-1:0] CHNL_RX_DATA, // Channel read data output CHNL_RX_DATA_VALID, // Channel read data valid input CHNL_RX_DATA_REN, // Channel read data has been recieved input CHNL_TX_CLK, // Channel write clock input CHNL_TX, // Channel write receive signal output CHNL_TX_ACK, // Channel write acknowledgement signal input CHNL_TX_LAST, // Channel last write input [31:0] CHNL_TX_LEN, // Channel write length (in 32 bit words) input [30:0] CHNL_TX_OFF, // Channel write offset input [C_DATA_WIDTH-1:0] CHNL_TX_DATA, // Channel write data input CHNL_TX_DATA_VALID, // Channel write data valid output CHNL_TX_DATA_REN // Channel write data has been recieved ); `include "functions.vh" wire [C_DATA_WIDTH-1:0] wTxSgData; wire wTxSgDataEmpty; wire wTxSgDataRen; wire wTxSgDataErr; wire wTxSgDataRst; // Receiving port (data to the channel) rx_port_128 #( .C_DATA_WIDTH(C_DATA_WIDTH), .C_MAIN_FIFO_DEPTH(C_RX_FIFO_DEPTH), .C_SG_FIFO_DEPTH(C_SG_FIFO_DEPTH), .C_MAX_READ_REQ(C_MAX_READ_REQ) ) rxPort ( .RST(RST), .CLK(CLK), .CONFIG_MAX_READ_REQUEST_SIZE(CONFIG_MAX_READ_REQUEST_SIZE), .SG_RX_BUF_RECVD(SG_RX_BUF_RECVD), .SG_RX_BUF_DATA(PIO_DATA), .SG_RX_BUF_LEN_VALID(SG_RX_BUF_LEN_VALID), .SG_RX_BUF_ADDR_HI_VALID(SG_RX_BUF_ADDR_HI_VALID), .SG_RX_BUF_ADDR_LO_VALID(SG_RX_BUF_ADDR_LO_VALID), .SG_TX_BUF_RECVD(SG_TX_BUF_RECVD), .SG_TX_BUF_DATA(PIO_DATA), .SG_TX_BUF_LEN_VALID(SG_TX_BUF_LEN_VALID), .SG_TX_BUF_ADDR_HI_VALID(SG_TX_BUF_ADDR_HI_VALID), .SG_TX_BUF_ADDR_LO_VALID(SG_TX_BUF_ADDR_LO_VALID), .SG_DATA(wTxSgData), .SG_DATA_EMPTY(wTxSgDataEmpty), .SG_DATA_REN(wTxSgDataRen), .SG_RST(wTxSgDataRst), .SG_ERR(wTxSgDataErr), .TXN_DATA(PIO_DATA), .TXN_LEN_VALID(TXN_RX_LEN_VALID), .TXN_OFF_LAST_VALID(TXN_RX_OFF_LAST_VALID), .TXN_DONE_LEN(TXN_RX_DONE_LEN), .TXN_DONE(TXN_RX_DONE), .TXN_DONE_ACK(TXN_RX_DONE_ACK), .RX_REQ(RX_REQ), .RX_REQ_ACK(RX_REQ_ACK), .RX_REQ_TAG(RX_REQ_TAG), .RX_REQ_ADDR(RX_REQ_ADDR), .RX_REQ_LEN(RX_REQ_LEN), .MAIN_DATA(ENG_DATA), .MAIN_DATA_EN(MAIN_DATA_EN), .MAIN_DONE(MAIN_DONE), .MAIN_ERR(MAIN_ERR), .SG_RX_DATA(ENG_DATA), .SG_RX_DATA_EN(SG_RX_DATA_EN), .SG_RX_DONE(SG_RX_DONE), .SG_RX_ERR(SG_RX_ERR), .SG_TX_DATA(ENG_DATA), .SG_TX_DATA_EN(SG_TX_DATA_EN), .SG_TX_DONE(SG_TX_DONE), .SG_TX_ERR(SG_TX_ERR), .CHNL_CLK(CHNL_RX_CLK), .CHNL_RX(CHNL_RX), .CHNL_RX_ACK(CHNL_RX_ACK), .CHNL_RX_LAST(CHNL_RX_LAST), .CHNL_RX_LEN(CHNL_RX_LEN), .CHNL_RX_OFF(CHNL_RX_OFF), .CHNL_RX_DATA(CHNL_RX_DATA), .CHNL_RX_DATA_VALID(CHNL_RX_DATA_VALID), .CHNL_RX_DATA_REN(CHNL_RX_DATA_REN) ); // Sending port (data from the channel) tx_port_128 #( .C_DATA_WIDTH(C_DATA_WIDTH), .C_FIFO_DEPTH(C_TX_FIFO_DEPTH) ) txPort ( .CLK(CLK), .RST(RST), .CONFIG_MAX_PAYLOAD_SIZE(CONFIG_MAX_PAYLOAD_SIZE), .TXN(TXN_TX), .TXN_ACK(TXN_TX_ACK), .TXN_LEN(TXN_TX_LEN), .TXN_OFF_LAST(TXN_TX_OFF_LAST), .TXN_DONE_LEN(TXN_TX_DONE_LEN), .TXN_DONE(TXN_TX_DONE), .TXN_DONE_ACK(TXN_TX_DONE_ACK), .SG_DATA(wTxSgData), .SG_DATA_EMPTY(wTxSgDataEmpty), .SG_DATA_REN(wTxSgDataRen), .SG_RST(wTxSgDataRst), .SG_ERR(wTxSgDataErr), .TX_REQ(TX_REQ), .TX_REQ_ACK(TX_REQ_ACK), .TX_ADDR(TX_ADDR), .TX_LEN(TX_LEN), .TX_DATA(TX_DATA), .TX_DATA_REN(TX_DATA_REN), .TX_SENT(TX_SENT), .CHNL_CLK(CHNL_TX_CLK), .CHNL_TX(CHNL_TX), .CHNL_TX_ACK(CHNL_TX_ACK), .CHNL_TX_LAST(CHNL_TX_LAST), .CHNL_TX_LEN(CHNL_TX_LEN), .CHNL_TX_OFF(CHNL_TX_OFF), .CHNL_TX_DATA(CHNL_TX_DATA), 
.CHNL_TX_DATA_VALID(CHNL_TX_DATA_VALID), .CHNL_TX_DATA_REN(CHNL_TX_DATA_REN) ); endmodule
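// ----------------------------------------------------------------------------
// Illustrative addition (not part of the original RIFFA source): a tiny
// user-side sink sketch for the CHNL_RX_* interface that channel_128 exposes
// above. It pulses the acknowledge once per announced receive transaction,
// holds the read enable high, and counts the 32-bit words it drains. The
// handshake ordering is an assumption inferred from the port comments above
// (receive notification -> acknowledge -> valid/read-enable data stream);
// treat this as a sketch, not as the normative RIFFA channel protocol. Wire
// CLK to the same clock supplied on CHNL_RX_CLK and connect the remaining
// CHNL_RX_* ports straight across.
// ----------------------------------------------------------------------------
`timescale 1ns/1ns
module chnl_rx_sink_sketch #(
  parameter C_DATA_WIDTH = 128
) (
  input  wire                    CLK,                // same clock as CHNL_RX_CLK
  input  wire                    CHNL_RX,            // receive notification
  output reg                     CHNL_RX_ACK,        // acknowledge pulse
  input  wire                    CHNL_RX_LAST,
  input  wire [31:0]             CHNL_RX_LEN,
  input  wire [30:0]             CHNL_RX_OFF,
  input  wire [C_DATA_WIDTH-1:0] CHNL_RX_DATA,
  input  wire                    CHNL_RX_DATA_VALID,
  output wire                    CHNL_RX_DATA_REN,
  output reg  [31:0]             WORDS_RCVD          // 32-bit words consumed
);

  reg in_txn;                      // high while a transaction is being drained

  initial begin
    CHNL_RX_ACK = 1'b0;
    WORDS_RCVD  = 32'd0;
    in_txn      = 1'b0;
  end

  // Always willing to accept data; a word transfers when VALID and REN agree.
  assign CHNL_RX_DATA_REN = 1'b1;

  always @(posedge CLK) begin
    CHNL_RX_ACK <= 1'b0;
    if (CHNL_RX && !in_txn) begin
      in_txn      <= 1'b1;
      CHNL_RX_ACK <= 1'b1;         // single-cycle acknowledge (assumption)
      WORDS_RCVD  <= 32'd0;
    end else if (!CHNL_RX) begin
      in_txn <= 1'b0;              // notification dropped: transaction done
    end
    if (in_txn && CHNL_RX_DATA_VALID)
      WORDS_RCVD <= WORDS_RCVD + C_DATA_WIDTH/32;
  end
endmodule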
// ---------------------------------------------------------------------- // Copyright (c) 2016, The Regents of the University of California All // rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents of the University of California // nor the names of its contributors may be used to endorse or // promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE // UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS // OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // ---------------------------------------------------------------------- /* Filename: shiftreg.v Version: 1.0 Verilog Standard: Verilog-2001 Description: A simple parameterized shift register. Notes: Any modifications to this file should meet the conditions set forth in the "Trellis Style Guide" Author: Dustin Richmond (@darichmond) Co-Authors: */ `timescale 1ns/1ns module shiftreg #(parameter C_DEPTH=10, parameter C_WIDTH=32, parameter C_VALUE=0 ) (input CLK, input RST_IN, input [C_WIDTH-1:0] WR_DATA, output [(C_DEPTH+1)*C_WIDTH-1:0] RD_DATA); // Start Flag Shift Register. Data enables are derived from the // taps on this shift register. wire [(C_DEPTH+1)*C_WIDTH-1:0] wDataShift; reg [C_WIDTH-1:0] rDataShift[C_DEPTH:0]; assign wDataShift[(C_WIDTH*0)+:C_WIDTH] = WR_DATA; always @(posedge CLK) begin rDataShift[0] <= WR_DATA; end genvar i; generate for (i = 1 ; i <= C_DEPTH; i = i + 1) begin : gen_sr_registers assign wDataShift[(C_WIDTH*i)+:C_WIDTH] = rDataShift[i-1]; always @(posedge CLK) begin if(RST_IN) rDataShift[i] <= C_VALUE; else rDataShift[i] <= rDataShift[i-1]; end end endgenerate assign RD_DATA = wDataShift; endmodule
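// ----------------------------------------------------------------------------
// Illustrative addition (not part of the original source): a minimal usage
// sketch for the shiftreg module above. It delays an 8-bit stream by four
// cycles by picking off the deepest tap of RD_DATA; the wrapper name, widths,
// and depth are arbitrary choices for the example.
// ----------------------------------------------------------------------------
`timescale 1ns/1ns
module shiftreg_usage_sketch (
  input  wire       CLK,
  input  wire       RST_IN,
  input  wire [7:0] DIN,
  output wire [7:0] DIN_DLY4     // DIN delayed by C_DEPTH = 4 clock cycles
);

  // RD_DATA packs C_DEPTH+1 taps of C_WIDTH bits each: tap 0 is the
  // combinational input, tap i is the input delayed by i cycles.
  wire [(4+1)*8-1:0] wTaps;

  shiftreg #(
    .C_DEPTH (4),
    .C_WIDTH (8),
    .C_VALUE (8'h00)             // value loaded into the deeper taps during reset
  ) delay4 (
    .CLK     (CLK),
    .RST_IN  (RST_IN),
    .WR_DATA (DIN),
    .RD_DATA (wTaps)
  );

  // Deepest tap = 4-cycle delayed copy of DIN.
  assign DIN_DLY4 = wTaps[(8*4)+:8];
endmodule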
// ---------------------------------------------------------------------- // Copyright (c) 2016, The Regents of the University of California All // rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents of the University of California // nor the names of its contributors may be used to endorse or // promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE // UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS // OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // ---------------------------------------------------------------------- //---------------------------------------------------------------------------- // Filename: rx_port_64.v // Version: 1.00.a // Verilog Standard: Verilog-2001 // Description: Receives data from the rx_engine and buffers the output // for the RIFFA channel. 
// Author: Matt Jacobsen // History: @mattj: Version 2.0 //----------------------------------------------------------------------------- `timescale 1ns/1ns module rx_port_64 #( parameter C_DATA_WIDTH = 9'd64, parameter C_MAIN_FIFO_DEPTH = 1024, parameter C_SG_FIFO_DEPTH = 512, parameter C_MAX_READ_REQ = 2, // Max read: 000=128B, 001=256B, 010=512B, 011=1024B, 100=2048B, 101=4096B // Local parameters parameter C_DATA_WORD_WIDTH = clog2((C_DATA_WIDTH/32)+1), parameter C_MAIN_FIFO_DEPTH_WIDTH = clog2((2**clog2(C_MAIN_FIFO_DEPTH))+1), parameter C_SG_FIFO_DEPTH_WIDTH = clog2((2**clog2(C_SG_FIFO_DEPTH))+1) ) ( input CLK, input RST, input [2:0] CONFIG_MAX_READ_REQUEST_SIZE, // Maximum read payload: 000=128B, 001=256B, 010=512B, 011=1024B, 100=2048B, 101=4096B output SG_RX_BUF_RECVD, // Scatter gather RX buffer completely read (ready for next if applicable) input [31:0] SG_RX_BUF_DATA, // Scatter gather RX buffer data input SG_RX_BUF_LEN_VALID, // Scatter gather RX buffer length valid input SG_RX_BUF_ADDR_HI_VALID, // Scatter gather RX buffer high address valid input SG_RX_BUF_ADDR_LO_VALID, // Scatter gather RX buffer low address valid output SG_TX_BUF_RECVD, // Scatter gather TX buffer completely read (ready for next if applicable) input [31:0] SG_TX_BUF_DATA, // Scatter gather TX buffer data input SG_TX_BUF_LEN_VALID, // Scatter gather TX buffer length valid input SG_TX_BUF_ADDR_HI_VALID, // Scatter gather TX buffer high address valid input SG_TX_BUF_ADDR_LO_VALID, // Scatter gather TX buffer low address valid output [C_DATA_WIDTH-1:0] SG_DATA, // Scatter gather TX buffer data output SG_DATA_EMPTY, // Scatter gather TX buffer data empty input SG_DATA_REN, // Scatter gather TX buffer data read enable input SG_RST, // Scatter gather TX buffer data reset output SG_ERR, // Scatter gather TX encountered an error input [31:0] TXN_DATA, // Read transaction data input TXN_LEN_VALID, // Read transaction length valid input TXN_OFF_LAST_VALID, // Read transaction offset/last valid output [31:0] TXN_DONE_LEN, // Read transaction actual transfer length output TXN_DONE, // Read transaction done input TXN_DONE_ACK, // Read transaction actual transfer length read output RX_REQ, // Read request input RX_REQ_ACK, // Read request accepted output [1:0] RX_REQ_TAG, // Read request data tag output [63:0] RX_REQ_ADDR, // Read request address output [9:0] RX_REQ_LEN, // Read request length input [C_DATA_WIDTH-1:0] MAIN_DATA, // Main incoming data input [C_DATA_WORD_WIDTH-1:0] MAIN_DATA_EN, // Main incoming data enable input MAIN_DONE, // Main incoming data complete input MAIN_ERR, // Main incoming data completed with error input [C_DATA_WIDTH-1:0] SG_RX_DATA, // Scatter gather for RX incoming data input [C_DATA_WORD_WIDTH-1:0] SG_RX_DATA_EN, // Scatter gather for RX incoming data enable input SG_RX_DONE, // Scatter gather for RX incoming data complete input SG_RX_ERR, // Scatter gather for RX incoming data completed with error input [C_DATA_WIDTH-1:0] SG_TX_DATA, // Scatter gather for TX incoming data input [C_DATA_WORD_WIDTH-1:0] SG_TX_DATA_EN, // Scatter gather for TX incoming data enable input SG_TX_DONE, // Scatter gather for TX incoming data complete input SG_TX_ERR, // Scatter gather for TX incoming data completed with error input CHNL_CLK, // Channel read clock output CHNL_RX, // Channel read receive signal input CHNL_RX_ACK, // Channle read received signal output CHNL_RX_LAST, // Channel last read output [31:0] CHNL_RX_LEN, // Channel read length output [30:0] CHNL_RX_OFF, // Channel read offset output 
[C_DATA_WIDTH-1:0] CHNL_RX_DATA, // Channel read data output CHNL_RX_DATA_VALID, // Channel read data valid input CHNL_RX_DATA_REN // Channel read data has been recieved ); `include "functions.vh" wire [C_DATA_WIDTH-1:0] wPackedMainData; wire wPackedMainWen; wire wPackedMainDone; wire wPackedMainErr; wire wMainFlush; wire wMainFlushed; wire [C_DATA_WIDTH-1:0] wPackedSgRxData; wire wPackedSgRxWen; wire wPackedSgRxDone; wire wPackedSgRxErr; wire wSgRxFlush; wire wSgRxFlushed; wire [C_DATA_WIDTH-1:0] wPackedSgTxData; wire wPackedSgTxWen; wire wPackedSgTxDone; wire wPackedSgTxErr; wire wSgTxFlush; wire wSgTxFlushed; wire wMainDataRen; wire wMainDataEmpty; wire [C_DATA_WIDTH-1:0] wMainData; wire wSgRxRst; wire wSgRxDataRen; wire wSgRxDataEmpty; wire [C_DATA_WIDTH-1:0] wSgRxData; wire [C_SG_FIFO_DEPTH_WIDTH-1:0] wSgRxFifoCount; wire wSgTxRst; wire [C_SG_FIFO_DEPTH_WIDTH-1:0] wSgTxFifoCount; wire wSgRxReq; wire [63:0] wSgRxReqAddr; wire [9:0] wSgRxReqLen; wire wSgTxReq; wire [63:0] wSgTxReqAddr; wire [9:0] wSgTxReqLen; wire wSgRxReqProc; wire wSgTxReqProc; wire wMainReqProc; wire wReqAck; wire wSgElemRdy; wire wSgElemRen; wire [63:0] wSgElemAddr; wire [31:0] wSgElemLen; wire wSgRst; wire wMainReq; wire [63:0] wMainReqAddr; wire [9:0] wMainReqLen; wire wTxnErr; wire wChnlRx; wire wChnlRxRecvd; wire wChnlRxAckRecvd; wire wChnlRxLast; wire [31:0] wChnlRxLen; wire [30:0] wChnlRxOff; wire [31:0] wChnlRxConsumed; reg [4:0] rWideRst=0; reg rRst=0; assign SG_ERR = (wPackedSgTxDone & wPackedSgTxErr); // Generate a wide reset from the input reset. always @ (posedge CLK) begin rRst <= #1 rWideRst[4]; if (RST) rWideRst <= #1 5'b11111; else rWideRst <= (rWideRst<<1); end // Pack received data tightly into our FIFOs fifo_packer_64 mainFifoPacker ( .CLK(CLK), .RST(rRst), .DATA_IN(MAIN_DATA), .DATA_IN_EN(MAIN_DATA_EN), .DATA_IN_DONE(MAIN_DONE), .DATA_IN_ERR(MAIN_ERR), .DATA_IN_FLUSH(wMainFlush), .PACKED_DATA(wPackedMainData), .PACKED_WEN(wPackedMainWen), .PACKED_DATA_DONE(wPackedMainDone), .PACKED_DATA_ERR(wPackedMainErr), .PACKED_DATA_FLUSHED(wMainFlushed) ); fifo_packer_64 sgRxFifoPacker ( .CLK(CLK), .RST(rRst), .DATA_IN(SG_RX_DATA), .DATA_IN_EN(SG_RX_DATA_EN), .DATA_IN_DONE(SG_RX_DONE), .DATA_IN_ERR(SG_RX_ERR), .DATA_IN_FLUSH(wSgRxFlush), .PACKED_DATA(wPackedSgRxData), .PACKED_WEN(wPackedSgRxWen), .PACKED_DATA_DONE(wPackedSgRxDone), .PACKED_DATA_ERR(wPackedSgRxErr), .PACKED_DATA_FLUSHED(wSgRxFlushed) ); fifo_packer_64 sgTxFifoPacker ( .CLK(CLK), .RST(rRst), .DATA_IN(SG_TX_DATA), .DATA_IN_EN(SG_TX_DATA_EN), .DATA_IN_DONE(SG_TX_DONE), .DATA_IN_ERR(SG_TX_ERR), .DATA_IN_FLUSH(wSgTxFlush), .PACKED_DATA(wPackedSgTxData), .PACKED_WEN(wPackedSgTxWen), .PACKED_DATA_DONE(wPackedSgTxDone), .PACKED_DATA_ERR(wPackedSgTxErr), .PACKED_DATA_FLUSHED(wSgTxFlushed) ); // FIFOs for storing received data for the channel. 
(* RAM_STYLE="BLOCK" *) async_fifo_fwft #(.C_WIDTH(C_DATA_WIDTH), .C_DEPTH(C_MAIN_FIFO_DEPTH)) mainFifo ( .WR_CLK(CLK), .WR_RST(rRst | (wTxnErr & TXN_DONE) | wSgRst), .WR_EN(wPackedMainWen), .WR_DATA(wPackedMainData), .WR_FULL(), .RD_CLK(CHNL_CLK), .RD_RST(rRst | (wTxnErr & TXN_DONE) | wSgRst), .RD_EN(wMainDataRen), .RD_DATA(wMainData), .RD_EMPTY(wMainDataEmpty) ); (* RAM_STYLE="BLOCK" *) sync_fifo #(.C_WIDTH(C_DATA_WIDTH), .C_DEPTH(C_SG_FIFO_DEPTH), .C_PROVIDE_COUNT(1)) sgRxFifo ( .RST(rRst | wSgRxRst), .CLK(CLK), .WR_EN(wPackedSgRxWen), .WR_DATA(wPackedSgRxData), .FULL(), .RD_EN(wSgRxDataRen), .RD_DATA(wSgRxData), .EMPTY(wSgRxDataEmpty), .COUNT(wSgRxFifoCount) ); (* RAM_STYLE="BLOCK" *) sync_fifo #(.C_WIDTH(C_DATA_WIDTH), .C_DEPTH(C_SG_FIFO_DEPTH), .C_PROVIDE_COUNT(1)) sgTxFifo ( .RST(rRst | wSgTxRst), .CLK(CLK), .WR_EN(wPackedSgTxWen), .WR_DATA(wPackedSgTxData), .FULL(), .RD_EN(SG_DATA_REN), .RD_DATA(SG_DATA), .EMPTY(SG_DATA_EMPTY), .COUNT(wSgTxFifoCount) ); // Manage requesting and acknowledging scatter gather data. Note that // these modules will share the main requestor's RX channel. They will // take priority over the main logic's use of the RX channel. sg_list_requester #(.C_FIFO_DATA_WIDTH(C_DATA_WIDTH), .C_FIFO_DEPTH(C_SG_FIFO_DEPTH), .C_MAX_READ_REQ(C_MAX_READ_REQ)) sgRxReq ( .CLK(CLK), .RST(rRst), .CONFIG_MAX_READ_REQUEST_SIZE(CONFIG_MAX_READ_REQUEST_SIZE), .USER_RST(wSgRst), .BUF_RECVD(SG_RX_BUF_RECVD), .BUF_DATA(SG_RX_BUF_DATA), .BUF_LEN_VALID(SG_RX_BUF_LEN_VALID), .BUF_ADDR_HI_VALID(SG_RX_BUF_ADDR_HI_VALID), .BUF_ADDR_LO_VALID(SG_RX_BUF_ADDR_LO_VALID), .FIFO_COUNT(wSgRxFifoCount), .FIFO_FLUSH(wSgRxFlush), .FIFO_FLUSHED(wSgRxFlushed), .FIFO_RST(wSgRxRst), .RX_REQ(wSgRxReq), .RX_ADDR(wSgRxReqAddr), .RX_LEN(wSgRxReqLen), .RX_REQ_ACK(wReqAck & wSgRxReqProc), .RX_DONE(wPackedSgRxDone) ); sg_list_requester #(.C_FIFO_DATA_WIDTH(C_DATA_WIDTH), .C_FIFO_DEPTH(C_SG_FIFO_DEPTH), .C_MAX_READ_REQ(C_MAX_READ_REQ)) sgTxReq ( .CLK(CLK), .RST(rRst), .CONFIG_MAX_READ_REQUEST_SIZE(CONFIG_MAX_READ_REQUEST_SIZE), .USER_RST(SG_RST), .BUF_RECVD(SG_TX_BUF_RECVD), .BUF_DATA(SG_TX_BUF_DATA), .BUF_LEN_VALID(SG_TX_BUF_LEN_VALID), .BUF_ADDR_HI_VALID(SG_TX_BUF_ADDR_HI_VALID), .BUF_ADDR_LO_VALID(SG_TX_BUF_ADDR_LO_VALID), .FIFO_COUNT(wSgTxFifoCount), .FIFO_FLUSH(wSgTxFlush), .FIFO_FLUSHED(wSgTxFlushed), .FIFO_RST(wSgTxRst), .RX_REQ(wSgTxReq), .RX_ADDR(wSgTxReqAddr), .RX_LEN(wSgTxReqLen), .RX_REQ_ACK(wReqAck & wSgTxReqProc), .RX_DONE(wPackedSgTxDone) ); // A read requester for the channel and scatter gather requesters. rx_port_requester_mux requesterMux ( .RST(rRst), .CLK(CLK), .SG_RX_REQ(wSgRxReq), .SG_RX_LEN(wSgRxReqLen), .SG_RX_ADDR(wSgRxReqAddr), .SG_RX_REQ_PROC(wSgRxReqProc), .SG_TX_REQ(wSgTxReq), .SG_TX_LEN(wSgTxReqLen), .SG_TX_ADDR(wSgTxReqAddr), .SG_TX_REQ_PROC(wSgTxReqProc), .MAIN_REQ(wMainReq), .MAIN_LEN(wMainReqLen), .MAIN_ADDR(wMainReqAddr), .MAIN_REQ_PROC(wMainReqProc), .RX_REQ(RX_REQ), .RX_REQ_ACK(RX_REQ_ACK), .RX_REQ_TAG(RX_REQ_TAG), .RX_REQ_ADDR(RX_REQ_ADDR), .RX_REQ_LEN(RX_REQ_LEN), .REQ_ACK(wReqAck) ); // Read the scatter gather buffer address and length, continuously so that // we have it ready whenever the next buffer is needed. 
sg_list_reader_64 #(.C_DATA_WIDTH(C_DATA_WIDTH)) sgListReader ( .CLK(CLK), .RST(rRst | wSgRst), .BUF_DATA(wSgRxData), .BUF_DATA_EMPTY(wSgRxDataEmpty), .BUF_DATA_REN(wSgRxDataRen), .VALID(wSgElemRdy), .EMPTY(), .REN(wSgElemRen), .ADDR(wSgElemAddr), .LEN(wSgElemLen) ); // Main port reader logic rx_port_reader #(.C_DATA_WIDTH(C_DATA_WIDTH), .C_FIFO_DEPTH(C_MAIN_FIFO_DEPTH), .C_MAX_READ_REQ(C_MAX_READ_REQ)) reader ( .CLK(CLK), .RST(rRst), .CONFIG_MAX_READ_REQUEST_SIZE(CONFIG_MAX_READ_REQUEST_SIZE), .TXN_DATA(TXN_DATA), .TXN_LEN_VALID(TXN_LEN_VALID), .TXN_OFF_LAST_VALID(TXN_OFF_LAST_VALID), .TXN_DONE_LEN(TXN_DONE_LEN), .TXN_DONE(TXN_DONE), .TXN_ERR(wTxnErr), .TXN_DONE_ACK(TXN_DONE_ACK), .TXN_DATA_FLUSH(wMainFlush), .TXN_DATA_FLUSHED(wMainFlushed), .RX_REQ(wMainReq), .RX_ADDR(wMainReqAddr), .RX_LEN(wMainReqLen), .RX_REQ_ACK(wReqAck & wMainReqProc), .RX_DATA_EN(MAIN_DATA_EN), .RX_DONE(wPackedMainDone), .RX_ERR(wPackedMainErr), .SG_DONE(wPackedSgRxDone), .SG_ERR(wPackedSgRxErr), .SG_ELEM_ADDR(wSgElemAddr), .SG_ELEM_LEN(wSgElemLen), .SG_ELEM_RDY(wSgElemRdy), .SG_ELEM_REN(wSgElemRen), .SG_RST(wSgRst), .CHNL_RX(wChnlRx), .CHNL_RX_LEN(wChnlRxLen), .CHNL_RX_LAST(wChnlRxLast), .CHNL_RX_OFF(wChnlRxOff), .CHNL_RX_RECVD(wChnlRxRecvd), .CHNL_RX_ACK_RECVD(wChnlRxAckRecvd), .CHNL_RX_CONSUMED(wChnlRxConsumed) ); // Manage the CHNL_RX* signals in the CHNL_CLK domain. rx_port_channel_gate #(.C_DATA_WIDTH(C_DATA_WIDTH)) gate ( .RST(rRst), .CLK(CLK), .RX(wChnlRx), .RX_RECVD(wChnlRxRecvd), .RX_ACK_RECVD(wChnlRxAckRecvd), .RX_LAST(wChnlRxLast), .RX_LEN(wChnlRxLen), .RX_OFF(wChnlRxOff), .RX_CONSUMED(wChnlRxConsumed), .RD_DATA(wMainData), .RD_EMPTY(wMainDataEmpty), .RD_EN(wMainDataRen), .CHNL_CLK(CHNL_CLK), .CHNL_RX(CHNL_RX), .CHNL_RX_ACK(CHNL_RX_ACK), .CHNL_RX_LAST(CHNL_RX_LAST), .CHNL_RX_LEN(CHNL_RX_LEN), .CHNL_RX_OFF(CHNL_RX_OFF), .CHNL_RX_DATA(CHNL_RX_DATA), .CHNL_RX_DATA_VALID(CHNL_RX_DATA_VALID), .CHNL_RX_DATA_REN(CHNL_RX_DATA_REN) ); /* wire [35:0] wControl0; chipscope_icon_1 cs_icon( .CONTROL0(wControl0) ); chipscope_ila_t8_512 a0( .CLK(CLK), .CONTROL(wControl0), .TRIG0({SG_RX_DATA_EN != 0, wSgElemRen, wMainReq | wSgRxReq | wSgTxReq, RX_REQ, SG_RX_BUF_ADDR_LO_VALID | SG_RX_BUF_ADDR_HI_VALID | SG_RX_BUF_LEN_VALID, wSgRst, wTxnErr | wPackedSgRxDone | wSgRxFlush | wSgRxFlushed, TXN_OFF_LAST_VALID | TXN_LEN_VALID}), .DATA({ wPackedSgRxErr, // 1 wPackedSgRxDone, // 1 wPackedSgRxWen, // 1 wPackedSgRxData, // 64 SG_RX_ERR, // 1 SG_RX_DONE, // 1 SG_RX_DATA_EN, // 2 SG_RX_DATA, // 64 wSgRxDataRen, // 1 wSgRxDataEmpty, // 1 wSgRxData, // 64 wSgRst, // 1 SG_RST, // 1 wPackedSgRxDone, // 1 wSgRxRst, // 1 wSgRxFlushed, // 1 wSgRxFlush, // 1 SG_RX_BUF_ADDR_LO_VALID, // 1 SG_RX_BUF_ADDR_HI_VALID, // 1 SG_RX_BUF_LEN_VALID, // 1 SG_RX_BUF_DATA, // 32 RX_REQ_ADDR, // 64 RX_REQ_TAG, // 2 RX_REQ_ACK, // 1 RX_REQ, // 1 wSgTxReqProc, // 1 wSgTxReqAddr, // 64 wSgTxReq, // 1 wSgRxReqProc, // 1 wSgRxReqAddr, // 64 wSgRxReq, // 1 wMainReqProc, // 1 wMainReqAddr, // 64 wMainReq, // 1 wReqAck, // 1 wTxnErr, // 1 TXN_OFF_LAST_VALID, // 1 TXN_LEN_VALID}) // 1 ); */ endmodule
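// ----------------------------------------------------------------------
// Worked example (added for illustration, assuming the clog2 function in
// functions.vh is a ceiling log2): with the default rx_port_64 parameters,
//   C_DATA_WIDTH            = 64
//   C_DATA_WORD_WIDTH       = clog2((64/32)+1)          = clog2(3)    = 2
//   C_MAIN_FIFO_DEPTH_WIDTH = clog2((2**clog2(1024))+1) = clog2(1025) = 11
//   C_SG_FIFO_DEPTH_WIDTH   = clog2((2**clog2(512))+1)  = clog2(513)  = 10
// The "+1" widens the FIFO count ports so a completely full FIFO
// (count == depth) is still representable. MAIN_DATA_EN is then 2 bits
// wide, enough to encode 0, 1, or 2 valid 32-bit words per 64-bit beat.
// ----------------------------------------------------------------------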
//*****************************************************************************
// (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved.
//
// This file contains confidential and proprietary information
// of Xilinx, Inc. and is protected under U.S. and
// international copyright and other intellectual property
// laws.
//
// DISCLAIMER
// This disclaimer is not a license and does not grant any
// rights to the materials distributed herewith. Except as
// otherwise provided in a valid license issued to you by
// Xilinx, and to the maximum extent permitted by applicable
// law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
// WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
// AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
// BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
// INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
// (2) Xilinx shall not be liable (whether in contract or tort,
// including negligence, or under any other theory of
// liability) for any loss or damage of any kind or nature
// related to, arising under or in connection with these
// materials, including for any direct, or any indirect,
// special, incidental, or consequential loss or damage
// (including loss of data, profits, goodwill, or any type of
// loss or damage suffered as a result of any action brought
// by a third party) even if such damage or loss was
// reasonably foreseeable or Xilinx had been advised of the
// possibility of the same.
//
// CRITICAL APPLICATIONS
// Xilinx products are not designed or intended to be fail-
// safe, or for use in any application requiring fail-safe
// performance, such as life-support or safety devices or
// systems, Class III medical devices, nuclear facilities,
// applications related to the deployment of airbags, or any
// other applications that could lead to death, personal
// injury, or severe property or environmental damage
// (individually and collectively, "Critical
// Applications"). Customer assumes the sole risk and
// liability of any use of Xilinx products in Critical
// Applications, subject only to applicable laws and
// regulations governing limitations on product liability.
//
// THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
// PART OF THIS FILE AT ALL TIMES.
//
//*****************************************************************************
//   ____  ____
//  /   /\/   /
// /___/  \  /    Vendor             : Xilinx
// \   \   \/     Version            : %version
//  \   \         Application        : MIG
//  /   /         Filename           : round_robin_arb.v
// /___/   /\     Date Last Modified : $date$
// \   \  /  \    Date Created       : Tue Jun 30 2009
//  \___\/\___\
//
//Device           : 7-Series
//Design Name      : DDR3 SDRAM
//Purpose          :
//Reference        :
//Revision History :
//*****************************************************************************

// A simple round robin arbiter implemented in a not so simple
// way. Two things make this special. First, it takes width as
// a parameter and secondly it's constructed in a way to work with
// the restrictions of synthesis programs.
//
// Consider each req/grant pair to be a
// "channel". The arbiter computes a grant response to a request
// on a channel by channel basis.
//
// The arbiter implements a "round robin" algorithm. I.e., the granting
// process is totally fair and symmetric. Each requester is given
// equal priority. If all requests are asserted, the arbiter will
// work sequentially around the list of requesters, giving each a grant.
//
// Grant priority is based on the "last_master". The last_master
// vector stores the channel receiving the most recent grant. The
// next higher numbered channel (wrapping around to zero) has highest
// priority in subsequent cycles. Relative priority wraps around
// the request vector with the last_master channel having lowest priority.
//
// At the highest implementation level, a per channel inhibit signal is computed.
// This inhibit is bit-wise AND'ed with the incoming requests to
// generate the grant.
//
// There will be at most a single grant per state. The logic
// of the arbiter depends on this.
//
// Once a grant is given, it is stored as the last_master. The
// last_master vector is initialized at reset to the zero'th channel.
// Although the particular channel doesn't matter, it does matter
// that the last_master contains a valid grant pattern.
//
// The heavy lifting is in computing the per channel inhibit signals.
// This is accomplished in the generate statement.
//
// The first "for" loop in the generate statement steps through the channels.
//
// The second "for" loop steps through the last_master vector
// for each channel. For each last_master bit, an inh_group is generated.
// Following the end of the second "for" loop, the inh_group signals are OR'ed
// together to generate the overall inhibit bit for the channel.
//
// For a four bit wide arbiter, this is what's generated for channel zero:
//
//   inh_group[1] = last_master[0] && |req[3:1]; // any other req inhibits
//   inh_group[2] = last_master[1] && |req[3:2]; // req[3], or req[2] inhibit
//   inh_group[3] = last_master[2] && |req[3:3]; // only req[3] inhibits
//
// For req[0], last_master[3] is ignored because channel zero is highest priority
// if last_master[3] is true.

`timescale 1ps/1ps

module mig_7series_v1_9_round_robin_arb
  #(
    parameter TCQ = 100,
    parameter WIDTH = 3
    )
   (
    /*AUTOARG*/
    // Outputs
    grant_ns, grant_r,
    // Inputs
    clk, rst, req, disable_grant, current_master, upd_last_master
    );

  input clk;
  input rst;

  input [WIDTH-1:0] req;

  wire [WIDTH-1:0] last_master_ns;

  reg [WIDTH*2-1:0] dbl_last_master_ns;
  always @(/*AS*/last_master_ns)
    dbl_last_master_ns = {last_master_ns, last_master_ns};

  reg [WIDTH*2-1:0] dbl_req;
  always @(/*AS*/req) dbl_req = {req, req};

  reg [WIDTH-1:0] inhibit = {WIDTH{1'b0}};

  genvar i;
  genvar j;
  generate
    for (i = 0; i < WIDTH; i = i + 1) begin : channel
      wire [WIDTH-1:1] inh_group;
      for (j = 0; j < (WIDTH-1); j = j + 1) begin : last_master
        assign inh_group[j+1] =
          dbl_last_master_ns[i+j] && |dbl_req[i+WIDTH-1:i+j+1];
      end
      always @(/*AS*/inh_group) inhibit[i] = |inh_group;
    end
  endgenerate

  input disable_grant;
  output wire [WIDTH-1:0] grant_ns;
  assign grant_ns = req & ~inhibit & {WIDTH{~disable_grant}};

  output reg [WIDTH-1:0] grant_r;
  always @(posedge clk) grant_r <= #TCQ grant_ns;

  input [WIDTH-1:0] current_master;
  input upd_last_master;
  reg [WIDTH-1:0] last_master_r;
  localparam ONE = 1 << (WIDTH - 1); // Changed from '1' to fix the CR #544024
                                     // A '1' in the LSB of the last_master_r
                                     // signal gives a low priority to req[0]
                                     // after reset. To avoid this made MSB as
                                     // '1' at reset.
  assign last_master_ns = rst
                            ? ONE[0+:WIDTH]
                            : upd_last_master
                                ? current_master
                                : last_master_r;
  always @(posedge clk) last_master_r <= #TCQ last_master_ns;

`ifdef MC_SVA
  grant_is_one_hot_zero:
    assert property (@(posedge clk) (rst || $onehot0(grant_ns)));
  last_master_r_is_one_hot:
    assert property (@(posedge clk) (rst || $onehot(last_master_r)));
`endif

endmodule
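// ----------------------------------------------------------------------
// Illustrative instantiation sketch (not part of the original source).
// The feedback wiring below -- registering the grant and feeding it back
// as current_master with upd_last_master -- is an assumption made for
// illustration; it matches the comment above that a grant, once given,
// becomes the new last_master, but it is not necessarily how the MIG
// parent module drives these ports. The wrapper name and port names
// (round_robin_arb_example, grant) are hypothetical.
// ----------------------------------------------------------------------
module round_robin_arb_example #(
    parameter TCQ   = 100,
    parameter WIDTH = 4
) (
    input              clk,
    input              rst,
    input  [WIDTH-1:0] req,    // one request line per channel
    output [WIDTH-1:0] grant   // registered one-hot (or all-zero) grant
);

    wire [WIDTH-1:0] grant_ns;
    wire [WIDTH-1:0] grant_r;

    mig_7series_v1_9_round_robin_arb #(
        .TCQ   (TCQ),
        .WIDTH (WIDTH)
    ) arb (
        .clk             (clk),
        .rst             (rst),
        .req             (req),
        .disable_grant   (1'b0),     // never mask grants in this sketch
        .current_master  (grant_r),  // last cycle's grant becomes last_master
        .upd_last_master (|grant_r), // update only when a grant was issued
        .grant_ns        (grant_ns),
        .grant_r         (grant_r)
    );

    assign grant = grant_r;

endmodule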
//***************************************************************************** // (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor : Xilinx // \ \ \/ Version : %version // \ \ Application : MIG // / / Filename : arb_select.v // /___/ /\ Date Last Modified : $date$ // \ \ / \ Date Created : Tue Jun 30 2009 // \___\/\___\ // //Device : 7-Series //Design Name : DDR3 SDRAM //Purpose : //Reference : //Revision History : //***************************************************************************** // Based on granta_r and grantc_r, this module selects a // row and column command from the request information // provided by the bank machines. // // Depending on address mode configuration, nCL and nCWL, a column // command pipeline of up to three states will be created. 
`timescale 1 ps / 1 ps module mig_7series_v1_9_arb_select # ( parameter TCQ = 100, parameter EVEN_CWL_2T_MODE = "OFF", parameter ADDR_CMD_MODE = "1T", parameter BANK_VECT_INDX = 11, parameter BANK_WIDTH = 3, parameter BURST_MODE = "8", parameter CS_WIDTH = 4, parameter CL = 5, parameter CWL = 5, parameter DATA_BUF_ADDR_VECT_INDX = 31, parameter DATA_BUF_ADDR_WIDTH = 8, parameter DRAM_TYPE = "DDR3", parameter EARLY_WR_DATA_ADDR = "OFF", parameter ECC = "OFF", parameter nBANK_MACHS = 4, parameter nCK_PER_CLK = 2, parameter nCS_PER_RANK = 1, parameter CKE_ODT_AUX = "FALSE", parameter nSLOTS = 2, parameter RANKS = 1, parameter RANK_VECT_INDX = 15, parameter RANK_WIDTH = 2, parameter ROW_VECT_INDX = 63, parameter ROW_WIDTH = 16, parameter RTT_NOM = "40", parameter RTT_WR = "120", parameter SLOT_0_CONFIG = 8'b0000_0101, parameter SLOT_1_CONFIG = 8'b0000_1010 ) ( // Outputs output wire col_periodic_rd, output wire [RANK_WIDTH-1:0] col_ra, output wire [BANK_WIDTH-1:0] col_ba, output wire [ROW_WIDTH-1:0] col_a, output wire col_rmw, output wire col_rd_wr, output wire col_size, output wire [ROW_WIDTH-1:0] col_row, output wire [DATA_BUF_ADDR_WIDTH-1:0] col_data_buf_addr, output wire [DATA_BUF_ADDR_WIDTH-1:0] col_wr_data_buf_addr, output wire [nCK_PER_CLK-1:0] mc_ras_n, output wire [nCK_PER_CLK-1:0] mc_cas_n, output wire [nCK_PER_CLK-1:0] mc_we_n, output wire [nCK_PER_CLK*ROW_WIDTH-1:0] mc_address, output wire [nCK_PER_CLK*BANK_WIDTH-1:0] mc_bank, output wire [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] mc_cs_n, output wire [1:0] mc_odt, output wire [nCK_PER_CLK-1:0] mc_cke, output wire [3:0] mc_aux_out0, output wire [3:0] mc_aux_out1, output [2:0] mc_cmd, output wire [5:0] mc_data_offset, output wire [5:0] mc_data_offset_1, output wire [5:0] mc_data_offset_2, output wire [1:0] mc_cas_slot, output wire [RANK_WIDTH-1:0] rnk_config, // Inputs input clk, input rst, input init_calib_complete, input [RANK_VECT_INDX:0] req_rank_r, input [BANK_VECT_INDX:0] req_bank_r, input [nBANK_MACHS-1:0] req_ras, input [nBANK_MACHS-1:0] req_cas, input [nBANK_MACHS-1:0] req_wr_r, input [nBANK_MACHS-1:0] grant_row_r, input [nBANK_MACHS-1:0] grant_pre_r, input [ROW_VECT_INDX:0] row_addr, input [nBANK_MACHS-1:0] row_cmd_wr, input insert_maint_r1, input maint_zq_r, input maint_sre_r, input maint_srx_r, input [RANK_WIDTH-1:0] maint_rank_r, input [nBANK_MACHS-1:0] req_periodic_rd_r, input [nBANK_MACHS-1:0] req_size_r, input [nBANK_MACHS-1:0] rd_wr_r, input [ROW_VECT_INDX:0] req_row_r, input [ROW_VECT_INDX:0] col_addr, input [DATA_BUF_ADDR_VECT_INDX:0] req_data_buf_addr_r, input [nBANK_MACHS-1:0] grant_col_r, input [nBANK_MACHS-1:0] grant_col_wr, input [6*RANKS-1:0] calib_rddata_offset, input [6*RANKS-1:0] calib_rddata_offset_1, input [6*RANKS-1:0] calib_rddata_offset_2, input [5:0] col_channel_offset, input [nBANK_MACHS-1:0] grant_config_r, input rnk_config_strobe, input [7:0] slot_0_present, input [7:0] slot_1_present, input send_cmd0_row, input send_cmd0_col, input send_cmd1_row, input send_cmd1_col, input send_cmd2_row, input send_cmd2_col, input send_cmd2_pre, input send_cmd3_col, input sent_col, input cs_en0, input cs_en1, input cs_en2, input cs_en3 ); localparam OUT_CMD_WIDTH = RANK_WIDTH + BANK_WIDTH + ROW_WIDTH + 1 + 1 + 1; reg col_rd_wr_ns; reg col_rd_wr_r = 1'b0; reg [OUT_CMD_WIDTH-1:0] col_cmd_r = {OUT_CMD_WIDTH {1'b0}}; reg [OUT_CMD_WIDTH-1:0] row_cmd_r = {OUT_CMD_WIDTH {1'b0}}; // calib_rd_data_offset for currently targeted rank reg [5:0] rank_rddata_offset_0; reg [5:0] rank_rddata_offset_1; reg [5:0] 
rank_rddata_offset_2; // Toggle CKE[0] when entering and exiting self-refresh, disable CKE[1] assign mc_aux_out0[0] = (maint_sre_r || maint_srx_r) & insert_maint_r1; assign mc_aux_out0[2] = 1'b0; reg cke_r; reg cke_ns; generate if(CKE_ODT_AUX == "FALSE")begin always @(posedge clk) begin if (rst) cke_r = 1'b1; else cke_r = cke_ns; end always @(*) begin cke_ns = 1'b1; if (maint_sre_r & insert_maint_r1) cke_ns = 1'b0; else if (cke_r==1'b0) begin if (maint_srx_r & insert_maint_r1) cke_ns = 1'b1; else cke_ns = 1'b0; end end end endgenerate // Disable ODT & CKE toggle enable high bits assign mc_aux_out1 = 4'b0; // implement PHY command word assign mc_cmd[0] = sent_col; assign mc_cmd[1] = EVEN_CWL_2T_MODE == "ON" ? sent_col && col_rd_wr_r : sent_col && col_rd_wr_ns; assign mc_cmd[2] = ~sent_col; // generate calib_rd_data_offset for current rank - only use rank 0 values for now always @(calib_rddata_offset or calib_rddata_offset_1 or calib_rddata_offset_2) begin rank_rddata_offset_0 = calib_rddata_offset[5:0]; rank_rddata_offset_1 = calib_rddata_offset_1[5:0]; rank_rddata_offset_2 = calib_rddata_offset_2[5:0]; end // generate data offset generate if(EVEN_CWL_2T_MODE == "ON") begin : gen_mc_data_offset_even_cwl_2t assign mc_data_offset = ~sent_col ? 6'b0 : col_rd_wr_r ? rank_rddata_offset_0 + col_channel_offset : nCK_PER_CLK == 2 ? CWL - 2 + col_channel_offset : // nCK_PER_CLK == 4 CWL + 2 + col_channel_offset; assign mc_data_offset_1 = ~sent_col ? 6'b0 : col_rd_wr_r ? rank_rddata_offset_1 + col_channel_offset : nCK_PER_CLK == 2 ? CWL - 2 + col_channel_offset : // nCK_PER_CLK == 4 CWL + 2 + col_channel_offset; assign mc_data_offset_2 = ~sent_col ? 6'b0 : col_rd_wr_r ? rank_rddata_offset_2 + col_channel_offset : nCK_PER_CLK == 2 ? CWL - 2 + col_channel_offset : // nCK_PER_CLK == 4 CWL + 2 + col_channel_offset; end else begin : gen_mc_data_offset_not_even_cwl_2t assign mc_data_offset = ~sent_col ? 6'b0 : col_rd_wr_ns ? rank_rddata_offset_0 + col_channel_offset : nCK_PER_CLK == 2 ? CWL - 2 + col_channel_offset : // nCK_PER_CLK == 4 CWL + 2 + col_channel_offset; assign mc_data_offset_1 = ~sent_col ? 6'b0 : col_rd_wr_ns ? rank_rddata_offset_1 + col_channel_offset : nCK_PER_CLK == 2 ? CWL - 2 + col_channel_offset : // nCK_PER_CLK == 4 CWL + 2 + col_channel_offset; assign mc_data_offset_2 = ~sent_col ? 6'b0 : col_rd_wr_ns ? rank_rddata_offset_2 + col_channel_offset : nCK_PER_CLK == 2 ? CWL - 2 + col_channel_offset : // nCK_PER_CLK == 4 CWL + 2 + col_channel_offset; end endgenerate assign mc_cas_slot = col_channel_offset[1:0]; // Based on arbitration results, select the row and column commands. integer i; reg [OUT_CMD_WIDTH-1:0] row_cmd_ns; generate begin : row_mux wire [OUT_CMD_WIDTH-1:0] maint_cmd = {maint_rank_r, // maintenance rank row_cmd_r[15+:(BANK_WIDTH+ROW_WIDTH-11)], // bank plus upper address bits 1'b0, // A10 = 0 for ZQCS row_cmd_r[3+:10], // address bits [9:0] // ZQ, SRX or SRE/REFRESH (maint_zq_r ? 3'b110 : maint_srx_r ? 3'b111 : 3'b001) }; always @(/*AS*/grant_row_r or insert_maint_r1 or maint_cmd or req_bank_r or req_cas or req_rank_r or req_ras or row_addr or row_cmd_r or row_cmd_wr or rst) begin row_cmd_ns = rst ? {RANK_WIDTH{1'b0}} : insert_maint_r1 ? 
maint_cmd : row_cmd_r; for (i=0; i<nBANK_MACHS; i=i+1) if (grant_row_r[i]) row_cmd_ns = {req_rank_r[(RANK_WIDTH*i)+:RANK_WIDTH], req_bank_r[(BANK_WIDTH*i)+:BANK_WIDTH], row_addr[(ROW_WIDTH*i)+:ROW_WIDTH], req_ras[i], req_cas[i], row_cmd_wr[i]}; end if (ADDR_CMD_MODE == "2T" && nCK_PER_CLK == 2) always @(posedge clk) row_cmd_r <= #TCQ row_cmd_ns; end // row_mux endgenerate reg [OUT_CMD_WIDTH-1:0] pre_cmd_ns; generate if((nCK_PER_CLK == 4) && (ADDR_CMD_MODE != "2T")) begin : pre_mux reg [OUT_CMD_WIDTH-1:0] pre_cmd_r = {OUT_CMD_WIDTH {1'b0}}; always @(/*AS*/grant_pre_r or req_bank_r or req_cas or req_rank_r or req_ras or row_addr or pre_cmd_r or row_cmd_wr or rst) begin pre_cmd_ns = rst ? {RANK_WIDTH{1'b0}} : pre_cmd_r; for (i=0; i<nBANK_MACHS; i=i+1) if (grant_pre_r[i]) pre_cmd_ns = {req_rank_r[(RANK_WIDTH*i)+:RANK_WIDTH], req_bank_r[(BANK_WIDTH*i)+:BANK_WIDTH], row_addr[(ROW_WIDTH*i)+:ROW_WIDTH], req_ras[i], req_cas[i], row_cmd_wr[i]}; end end // pre_mux endgenerate reg [OUT_CMD_WIDTH-1:0] col_cmd_ns; generate begin : col_mux reg col_periodic_rd_ns; reg col_periodic_rd_r; reg col_rmw_ns; reg col_rmw_r; reg col_size_ns; reg col_size_r; reg [ROW_WIDTH-1:0] col_row_ns; reg [ROW_WIDTH-1:0] col_row_r; reg [DATA_BUF_ADDR_WIDTH-1:0] col_data_buf_addr_ns; reg [DATA_BUF_ADDR_WIDTH-1:0] col_data_buf_addr_r; always @(col_addr or col_cmd_r or col_data_buf_addr_r or col_periodic_rd_r or col_rmw_r or col_row_r or col_size_r or grant_col_r or rd_wr_r or req_bank_r or req_data_buf_addr_r or req_periodic_rd_r or req_rank_r or req_row_r or req_size_r or req_wr_r or rst or col_rd_wr_r) begin col_periodic_rd_ns = ~rst && col_periodic_rd_r; col_cmd_ns = {(rst ? {RANK_WIDTH{1'b0}} : col_cmd_r[(OUT_CMD_WIDTH-1)-:RANK_WIDTH]), ((rst && ECC != "OFF") ? {OUT_CMD_WIDTH-3-RANK_WIDTH{1'b0}} : col_cmd_r[3+:(OUT_CMD_WIDTH-3-RANK_WIDTH)]), (rst ? 3'b0 : col_cmd_r[2:0])}; col_rmw_ns = col_rmw_r; col_size_ns = rst ? 1'b0 : col_size_r; col_row_ns = col_row_r; col_rd_wr_ns = col_rd_wr_r; col_data_buf_addr_ns = col_data_buf_addr_r; for (i=0; i<nBANK_MACHS; i=i+1) if (grant_col_r[i]) begin col_periodic_rd_ns = req_periodic_rd_r[i]; col_cmd_ns = {req_rank_r[(RANK_WIDTH*i)+:RANK_WIDTH], req_bank_r[(BANK_WIDTH*i)+:BANK_WIDTH], col_addr[(ROW_WIDTH*i)+:ROW_WIDTH], 1'b1, 1'b0, rd_wr_r[i]}; col_rmw_ns = req_wr_r[i] && rd_wr_r[i]; col_size_ns = req_size_r[i]; col_row_ns = req_row_r[(ROW_WIDTH*i)+:ROW_WIDTH]; col_rd_wr_ns = rd_wr_r[i]; col_data_buf_addr_ns = req_data_buf_addr_r[(DATA_BUF_ADDR_WIDTH*i)+:DATA_BUF_ADDR_WIDTH]; end end // always @ (... 
if (EARLY_WR_DATA_ADDR == "OFF") begin : early_wr_data_addr_off assign col_wr_data_buf_addr = col_data_buf_addr_ns; end else begin : early_wr_data_addr_on reg [DATA_BUF_ADDR_WIDTH-1:0] col_wr_data_buf_addr_ns; reg [DATA_BUF_ADDR_WIDTH-1:0] col_wr_data_buf_addr_r; always @(/*AS*/col_wr_data_buf_addr_r or grant_col_wr or req_data_buf_addr_r) begin col_wr_data_buf_addr_ns = col_wr_data_buf_addr_r; for (i=0; i<nBANK_MACHS; i=i+1) if (grant_col_wr[i]) col_wr_data_buf_addr_ns = req_data_buf_addr_r[(DATA_BUF_ADDR_WIDTH*i)+:DATA_BUF_ADDR_WIDTH]; end always @(posedge clk) col_wr_data_buf_addr_r <= #TCQ col_wr_data_buf_addr_ns; assign col_wr_data_buf_addr = col_wr_data_buf_addr_ns; end always @(posedge clk) col_periodic_rd_r <= #TCQ col_periodic_rd_ns; always @(posedge clk) col_rmw_r <= #TCQ col_rmw_ns; always @(posedge clk) col_size_r <= #TCQ col_size_ns; always @(posedge clk) col_data_buf_addr_r <= #TCQ col_data_buf_addr_ns; if (ECC != "OFF" || EVEN_CWL_2T_MODE == "ON") begin always @(posedge clk) col_cmd_r <= #TCQ col_cmd_ns; always @(posedge clk) col_row_r <= #TCQ col_row_ns; end always @(posedge clk) col_rd_wr_r <= #TCQ col_rd_wr_ns; if(EVEN_CWL_2T_MODE == "ON") begin assign col_periodic_rd = col_periodic_rd_r; assign col_ra = col_cmd_r[3+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH]; assign col_ba = col_cmd_r[3+ROW_WIDTH+:BANK_WIDTH]; assign col_a = col_cmd_r[3+:ROW_WIDTH]; assign col_rmw = col_rmw_r; assign col_rd_wr = col_rd_wr_r; assign col_size = col_size_r; assign col_row = col_row_r; assign col_data_buf_addr = col_data_buf_addr_r; end else begin assign col_periodic_rd = col_periodic_rd_ns; assign col_ra = col_cmd_ns[3+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH]; assign col_ba = col_cmd_ns[3+ROW_WIDTH+:BANK_WIDTH]; assign col_a = col_cmd_ns[3+:ROW_WIDTH]; assign col_rmw = col_rmw_ns; assign col_rd_wr = col_rd_wr_ns; assign col_size = col_size_ns; assign col_row = col_row_ns; assign col_data_buf_addr = col_data_buf_addr_ns; end end // col_mux endgenerate reg [OUT_CMD_WIDTH-1:0] cmd0 = {OUT_CMD_WIDTH{1'b1}}; reg cke0; always @(send_cmd0_row or send_cmd0_col or row_cmd_ns or row_cmd_r or col_cmd_ns or col_cmd_r or cke_ns or cke_r ) begin cmd0 = {OUT_CMD_WIDTH{1'b1}}; if (send_cmd0_row) cmd0 = row_cmd_ns; if (send_cmd0_row && EVEN_CWL_2T_MODE == "ON" && nCK_PER_CLK == 2) cmd0 = row_cmd_r; if (send_cmd0_col) cmd0 = col_cmd_ns; if (send_cmd0_col && EVEN_CWL_2T_MODE == "ON") cmd0 = col_cmd_r; if (send_cmd0_row) cke0 = cke_ns; else cke0 = cke_r ; end reg [OUT_CMD_WIDTH-1:0] cmd1 = {OUT_CMD_WIDTH{1'b1}}; generate if ((nCK_PER_CLK == 2) || (nCK_PER_CLK == 4)) always @(send_cmd1_row or send_cmd1_col or row_cmd_ns or col_cmd_ns or pre_cmd_ns) begin cmd1 = {OUT_CMD_WIDTH{1'b1}}; if (send_cmd1_row) cmd1 = row_cmd_ns; if (send_cmd1_col) cmd1 = col_cmd_ns; end endgenerate reg [OUT_CMD_WIDTH-1:0] cmd2 = {OUT_CMD_WIDTH{1'b1}}; reg [OUT_CMD_WIDTH-1:0] cmd3 = {OUT_CMD_WIDTH{1'b1}}; generate if (nCK_PER_CLK == 4) always @(send_cmd2_row or send_cmd2_col or send_cmd2_pre or send_cmd3_col or row_cmd_ns or col_cmd_ns or pre_cmd_ns) begin cmd2 = {OUT_CMD_WIDTH{1'b1}}; cmd3 = {OUT_CMD_WIDTH{1'b1}}; if (send_cmd2_row) cmd2 = row_cmd_ns; if (send_cmd2_col) cmd2 = col_cmd_ns; if (send_cmd2_pre) cmd2 = pre_cmd_ns; if (send_cmd3_col) cmd3 = col_cmd_ns; end endgenerate // Output command bus 0. wire [RANK_WIDTH-1:0] ra0; // assign address assign {ra0, mc_bank[BANK_WIDTH-1:0], mc_address[ROW_WIDTH-1:0], mc_ras_n[0], mc_cas_n[0], mc_we_n[0]} = cmd0; // Output command bus 1. 
wire [RANK_WIDTH-1:0] ra1; // assign address assign {ra1, mc_bank[2*BANK_WIDTH-1:BANK_WIDTH], mc_address[2*ROW_WIDTH-1:ROW_WIDTH], mc_ras_n[1], mc_cas_n[1], mc_we_n[1]} = cmd1; wire [RANK_WIDTH-1:0] ra2; wire [RANK_WIDTH-1:0] ra3; generate if(nCK_PER_CLK == 4) begin // Output command bus 2. // assign address assign {ra2, mc_bank[3*BANK_WIDTH-1:2*BANK_WIDTH], mc_address[3*ROW_WIDTH-1:2*ROW_WIDTH], mc_ras_n[2], mc_cas_n[2], mc_we_n[2]} = cmd2; // Output command bus 3. // assign address assign {ra3, mc_bank[4*BANK_WIDTH-1:3*BANK_WIDTH], mc_address[4*ROW_WIDTH-1:3*ROW_WIDTH], mc_ras_n[3], mc_cas_n[3], mc_we_n[3]} = cmd3; end endgenerate generate if(CKE_ODT_AUX == "FALSE")begin assign mc_cke[0] = cke0; assign mc_cke[1] = cke_ns; if(nCK_PER_CLK == 4) begin assign mc_cke[2] = cke_ns; assign mc_cke[3] = cke_ns; end end endgenerate // Output cs busses. localparam ONE = {nCS_PER_RANK{1'b1}}; wire [(CS_WIDTH*nCS_PER_RANK)-1:0] cs_one_hot = {{CS_WIDTH{1'b0}},ONE}; assign mc_cs_n[CS_WIDTH*nCS_PER_RANK -1 :0 ] = {(~(cs_one_hot << (nCS_PER_RANK*ra0)) | {CS_WIDTH*nCS_PER_RANK{~cs_en0}})}; assign mc_cs_n[2*CS_WIDTH*nCS_PER_RANK -1 : CS_WIDTH*nCS_PER_RANK ] = {(~(cs_one_hot << (nCS_PER_RANK*ra1)) | {CS_WIDTH*nCS_PER_RANK{~cs_en1}})}; generate if(nCK_PER_CLK == 4) begin assign mc_cs_n[3*CS_WIDTH*nCS_PER_RANK -1 :2*CS_WIDTH*nCS_PER_RANK ] = {(~(cs_one_hot << (nCS_PER_RANK*ra2)) | {CS_WIDTH*nCS_PER_RANK{~cs_en2}})}; assign mc_cs_n[4*CS_WIDTH*nCS_PER_RANK -1 :3*CS_WIDTH*nCS_PER_RANK ] = {(~(cs_one_hot << (nCS_PER_RANK*ra3)) | {CS_WIDTH*nCS_PER_RANK{~cs_en3}})}; end endgenerate // Output rnk_config info. reg [RANK_WIDTH-1:0] rnk_config_ns; reg [RANK_WIDTH-1:0] rnk_config_r; always @(/*AS*/grant_config_r or rnk_config_r or rnk_config_strobe or req_rank_r or rst) begin if (rst) rnk_config_ns = {RANK_WIDTH{1'b0}}; else begin rnk_config_ns = rnk_config_r; if (rnk_config_strobe) for (i=0; i<nBANK_MACHS; i=i+1) if (grant_config_r[i]) rnk_config_ns = req_rank_r[(RANK_WIDTH*i)+:RANK_WIDTH]; end end always @(posedge clk) rnk_config_r <= #TCQ rnk_config_ns; assign rnk_config = rnk_config_ns; // Generate ODT signals. wire [CS_WIDTH-1:0] col_ra_one_hot = cs_one_hot << col_ra; wire slot_0_select = (nSLOTS == 1) ? |(col_ra_one_hot & slot_0_present) : (slot_0_present[2] & slot_0_present[0]) ? |(col_ra_one_hot[CS_WIDTH-1:0] & {slot_0_present[2], slot_0_present[0]}) : (slot_0_present[0])? col_ra_one_hot[0] : 1'b0; wire slot_0_read = EVEN_CWL_2T_MODE == "ON" ? slot_0_select && col_rd_wr_r : slot_0_select && col_rd_wr_ns; wire slot_0_write = EVEN_CWL_2T_MODE == "ON" ? slot_0_select && ~col_rd_wr_r : slot_0_select && ~col_rd_wr_ns; reg [1:0] slot_1_population = 2'b0; reg[1:0] slot_0_population; always @(/*AS*/slot_0_present) begin slot_0_population = 2'b0; for (i=0; i<8; i=i+1) if (~slot_0_population[1]) if (slot_0_present[i] == 1'b1) slot_0_population = slot_0_population + 2'b1; end // ODT on in slot 0 for writes to slot 0 (and R/W to slot 1 for DDR3) wire slot_0_odt = (DRAM_TYPE == "DDR3") ? ~slot_0_read : slot_0_write; assign mc_aux_out0[1] = slot_0_odt & sent_col; // Only send for COL cmds generate if (nSLOTS > 1) begin : slot_1_configured wire slot_1_select = (slot_1_present[3] & slot_1_present[1])? |({col_ra_one_hot[slot_0_population+1], col_ra_one_hot[slot_0_population]}) : (slot_1_present[1]) ? col_ra_one_hot[slot_0_population] :1'b0; wire slot_1_read = EVEN_CWL_2T_MODE == "ON" ? slot_1_select && col_rd_wr_r : slot_1_select && col_rd_wr_ns; wire slot_1_write = EVEN_CWL_2T_MODE == "ON" ? 
slot_1_select && ~col_rd_wr_r : slot_1_select && ~col_rd_wr_ns; // ODT on in slot 1 for writes to slot 1 (and R/W to slot 0 for DDR3) wire slot_1_odt = (DRAM_TYPE == "DDR3") ? ~slot_1_read : slot_1_write; assign mc_aux_out0[3] = slot_1_odt & sent_col; // Only send for COL cmds end // if (nSLOTS > 1) else begin // Disable slot 1 ODT when not present assign mc_aux_out0[3] = 1'b0; end // else: !if(nSLOTS > 1) endgenerate generate if(CKE_ODT_AUX == "FALSE")begin reg[1:0] mc_aux_out_r ; reg[1:0] mc_aux_out_r_1 ; reg[1:0] mc_aux_out_r_2 ; always@(posedge clk) begin mc_aux_out_r[0] <= #TCQ mc_aux_out0[1] ; mc_aux_out_r[1] <= #TCQ mc_aux_out0[3] ; mc_aux_out_r_1 <= #TCQ mc_aux_out_r ; mc_aux_out_r_2 <= #TCQ mc_aux_out_r_1 ; end if((nCK_PER_CLK == 4) && (nSLOTS > 1 )) begin:odt_high_time_4_1_dslot assign mc_odt[0] = mc_aux_out0[1] | mc_aux_out_r[0] | mc_aux_out_r_1[0]; assign mc_odt[1] = mc_aux_out0[3] | mc_aux_out_r[1] | mc_aux_out_r_1[1]; end else if(nCK_PER_CLK == 4) begin:odt_high_time_4_1 assign mc_odt[0] = mc_aux_out0[1] | mc_aux_out_r[0] ; assign mc_odt[1] = mc_aux_out0[3] | mc_aux_out_r[1] ; end else if(nCK_PER_CLK == 2) begin:odt_high_time_2_1 assign mc_odt[0] = mc_aux_out0[1] | mc_aux_out_r[0] | mc_aux_out_r_1[0] | mc_aux_out_r_2[0] ; assign mc_odt[1] = mc_aux_out0[3] | mc_aux_out_r[1] | mc_aux_out_r_1[1] | mc_aux_out_r_2[1] ; end end endgenerate endmodule
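// ----------------------------------------------------------------------
// Worked example (added for illustration) of the chip-select decode in
// arb_select above: with CS_WIDTH = 4 and nCS_PER_RANK = 1,
// cs_one_hot = 4'b0001. If command slot 0 targets rank ra0 = 2 with
// cs_en0 = 1, then
//   mc_cs_n[3:0] = ~(4'b0001 << 2) | {4{~1'b1}} = ~4'b0100 | 4'b0000
//                = 4'b1011
// i.e. only the (active-low) chip select for rank 2 is asserted. With
// cs_en0 = 0 the OR term becomes 4'b1111 and no chip select is driven
// low for that slot. Slots 1 through 3 follow the same pattern using
// ra1..ra3 and cs_en1..cs_en3.
// ----------------------------------------------------------------------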
//***************************************************************************** // (c) Copyright 2009 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. 
// //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor: Xilinx // \ \ \/ Version: %version // \ \ Application: MIG // / / Filename: ddr_phy_oclkdelay_cal.v // /___/ /\ Date Last Modified: $Date: 2011/02/25 02:07:40 $ // \ \ / \ Date Created: Aug 03 2009 // \___\/\___\ // //Device: 7 Series //Design Name: DDR3 SDRAM //Purpose: Center write DQS in write DQ valid window using Phaser_Out Stage3 // delay //Reference: //Revision History: //***************************************************************************** `timescale 1ps/1ps module mig_7series_v1_9_ddr_phy_oclkdelay_cal # ( parameter TCQ = 100, parameter tCK = 2500, parameter nCK_PER_CLK = 4, parameter DRAM_TYPE = "DDR3", parameter DRAM_WIDTH = 8, parameter DQS_CNT_WIDTH = 3, parameter DQS_WIDTH = 8, parameter DQ_WIDTH = 64, parameter SIM_CAL_OPTION = "NONE", parameter OCAL_EN = "ON" ) ( input clk, input rst, // Start only after PO and PI FINE delay decremented input oclk_init_delay_start, input oclkdelay_calib_start, input [5:0] oclkdelay_init_val, // Detect write valid data edge during OCLKDELAY calib input phy_rddata_en, input [2*nCK_PER_CLK*DQ_WIDTH-1:0] rd_data, // Precharge done status from ddr_phy_init input prech_done, // Write Level signals during OCLKDELAY calibration input [6*DQS_WIDTH-1:0] wl_po_fine_cnt, output reg wrlvl_final, // Inc/dec Phaser_Out fine delay line output reg po_stg3_incdec, output reg po_en_stg3, output reg po_stg23_sel, output reg po_stg23_incdec, output reg po_en_stg23, // Completed initial delay increment output oclk_init_delay_done, output [DQS_CNT_WIDTH:0] oclkdelay_calib_cnt, output reg oclk_prech_req, output reg oclk_calib_resume, output oclkdelay_calib_done, output [255:0] dbg_phy_oclkdelay_cal, output [16*DRAM_WIDTH-1:0] dbg_oclkdelay_rd_data ); // Start with an initial delay of 0 on OCLKDELAY. This is required to // detect two valid data edges when possible. Two edges cannot be // detected if write DQ and DQS are exactly edge aligned at stage3 tap0. localparam TAP_CNT = 0; //(tCK <= 938) ? 13 : //(tCK <= 1072) ? 14 : //(tCK <= 1250) ? 15 : //(tCK <= 1500) ? 
16 : 17; localparam WAIT_CNT = 15; // Default set to TRUE because there can be a case where the ocal_rise_right_edge // may not be detected if WRLVL stage2 tap value is large upto 63 and the initial // DQS position is more than 225 degrees localparam MINUS_32 = "TRUE"; localparam [4:0] OCAL_IDLE = 5'h00; localparam [4:0] OCAL_NEW_DQS_WAIT = 5'h01; localparam [4:0] OCAL_STG3_SEL = 5'h02; localparam [4:0] OCAL_STG3_SEL_WAIT = 5'h03; localparam [4:0] OCAL_STG3_EN_WAIT = 5'h04; localparam [4:0] OCAL_STG3_DEC = 5'h05; localparam [4:0] OCAL_STG3_WAIT = 5'h06; localparam [4:0] OCAL_STG3_CALC = 5'h07; localparam [4:0] OCAL_STG3_INC = 5'h08; localparam [4:0] OCAL_STG3_INC_WAIT = 5'h09; localparam [4:0] OCAL_STG2_SEL = 5'h0A; localparam [4:0] OCAL_STG2_WAIT = 5'h0B; localparam [4:0] OCAL_STG2_INC = 5'h0C; localparam [4:0] OCAL_STG2_DEC = 5'h0D; localparam [4:0] OCAL_STG2_DEC_WAIT = 5'h0E; localparam [4:0] OCAL_NEXT_DQS = 5'h0F; localparam [4:0] OCAL_NEW_DQS_READ = 5'h10; localparam [4:0] OCAL_INC_DONE_WAIT = 5'h11; localparam [4:0] OCAL_STG3_DEC_WAIT = 5'h12; localparam [4:0] OCAL_DEC_DONE_WAIT = 5'h13; localparam [4:0] OCAL_DONE = 5'h14; integer i; reg oclk_init_delay_start_r; reg [3:0] count; reg delay_done; reg delay_done_r1; reg delay_done_r2; reg delay_done_r3; reg delay_done_r4; reg [5:0] delay_cnt_r; reg po_stg3_dec; wire [DQ_WIDTH-1:0] rd_data_rise0; wire [DQ_WIDTH-1:0] rd_data_fall0; wire [DQ_WIDTH-1:0] rd_data_rise1; wire [DQ_WIDTH-1:0] rd_data_fall1; wire [DQ_WIDTH-1:0] rd_data_rise2; wire [DQ_WIDTH-1:0] rd_data_fall2; wire [DQ_WIDTH-1:0] rd_data_rise3; wire [DQ_WIDTH-1:0] rd_data_fall3; reg [DQS_CNT_WIDTH:0] cnt_dqs_r; reg [DQS_CNT_WIDTH:0] mux_sel_r; reg [DRAM_WIDTH-1:0] sel_rd_rise0_r; reg [DRAM_WIDTH-1:0] sel_rd_fall0_r; reg [DRAM_WIDTH-1:0] sel_rd_rise1_r; reg [DRAM_WIDTH-1:0] sel_rd_fall1_r; reg [DRAM_WIDTH-1:0] sel_rd_rise2_r; reg [DRAM_WIDTH-1:0] sel_rd_fall2_r; reg [DRAM_WIDTH-1:0] sel_rd_rise3_r; reg [DRAM_WIDTH-1:0] sel_rd_fall3_r; reg [DRAM_WIDTH-1:0] prev_rd_rise0_r; reg [DRAM_WIDTH-1:0] prev_rd_fall0_r; reg [DRAM_WIDTH-1:0] prev_rd_rise1_r; reg [DRAM_WIDTH-1:0] prev_rd_fall1_r; reg [DRAM_WIDTH-1:0] prev_rd_rise2_r; reg [DRAM_WIDTH-1:0] prev_rd_fall2_r; reg [DRAM_WIDTH-1:0] prev_rd_rise3_r; reg [DRAM_WIDTH-1:0] prev_rd_fall3_r; reg rd_active_r; reg rd_active_r1; reg rd_active_r2; reg rd_active_r3; reg rd_active_r4; reg [DRAM_WIDTH-1:0] pat_match_fall0_r; reg pat_match_fall0_and_r; reg [DRAM_WIDTH-1:0] pat_match_fall1_r; reg pat_match_fall1_and_r; reg [DRAM_WIDTH-1:0] pat_match_fall2_r; reg pat_match_fall2_and_r; reg [DRAM_WIDTH-1:0] pat_match_fall3_r; reg pat_match_fall3_and_r; reg [DRAM_WIDTH-1:0] pat_match_rise0_r; reg pat_match_rise0_and_r; reg [DRAM_WIDTH-1:0] pat_match_rise1_r; reg pat_match_rise1_and_r; reg [DRAM_WIDTH-1:0] pat_match_rise2_r; reg pat_match_rise2_and_r; reg [DRAM_WIDTH-1:0] pat_match_rise3_r; reg pat_match_rise3_and_r; reg pat_data_match_r; reg pat_data_match_valid_r; reg pat_data_match_valid_r1; //reg [3:0] stable_stg3_cnt; //reg stable_eye_r; reg [3:0] stable_rise_stg3_cnt; reg stable_rise_eye_r; reg [3:0] stable_fall_stg3_cnt; reg stable_fall_eye_r; reg wait_cnt_en_r; reg [3:0] wait_cnt_r; reg cnt_next_state; reg oclkdelay_calib_start_r; reg [5:0] stg3_tap_cnt; reg [5:0] stg3_incdec_limit; reg stg3_dec2inc; reg [5:0] stg2_tap_cnt; reg [1:0] stg2_inc2_cnt; reg [1:0] stg2_dec2_cnt; reg [5:0] stg2_dec_cnt; reg stg3_dec; reg stg3_dec_r; reg [4:0] ocal_state_r; reg [4:0] ocal_state_r1; reg [5:0] ocal_final_cnt_r; reg ocal_final_cnt_r_calc; reg [5:0] 
ocal_inc_cnt; reg [5:0] ocal_dec_cnt; reg ocal_stg3_inc_en; reg ocal_rise_edge1_found; reg ocal_rise_edge2_found; reg ocal_rise_edge1_found_timing; reg ocal_rise_edge2_found_timing; reg [5:0] ocal_rise_edge1_taps; reg [5:0] ocal_rise_edge2_taps; reg [5:0] ocal_rise_right_edge; reg ocal_fall_edge1_found; reg ocal_fall_edge2_found; reg [5:0] ocal_fall_edge1_taps; reg [5:0] ocal_fall_edge2_taps; reg [5:0] ocal_final_cnt_r_mux_a; reg [5:0] ocal_final_cnt_r_mux_b; reg [5:0] ocal_final_cnt_r_mux_c; reg [5:0] ocal_final_cnt_r_mux_d; reg ocal_byte_done; reg ocal_wrlvl_done; reg ocal_wrlvl_done_r; (* keep = "true", max_fanout = 10 *) reg ocal_done_r /* synthesis syn_maxfan = 10 */; reg [5:0] ocal_tap_cnt_r[0:DQS_WIDTH-1]; reg prech_done_r; reg rise_win; reg fall_win; // timing registers reg stg3_tap_cnt_eq_oclkdelay_init_val; reg stg3_tap_cnt_eq_0; //reg stg3_tap_cnt_gt_20; reg stg3_tap_cnt_eq_63; reg stg3_tap_cnt_less_oclkdelay_init_val; reg stg3_limit; wire [5:0] wl_po_fine_cnt_w[0:DQS_WIDTH-1]; //************************************************************************** // Debug signals //************************************************************************** genvar dqs_i; generate for (dqs_i=0; dqs_i < DQS_WIDTH; dqs_i = dqs_i + 1) begin: oclkdelay_tap_cnt assign dbg_phy_oclkdelay_cal[6*dqs_i+:6] = ocal_tap_cnt_r[dqs_i][5:0]; end endgenerate assign dbg_phy_oclkdelay_cal[57:54] = cnt_dqs_r; assign dbg_phy_oclkdelay_cal[58] = ocal_rise_edge1_found_timing; assign dbg_phy_oclkdelay_cal[59] = ocal_rise_edge2_found_timing; assign dbg_phy_oclkdelay_cal[65:60] = ocal_rise_edge1_taps; assign dbg_phy_oclkdelay_cal[71:66] = ocal_rise_edge2_taps; assign dbg_phy_oclkdelay_cal[76:72] = ocal_state_r1; assign dbg_phy_oclkdelay_cal[77] = pat_data_match_valid_r; assign dbg_phy_oclkdelay_cal[78] = pat_data_match_r; assign dbg_phy_oclkdelay_cal[84:79] = stg3_tap_cnt; assign dbg_phy_oclkdelay_cal[88:85] = stable_rise_stg3_cnt; assign dbg_phy_oclkdelay_cal[89] = stable_rise_eye_r; assign dbg_phy_oclkdelay_cal[97:90] = prev_rd_rise0_r; assign dbg_phy_oclkdelay_cal[105:98] = prev_rd_fall0_r; assign dbg_phy_oclkdelay_cal[113:106] = prev_rd_rise1_r; assign dbg_phy_oclkdelay_cal[121:114] = prev_rd_fall1_r; assign dbg_phy_oclkdelay_cal[129:122] = prev_rd_rise2_r; assign dbg_phy_oclkdelay_cal[137:130] = prev_rd_fall2_r; assign dbg_phy_oclkdelay_cal[145:138] = prev_rd_rise3_r; assign dbg_phy_oclkdelay_cal[153:146] = prev_rd_fall3_r; assign dbg_phy_oclkdelay_cal[154] = rd_active_r; assign dbg_phy_oclkdelay_cal[162:155] = sel_rd_rise0_r; assign dbg_phy_oclkdelay_cal[170:163] = sel_rd_fall0_r; assign dbg_phy_oclkdelay_cal[178:171] = sel_rd_rise1_r; assign dbg_phy_oclkdelay_cal[186:179] = sel_rd_fall1_r; assign dbg_phy_oclkdelay_cal[194:187] = sel_rd_rise2_r; assign dbg_phy_oclkdelay_cal[202:195] = sel_rd_fall2_r; assign dbg_phy_oclkdelay_cal[210:203] = sel_rd_rise3_r; assign dbg_phy_oclkdelay_cal[218:211] = sel_rd_fall3_r; assign dbg_phy_oclkdelay_cal[219+:6] = stg2_tap_cnt; assign dbg_phy_oclkdelay_cal[225] = ocal_fall_edge1_found; assign dbg_phy_oclkdelay_cal[226] = ocal_fall_edge2_found; assign dbg_phy_oclkdelay_cal[232:227] = ocal_fall_edge1_taps; assign dbg_phy_oclkdelay_cal[238:233] = ocal_fall_edge2_taps; assign dbg_phy_oclkdelay_cal[244:239] = ocal_rise_right_edge; assign dbg_phy_oclkdelay_cal[250:245] = 'd0; assign dbg_phy_oclkdelay_cal[251] = stable_fall_eye_r; assign dbg_phy_oclkdelay_cal[252] = rise_win; assign dbg_phy_oclkdelay_cal[253] = fall_win; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*1 -1:0] = 
prev_rd_rise0_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*2 -1:DRAM_WIDTH*1] = prev_rd_fall0_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*3 -1:DRAM_WIDTH*2] = prev_rd_rise1_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*4 -1:DRAM_WIDTH*3] = prev_rd_fall1_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*5 -1:DRAM_WIDTH*4] = prev_rd_rise2_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*6 -1:DRAM_WIDTH*5] = prev_rd_fall2_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*7 -1:DRAM_WIDTH*6] = prev_rd_rise3_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*8 -1:DRAM_WIDTH*7] = prev_rd_fall3_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*9 -1:DRAM_WIDTH*8] = sel_rd_rise0_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*10 -1:DRAM_WIDTH*9] = sel_rd_fall0_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*11 -1:DRAM_WIDTH*10] = sel_rd_rise1_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*12 -1:DRAM_WIDTH*11] = sel_rd_fall1_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*13 -1:DRAM_WIDTH*12] = sel_rd_rise2_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*14 -1:DRAM_WIDTH*13] = sel_rd_fall2_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*15 -1:DRAM_WIDTH*14] = sel_rd_rise3_r; assign dbg_oclkdelay_rd_data[DRAM_WIDTH*16 -1:DRAM_WIDTH*15] = sel_rd_fall3_r; assign oclk_init_delay_done = ((SIM_CAL_OPTION == "FAST_CAL") || (DRAM_TYPE!="DDR3")) ? 1'b1 : delay_done_r4; //(SIM_CAL_OPTION != "NONE") assign oclkdelay_calib_cnt = cnt_dqs_r; assign oclkdelay_calib_done = (OCAL_EN == "ON") ? ocal_done_r : 1'b1; always @(posedge clk) oclk_init_delay_start_r <= #TCQ oclk_init_delay_start; always @(posedge clk) begin if (rst || po_stg3_dec) count <= #TCQ WAIT_CNT; else if (oclk_init_delay_start && (count > 'd0)) count <= #TCQ count - 1; end always @(posedge clk) begin if (rst) po_stg3_dec <= #TCQ 1'b0; else if ((count == 'd1) && (delay_cnt_r != 'd0)) po_stg3_dec <= #TCQ 1'b1; else po_stg3_dec <= #TCQ 1'b0; end //po_stg3_incdec and po_en_stg3 asserted for all data byte lanes always @(posedge clk) begin if (rst) begin po_stg3_incdec <= #TCQ 1'b0; po_en_stg3 <= #TCQ 1'b0; end else if (po_stg3_dec) begin po_stg3_incdec <= #TCQ 1'b0; po_en_stg3 <= #TCQ 1'b1; end else begin po_stg3_incdec <= #TCQ 1'b0; po_en_stg3 <= #TCQ 1'b0; end end // delay counter to count TAP_CNT cycles always @(posedge clk) begin // load delay counter with init value of TAP_CNT if (rst) delay_cnt_r <= #TCQ TAP_CNT; else if (po_stg3_dec && (delay_cnt_r > 6'd0)) delay_cnt_r <= #TCQ delay_cnt_r - 1; end // when all the ctl_lanes have their output phase shifted by 1/4 cycle, delay shifting is done. 
always @(posedge clk) begin if (rst) begin delay_done <= #TCQ 1'b0; end else if ((TAP_CNT == 6'd0) || ((delay_cnt_r == 6'd1) && (count == 'd1))) begin delay_done <= #TCQ 1'b1; end end always @(posedge clk) begin delay_done_r1 <= #TCQ delay_done; delay_done_r2 <= #TCQ delay_done_r1; delay_done_r3 <= #TCQ delay_done_r2; delay_done_r4 <= #TCQ delay_done_r3; end //************************************************************************** // OCLKDELAY Calibration //************************************************************************** generate if (nCK_PER_CLK == 4) begin: gen_rd_data_div4 assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0]; assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH]; assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH]; assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH]; assign rd_data_rise2 = rd_data[5*DQ_WIDTH-1:4*DQ_WIDTH]; assign rd_data_fall2 = rd_data[6*DQ_WIDTH-1:5*DQ_WIDTH]; assign rd_data_rise3 = rd_data[7*DQ_WIDTH-1:6*DQ_WIDTH]; assign rd_data_fall3 = rd_data[8*DQ_WIDTH-1:7*DQ_WIDTH]; end else if (nCK_PER_CLK == 2) begin: gen_rd_data_div2 assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0]; assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH]; assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH]; assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH]; end endgenerate always @(posedge clk) begin mux_sel_r <= #TCQ cnt_dqs_r; oclkdelay_calib_start_r <= #TCQ oclkdelay_calib_start; ocal_wrlvl_done_r <= #TCQ ocal_wrlvl_done; rd_active_r <= #TCQ phy_rddata_en; rd_active_r1 <= #TCQ rd_active_r; rd_active_r2 <= #TCQ rd_active_r1; rd_active_r3 <= #TCQ rd_active_r2; rd_active_r4 <= #TCQ rd_active_r3; stg3_dec_r <= #TCQ stg3_dec; ocal_state_r1 <= #TCQ ocal_state_r; end // Register outputs for improved timing. // All bits in selected DQS group are checked in aggregate generate genvar mux_j; for (mux_j = 0; mux_j < DRAM_WIDTH; mux_j = mux_j + 1) begin: gen_mux_rd always @(posedge clk) begin if (phy_rddata_en) begin sel_rd_rise0_r[mux_j] <= #TCQ rd_data_rise0[DRAM_WIDTH*mux_sel_r + mux_j]; sel_rd_fall0_r[mux_j] <= #TCQ rd_data_fall0[DRAM_WIDTH*mux_sel_r + mux_j]; sel_rd_rise1_r[mux_j] <= #TCQ rd_data_rise1[DRAM_WIDTH*mux_sel_r + mux_j]; sel_rd_fall1_r[mux_j] <= #TCQ rd_data_fall1[DRAM_WIDTH*mux_sel_r + mux_j]; sel_rd_rise2_r[mux_j] <= #TCQ rd_data_rise2[DRAM_WIDTH*mux_sel_r + mux_j]; sel_rd_fall2_r[mux_j] <= #TCQ rd_data_fall2[DRAM_WIDTH*mux_sel_r + mux_j]; sel_rd_rise3_r[mux_j] <= #TCQ rd_data_rise3[DRAM_WIDTH*mux_sel_r + mux_j]; sel_rd_fall3_r[mux_j] <= #TCQ rd_data_fall3[DRAM_WIDTH*mux_sel_r + mux_j]; end end end endgenerate always @(posedge clk) if (((stg3_tap_cnt_eq_oclkdelay_init_val) && rd_active_r) | rd_active_r4) begin prev_rd_rise0_r <= #TCQ sel_rd_rise0_r; prev_rd_fall0_r <= #TCQ sel_rd_fall0_r; prev_rd_rise1_r <= #TCQ sel_rd_rise1_r; prev_rd_fall1_r <= #TCQ sel_rd_fall1_r; prev_rd_rise2_r <= #TCQ sel_rd_rise2_r; prev_rd_fall2_r <= #TCQ sel_rd_fall2_r; prev_rd_rise3_r <= #TCQ sel_rd_rise3_r; prev_rd_fall3_r <= #TCQ sel_rd_fall3_r; end // Each bit of each byte is compared with previous data to // detect an edge generate genvar pt_j; if (nCK_PER_CLK == 4) begin: gen_pat_match_div4 always @(posedge clk) begin if (rd_active_r) begin rise_win <= #TCQ ((|sel_rd_rise0_r) | (|sel_rd_rise1_r) | (|sel_rd_rise2_r) | (|sel_rd_rise3_r)); fall_win <= #TCQ ((&sel_rd_rise0_r) & (&sel_rd_rise1_r) & (&sel_rd_rise2_r) & (&sel_rd_rise3_r)); end end for (pt_j = 0; pt_j < DRAM_WIDTH; pt_j = pt_j + 1) begin: gen_pat_match always @(posedge clk) begin if (sel_rd_rise0_r[pt_j] == 
prev_rd_rise0_r[pt_j]) pat_match_rise0_r[pt_j] <= #TCQ 1'b1; else pat_match_rise0_r[pt_j] <= #TCQ 1'b0; if (sel_rd_fall0_r[pt_j] == prev_rd_fall0_r[pt_j]) pat_match_fall0_r[pt_j] <= #TCQ 1'b1; else pat_match_fall0_r[pt_j] <= #TCQ 1'b0; if (sel_rd_rise1_r[pt_j] == prev_rd_rise1_r[pt_j]) pat_match_rise1_r[pt_j] <= #TCQ 1'b1; else pat_match_rise1_r[pt_j] <= #TCQ 1'b0; if (sel_rd_fall1_r[pt_j] == prev_rd_fall1_r[pt_j]) pat_match_fall1_r[pt_j] <= #TCQ 1'b1; else pat_match_fall1_r[pt_j] <= #TCQ 1'b0; if (sel_rd_rise2_r[pt_j] == prev_rd_rise2_r[pt_j]) pat_match_rise2_r[pt_j] <= #TCQ 1'b1; else pat_match_rise2_r[pt_j] <= #TCQ 1'b0; if (sel_rd_fall2_r[pt_j] == prev_rd_fall2_r[pt_j]) pat_match_fall2_r[pt_j] <= #TCQ 1'b1; else pat_match_fall2_r[pt_j] <= #TCQ 1'b0; if (sel_rd_rise3_r[pt_j] == prev_rd_rise3_r[pt_j]) pat_match_rise3_r[pt_j] <= #TCQ 1'b1; else pat_match_rise3_r[pt_j] <= #TCQ 1'b0; if (sel_rd_fall3_r[pt_j] == prev_rd_fall3_r[pt_j]) pat_match_fall3_r[pt_j] <= #TCQ 1'b1; else pat_match_fall3_r[pt_j] <= #TCQ 1'b0; end end always @(posedge clk) begin pat_match_rise0_and_r <= #TCQ &pat_match_rise0_r; pat_match_fall0_and_r <= #TCQ &pat_match_fall0_r; pat_match_rise1_and_r <= #TCQ &pat_match_rise1_r; pat_match_fall1_and_r <= #TCQ &pat_match_fall1_r; pat_match_rise2_and_r <= #TCQ &pat_match_rise2_r; pat_match_fall2_and_r <= #TCQ &pat_match_fall2_r; pat_match_rise3_and_r <= #TCQ &pat_match_rise3_r; pat_match_fall3_and_r <= #TCQ &pat_match_fall3_r; pat_data_match_r <= #TCQ (//pat_match_rise0_and_r && //pat_match_fall0_and_r && pat_match_rise1_and_r && pat_match_fall1_and_r && pat_match_rise2_and_r && pat_match_fall2_and_r && pat_match_rise3_and_r && pat_match_fall3_and_r); pat_data_match_valid_r <= #TCQ rd_active_r2; end always @(posedge clk) pat_data_match_valid_r1 <= #TCQ pat_data_match_valid_r; end else if (nCK_PER_CLK == 2) begin: gen_pat_match_div2 always @(posedge clk) begin if (rd_active_r) begin rise_win <= #TCQ ((|sel_rd_rise0_r) | (|sel_rd_rise1_r)); fall_win <= #TCQ ((&sel_rd_rise0_r) & (&sel_rd_rise1_r)); end end for (pt_j = 0; pt_j < DRAM_WIDTH; pt_j = pt_j + 1) begin: gen_pat_match always @(posedge clk) begin if (sel_rd_rise0_r[pt_j] == prev_rd_rise0_r[pt_j]) pat_match_rise0_r[pt_j] <= #TCQ 1'b1; else pat_match_rise0_r[pt_j] <= #TCQ 1'b0; if (sel_rd_fall0_r[pt_j] == prev_rd_fall0_r[pt_j]) pat_match_fall0_r[pt_j] <= #TCQ 1'b1; else pat_match_fall0_r[pt_j] <= #TCQ 1'b0; if (sel_rd_rise1_r[pt_j] == prev_rd_rise1_r[pt_j]) pat_match_rise1_r[pt_j] <= #TCQ 1'b1; else pat_match_rise1_r[pt_j] <= #TCQ 1'b0; if (sel_rd_fall1_r[pt_j] == prev_rd_fall1_r[pt_j]) pat_match_fall1_r[pt_j] <= #TCQ 1'b1; else pat_match_fall1_r[pt_j] <= #TCQ 1'b0; end end always @(posedge clk) begin pat_match_rise0_and_r <= #TCQ &pat_match_rise0_r; pat_match_fall0_and_r <= #TCQ &pat_match_fall0_r; pat_match_rise1_and_r <= #TCQ &pat_match_rise1_r; pat_match_fall1_and_r <= #TCQ &pat_match_fall1_r; pat_data_match_r <= #TCQ (//pat_match_rise0_and_r && //pat_match_fall0_and_r && pat_match_rise1_and_r && pat_match_fall1_and_r); pat_data_match_valid_r <= #TCQ rd_active_r2; end always @(posedge clk) pat_data_match_valid_r1 <= #TCQ pat_data_match_valid_r; end endgenerate // Stable count of 16 PO Stage3 taps at 2x the resolution of stage2 taps // Required to inhibit false edge detection due to clock jitter always @(posedge clk)begin if (rst | (pat_data_match_valid_r & ~pat_data_match_r & (ocal_state_r == OCAL_NEW_DQS_WAIT)) | (ocal_state_r == OCAL_STG3_CALC)) stable_rise_stg3_cnt <= #TCQ 'd0; else if 
((!stg3_tap_cnt_eq_oclkdelay_init_val) & pat_data_match_valid_r & pat_data_match_r & (ocal_state_r == OCAL_NEW_DQS_WAIT) & (stable_rise_stg3_cnt < 'd8) & ~rise_win) stable_rise_stg3_cnt <= #TCQ stable_rise_stg3_cnt + 1; end always @(posedge clk) begin if (rst | (stable_rise_stg3_cnt != 'd8)) stable_rise_eye_r <= #TCQ 1'b0; else if (stable_rise_stg3_cnt == 'd8) stable_rise_eye_r <= #TCQ 1'b1; end always @(posedge clk)begin if (rst | (pat_data_match_valid_r & ~pat_data_match_r & (ocal_state_r == OCAL_NEW_DQS_WAIT)) | (ocal_state_r == OCAL_STG3_CALC)) stable_fall_stg3_cnt <= #TCQ 'd0; else if ((!stg3_tap_cnt_eq_oclkdelay_init_val) & pat_data_match_valid_r & pat_data_match_r & (ocal_state_r == OCAL_NEW_DQS_WAIT) & (stable_fall_stg3_cnt < 'd8) & fall_win) stable_fall_stg3_cnt <= #TCQ stable_fall_stg3_cnt + 1; end always @(posedge clk) begin if (rst | (stable_fall_stg3_cnt != 'd8)) stable_fall_eye_r <= #TCQ 1'b0; else if (stable_fall_stg3_cnt == 'd8) stable_fall_eye_r <= #TCQ 1'b1; end always @(posedge clk) if ((ocal_state_r == OCAL_STG3_SEL_WAIT) || (ocal_state_r == OCAL_STG3_EN_WAIT) || (ocal_state_r == OCAL_STG3_WAIT) || (ocal_state_r == OCAL_STG3_INC_WAIT) || (ocal_state_r == OCAL_STG3_DEC_WAIT) || (ocal_state_r == OCAL_STG2_WAIT) || (ocal_state_r == OCAL_STG2_DEC_WAIT) || (ocal_state_r == OCAL_INC_DONE_WAIT) || (ocal_state_r == OCAL_DEC_DONE_WAIT)) wait_cnt_en_r <= #TCQ 1'b1; else wait_cnt_en_r <= #TCQ 1'b0; always @(posedge clk) if (!wait_cnt_en_r) begin wait_cnt_r <= #TCQ 'b0; cnt_next_state <= #TCQ 1'b0; end else begin if (wait_cnt_r != WAIT_CNT - 1) begin wait_cnt_r <= #TCQ wait_cnt_r + 1; cnt_next_state <= #TCQ 1'b0; end else begin // Need to reset to 0 to handle the case when there are two // different WAIT states back-to-back wait_cnt_r <= #TCQ 'b0; cnt_next_state <= #TCQ 1'b1; end end always @(posedge clk) begin if (rst) begin for (i=0; i < DQS_WIDTH; i = i + 1) begin: rst_ocal_tap_cnt ocal_tap_cnt_r[i] <= #TCQ 'b0; end end else if (stg3_dec_r && ~stg3_dec) ocal_tap_cnt_r[cnt_dqs_r][5:0] <= #TCQ stg3_tap_cnt; end always @(posedge clk) begin if (rst || (ocal_state_r == OCAL_NEW_DQS_READ) || (ocal_state_r == OCAL_STG3_CALC) || (ocal_state_r == OCAL_DONE)) prech_done_r <= #TCQ 1'b0; else if (prech_done) prech_done_r <= #TCQ 1'b1; end // setting stg3_tap_cnt == oclkdelay_int_val always @(posedge clk) begin if (rst || (ocal_state_r == OCAL_NEXT_DQS)) begin stg3_tap_cnt_eq_oclkdelay_init_val <= #TCQ 1'b1; end else begin if (ocal_state_r == OCAL_DONE) stg3_tap_cnt_eq_oclkdelay_init_val <= #TCQ 1'b0; else if (ocal_state_r == OCAL_STG3_DEC) stg3_tap_cnt_eq_oclkdelay_init_val <= #TCQ (stg3_tap_cnt == oclkdelay_init_val+1); else if (ocal_state_r == OCAL_STG3_INC) stg3_tap_cnt_eq_oclkdelay_init_val <= #TCQ (stg3_tap_cnt == oclkdelay_init_val-1); end // else: !if((rst || (ocal_state_r == OCAL_IDLE)) begin... end // always @ (posedge clk) // setting sg3_tap_cng > 20 // always @(posedge clk) begin // if ((rst)|| (ocal_state_r == OCAL_NEXT_DQS)) begin // stg3_tap_cnt_gt_20 <= #TCQ 1'b0; // end else begin // if (rst) // if (ocal_state_r == OCAL_STG3_DEC) // stg3_tap_cnt_gt_20 <= #TCQ (stg3_tap_cnt >= 'd22); // else if (ocal_state_r == OCAL_STG3_INC) // stg3_tap_cnt_gt_20 <= #TCQ (stg3_tap_cnt >= 'd20); // end // else: !if((rst || (ocal_state_r == OCAL_IDLE)) begin... 
// end // always @ (posedge clk) // setting sg3_tap_cnt == 0 always @(posedge clk) begin if ((rst)|| (ocal_state_r == OCAL_NEXT_DQS) || (ocal_state_r == OCAL_STG3_INC) ) begin stg3_tap_cnt_eq_0 <= #TCQ 1'b0; end else begin // if (rst) if (ocal_state_r == OCAL_STG3_DEC) stg3_tap_cnt_eq_0 <= #TCQ (stg3_tap_cnt == 'd1); end // else: !if((rst || (ocal_state_r == OCAL_IDLE)) begin... end // always @ (posedge clk) // setting sg3_tap_cnt == 63 always @(posedge clk) begin if ((rst)|| (ocal_state_r == OCAL_NEXT_DQS)) begin stg3_tap_cnt_eq_63 <= #TCQ 1'b0; end else begin // if (rst) if (ocal_state_r == OCAL_STG3_INC) stg3_tap_cnt_eq_63 <= #TCQ (stg3_tap_cnt >= 'd62); else if (ocal_state_r == OCAL_STG3_DEC) stg3_tap_cnt_eq_63 <= #TCQ 1'b0; end // else: !if((rst || (ocal_state_r == OCAL_IDLE)) begin... end // always @ (posedge clk) // setting sg3_tap_cnt < ocaldelay_init_val always @(posedge clk) begin if ((rst)|| (ocal_state_r == OCAL_NEXT_DQS)) begin stg3_tap_cnt_less_oclkdelay_init_val <= #TCQ 1'b0; end else begin // if (rst) if (ocal_state_r == OCAL_STG3_DEC) stg3_tap_cnt_less_oclkdelay_init_val <= #TCQ (stg3_tap_cnt <= oclkdelay_init_val); else if (ocal_state_r == OCAL_STG3_INC) stg3_tap_cnt_less_oclkdelay_init_val <= #TCQ (stg3_tap_cnt <= oclkdelay_init_val-2); end // else: !if((rst || (ocal_state_r == OCAL_IDLE)) begin... end // always @ (posedge clk) // setting stg3_incdec_limit == 15 always @(posedge clk) begin if (rst || (ocal_state_r == OCAL_NEXT_DQS) || (ocal_state_r == OCAL_DONE)) begin stg3_limit <= #TCQ 1'b0; end else if ((ocal_state_r == OCAL_STG3_WAIT) || (ocal_state_r == OCAL_STG2_WAIT)) begin stg3_limit <= #TCQ (stg3_incdec_limit == 'd14); end end // Registers feeding into the ocal_final_cnt_r computation // Equation is in the form of ((A-B)/2) + C + D where the values taken are // A = ocal_fall_edge_taps, ocal_rise_right_edge, stg3_tap_cnt or ocal_fall_edge2_taps // B = ocal_fall_edge1_taps, ocal_rise_edge1_taps or '0' // C = (stg3_tap_cnt - ocal_rise_right_edge), '0' or '1' // D = '32' or '0' always @(posedge clk) begin if (rst || (ocal_state_r == OCAL_NEXT_DQS) || (ocal_state_r == OCAL_DONE)) ocal_final_cnt_r_mux_a <= #TCQ 'd0; else if (|ocal_rise_right_edge) begin if (ocal_fall_edge2_found && ocal_fall_edge1_found) ocal_final_cnt_r_mux_a <= #TCQ ocal_fall_edge2_taps; else ocal_final_cnt_r_mux_a <= #TCQ ocal_rise_right_edge; end else if (ocal_rise_edge2_found) ocal_final_cnt_r_mux_a <= #TCQ ocal_rise_edge2_taps; else if (~ocal_rise_edge2_found && ocal_rise_edge1_found) ocal_final_cnt_r_mux_a <= #TCQ stg3_tap_cnt; else if (ocal_fall_edge2_found && ocal_fall_edge1_found) ocal_final_cnt_r_mux_a <= #TCQ ocal_fall_edge2_taps; end always @(posedge clk) begin if (rst || (ocal_state_r == OCAL_NEXT_DQS) || (ocal_state_r == OCAL_DONE)) ocal_final_cnt_r_mux_b <= #TCQ 'd0; else if (|ocal_rise_right_edge) begin if (ocal_fall_edge2_found && ocal_fall_edge1_found) ocal_final_cnt_r_mux_b <= #TCQ ocal_fall_edge1_taps; else ocal_final_cnt_r_mux_b <= #TCQ ocal_rise_edge1_taps; end else if (ocal_rise_edge2_found && ocal_rise_edge1_found) ocal_final_cnt_r_mux_b <= #TCQ ocal_rise_edge1_taps; else if (ocal_rise_edge2_found && ~ocal_rise_edge1_found) ocal_final_cnt_r_mux_b <= #TCQ 'd0; else if (~ocal_rise_edge2_found && ocal_rise_edge1_found) ocal_final_cnt_r_mux_b <= #TCQ ocal_rise_edge1_taps; else if (ocal_fall_edge2_found && ocal_fall_edge1_found) ocal_final_cnt_r_mux_b <= #TCQ ocal_fall_edge1_taps; end always @(posedge clk) begin if (rst || (ocal_state_r == OCAL_NEXT_DQS) || (ocal_state_r == 
OCAL_DONE)) ocal_final_cnt_r_mux_c <= #TCQ 'd0; else if (|ocal_rise_right_edge) begin if (ocal_fall_edge2_found && ocal_fall_edge1_found) ocal_final_cnt_r_mux_c <= #TCQ 'd1; else ocal_final_cnt_r_mux_c <= #TCQ (stg3_tap_cnt - ocal_rise_right_edge); end else if (~ocal_rise_edge2_found && ocal_rise_edge1_found) ocal_final_cnt_r_mux_c <= #TCQ 'd0; else ocal_final_cnt_r_mux_c <= #TCQ 'd1; end always @(posedge clk) begin if (rst || (ocal_state_r == OCAL_NEXT_DQS) || (ocal_state_r == OCAL_DONE)) ocal_final_cnt_r_mux_d <= #TCQ 'd0; else if (((|ocal_rise_right_edge) && (ocal_fall_edge2_found && ocal_fall_edge1_found)) || (ocal_fall_edge2_found && ocal_fall_edge1_found)) ocal_final_cnt_r_mux_d <= #TCQ 'd32; else ocal_final_cnt_r_mux_d <= #TCQ 'd0; end always @(posedge clk) begin if (rst || (ocal_state_r == OCAL_NEXT_DQS) || (ocal_state_r == OCAL_DONE)) ocal_final_cnt_r <= #TCQ 'd0; else if (ocal_state_r == OCAL_STG3_CALC) ocal_final_cnt_r <= #TCQ ((ocal_final_cnt_r_mux_a - ocal_final_cnt_r_mux_b)>>1) + ocal_final_cnt_r_mux_c + ocal_final_cnt_r_mux_d; end genvar dqs_q; generate for (dqs_q=0; dqs_q < DQS_WIDTH; dqs_q = dqs_q + 1) begin: tap_cnt_split assign wl_po_fine_cnt_w[dqs_q] = wl_po_fine_cnt[6*dqs_q+:6]; end endgenerate // State Machine always @(posedge clk) begin if (rst) begin ocal_state_r <= #TCQ OCAL_IDLE; cnt_dqs_r <= #TCQ 'd0; stg3_tap_cnt <= #TCQ oclkdelay_init_val; stg3_incdec_limit <= #TCQ 'd0; stg3_dec2inc <= #TCQ 1'b0; stg2_tap_cnt <= #TCQ 'd0; stg2_inc2_cnt <= #TCQ 2'b00; stg2_dec2_cnt <= #TCQ 2'b00; stg2_dec_cnt <= #TCQ 'd0; stg3_dec <= #TCQ 1'b0; wrlvl_final <= #TCQ 1'b0; oclk_calib_resume <= #TCQ 1'b0; oclk_prech_req <= #TCQ 1'b0; ocal_inc_cnt <= #TCQ 'd0; ocal_dec_cnt <= #TCQ 'd0; ocal_stg3_inc_en <= #TCQ 1'b0; ocal_rise_edge1_found <= #TCQ 1'b0; ocal_rise_edge2_found <= #TCQ 1'b0; ocal_rise_edge1_found_timing <= #TCQ 1'b0; ocal_rise_edge2_found_timing <= #TCQ 1'b0; ocal_rise_right_edge <= #TCQ 'd0; ocal_rise_edge1_taps <= #TCQ 'd0; ocal_rise_edge2_taps <= #TCQ 'd0; ocal_fall_edge1_found <= #TCQ 1'b0; ocal_fall_edge2_found <= #TCQ 1'b0; ocal_fall_edge1_taps <= #TCQ 'd0; ocal_fall_edge2_taps <= #TCQ 'd0; ocal_byte_done <= #TCQ 1'b0; ocal_wrlvl_done <= #TCQ 1'b0; ocal_done_r <= #TCQ 1'b0; po_stg23_sel <= #TCQ 1'b0; po_en_stg23 <= #TCQ 1'b0; po_stg23_incdec <= #TCQ 1'b0; ocal_final_cnt_r_calc <= #TCQ 1'b0; end else begin case (ocal_state_r) OCAL_IDLE: begin if (oclkdelay_calib_start && ~oclkdelay_calib_start_r) begin ocal_state_r <= #TCQ OCAL_NEW_DQS_WAIT; stg3_tap_cnt <= #TCQ oclkdelay_init_val; stg2_tap_cnt <= #TCQ wl_po_fine_cnt_w[cnt_dqs_r]; end end OCAL_NEW_DQS_READ: begin oclk_prech_req <= #TCQ 1'b0; oclk_calib_resume <= #TCQ 1'b0; if (pat_data_match_valid_r) ocal_state_r <= #TCQ OCAL_NEW_DQS_WAIT; end OCAL_NEW_DQS_WAIT: begin oclk_calib_resume <= #TCQ 1'b0; oclk_prech_req <= #TCQ 1'b0; po_en_stg23 <= #TCQ 1'b0; po_stg23_incdec <= #TCQ 1'b0; if (pat_data_match_valid_r && !stg3_tap_cnt_eq_oclkdelay_init_val) begin if ((stg3_limit && ~ocal_stg3_inc_en) || stg3_tap_cnt == 'd0) begin // No write levling performed to avoid stage 2 coarse dec. // Therefore stage 3 taps can only be decremented by an // additional 15 taps after stage 2 taps reach 63. 
ocal_state_r <= #TCQ OCAL_STG3_SEL; ocal_stg3_inc_en <= #TCQ 1'b1; stg3_incdec_limit <= #TCQ 'd0; // An edge was detected end else if (~pat_data_match_r) begin // Sticky bit - asserted after we encounter an edge, although // the current edge may not be considered the "first edge" this // just means we found at least one edge if (~ocal_stg3_inc_en) begin if (|stable_fall_stg3_cnt && ~ocal_fall_edge1_found) begin ocal_fall_edge1_found <= #TCQ 1'b1; ocal_fall_edge1_taps <= #TCQ stg3_tap_cnt + 1; end else begin ocal_rise_edge1_found <= #TCQ 1'b1; ocal_rise_edge1_found_timing <= #TCQ 1'b1; end end // Sarting point was in the jitter region close to the right edge if (~stable_rise_eye_r && ~ocal_stg3_inc_en) begin ocal_rise_right_edge <= #TCQ stg3_tap_cnt; ocal_state_r <= #TCQ OCAL_STG3_SEL; // Starting point was in the valid window close to the right edge // Or away from the right edge hence no stable_eye_r condition // Or starting point was in the right jitter region and ocal_rise_right_edge // is detected end else if (ocal_stg3_inc_en) begin // Both edges found if (stable_fall_eye_r) begin ocal_state_r <= #TCQ OCAL_STG3_CALC; ocal_fall_edge2_found <= #TCQ 1'b1; ocal_fall_edge2_taps <= #TCQ stg3_tap_cnt - 1; end else begin ocal_state_r <= #TCQ OCAL_STG3_CALC; ocal_rise_edge2_found <= #TCQ 1'b1; ocal_rise_edge2_found_timing <= #TCQ 1'b1; ocal_rise_edge2_taps <= #TCQ stg3_tap_cnt - 1; end // Starting point in the valid window away from left edge // Assuming starting point will not be in valid window close to // left edge end else if (stable_rise_eye_r) begin ocal_rise_edge1_taps <= #TCQ stg3_tap_cnt + 1; ocal_state_r <= #TCQ OCAL_STG3_SEL; ocal_stg3_inc_en <= #TCQ 1'b1; stg3_incdec_limit <= #TCQ 'd0; end else ocal_state_r <= #TCQ OCAL_STG3_SEL; end else ocal_state_r <= #TCQ OCAL_STG3_SEL; end else if (stg3_tap_cnt_eq_oclkdelay_init_val) ocal_state_r <= #TCQ OCAL_STG3_SEL; else if ((stg3_limit && ocal_stg3_inc_en) || (stg3_tap_cnt_eq_63)) begin ocal_state_r <= #TCQ OCAL_STG3_CALC; stg3_incdec_limit <= #TCQ 'd0; end end OCAL_STG3_SEL: begin po_stg23_sel <= #TCQ 1'b1; ocal_wrlvl_done <= #TCQ 1'b0; ocal_state_r <= #TCQ OCAL_STG3_SEL_WAIT; ocal_final_cnt_r_calc <= #TCQ 1'b0; end OCAL_STG3_SEL_WAIT: begin if (cnt_next_state) begin ocal_state_r <= #TCQ OCAL_STG3_EN_WAIT; if (ocal_stg3_inc_en) begin po_stg23_incdec <= #TCQ 1'b1; if (stg3_tap_cnt_less_oclkdelay_init_val) begin ocal_inc_cnt <= #TCQ oclkdelay_init_val - stg3_tap_cnt; stg3_dec2inc <= #TCQ 1'b1; oclk_prech_req <= #TCQ 1'b1; end end else begin po_stg23_incdec <= #TCQ 1'b0; if (stg3_dec) ocal_dec_cnt <= #TCQ ocal_final_cnt_r; end end end OCAL_STG3_EN_WAIT: begin if (cnt_next_state) begin if (ocal_stg3_inc_en) ocal_state_r <= #TCQ OCAL_STG3_INC; else ocal_state_r <= #TCQ OCAL_STG3_DEC; end end OCAL_STG3_DEC: begin po_en_stg23 <= #TCQ 1'b1; stg3_tap_cnt <= #TCQ stg3_tap_cnt - 1; if (ocal_dec_cnt == 1) begin ocal_byte_done <= #TCQ 1'b1; ocal_state_r <= #TCQ OCAL_DEC_DONE_WAIT; ocal_dec_cnt <= #TCQ ocal_dec_cnt - 1; end else if (ocal_dec_cnt > 'd0) begin ocal_state_r <= #TCQ OCAL_STG3_DEC_WAIT; ocal_dec_cnt <= #TCQ ocal_dec_cnt - 1; end else ocal_state_r <= #TCQ OCAL_STG3_WAIT; end OCAL_STG3_DEC_WAIT: begin po_en_stg23 <= #TCQ 1'b0; if (cnt_next_state) begin if (ocal_dec_cnt > 'd0) ocal_state_r <= #TCQ OCAL_STG3_DEC; else ocal_state_r <= #TCQ OCAL_DEC_DONE_WAIT; end end OCAL_DEC_DONE_WAIT: begin // Required to make sure that po_stg23_incdec // de-asserts some time after de-assertion of // po_en_stg23 po_en_stg23 <= #TCQ 1'b0; if (cnt_next_state) 
begin // Final stage 3 decrement completed, proceed // to stage 2 tap decrement ocal_state_r <= #TCQ OCAL_STG2_SEL; po_stg23_incdec <= #TCQ 1'b0; stg3_dec <= #TCQ 1'b0; end end OCAL_STG3_WAIT: begin po_en_stg23 <= #TCQ 1'b0; if (cnt_next_state) begin po_stg23_incdec <= #TCQ 1'b0; if ((stg2_tap_cnt != 6'd63) || (stg2_tap_cnt != 6'd0)) ocal_state_r <= #TCQ OCAL_STG2_SEL; else begin oclk_calib_resume <= #TCQ 1'b1; ocal_state_r <= #TCQ OCAL_NEW_DQS_WAIT; stg3_incdec_limit <= #TCQ stg3_incdec_limit + 1; end end end OCAL_STG2_SEL: begin po_stg23_sel <= #TCQ 1'b0; po_en_stg23 <= #TCQ 1'b0; po_stg23_incdec <= #TCQ 1'b0; ocal_state_r <= #TCQ OCAL_STG2_WAIT; stg2_inc2_cnt <= #TCQ 2'b01; stg2_dec2_cnt <= #TCQ 2'b01; end OCAL_STG2_WAIT: begin po_en_stg23 <= #TCQ 1'b0; po_stg23_incdec <= #TCQ 1'b0; if (cnt_next_state) begin if (ocal_byte_done) begin if (stg2_tap_cnt > 'd0) begin // Decrement stage 2 taps to '0' before // final write level is performed ocal_state_r <= #TCQ OCAL_STG2_DEC; stg2_dec_cnt <= #TCQ stg2_tap_cnt; end else begin ocal_state_r <= #TCQ OCAL_NEXT_DQS; ocal_byte_done <= #TCQ 1'b0; end end else if (stg3_dec2inc && (stg2_tap_cnt > 'd0)) begin // Decrement stage 2 tap to initial value before // edge 2 detection begins ocal_state_r <= #TCQ OCAL_STG2_DEC; stg2_dec_cnt <= #TCQ stg2_tap_cnt - wl_po_fine_cnt_w[cnt_dqs_r]; end else if (~ocal_stg3_inc_en && (stg2_tap_cnt < 6'd63)) begin // Increment stage 2 taps by 2 for every stage 3 tap decrement // as part of edge 1 detection to avoid tDQSS violation between // write DQS and CK ocal_state_r <= #TCQ OCAL_STG2_INC; end else if (ocal_stg3_inc_en && (stg2_tap_cnt > 6'd0)) begin // Decrement stage 2 taps by 2 for every stage 3 tap increment // as part of edge 2 detection to avoid tDQSS violation between // write DQS and CK ocal_state_r <= #TCQ OCAL_STG2_DEC; end else begin oclk_calib_resume <= #TCQ 1'b1; ocal_state_r <= #TCQ OCAL_NEW_DQS_WAIT; stg3_incdec_limit <= #TCQ stg3_incdec_limit + 1; end end end OCAL_STG2_INC: begin po_en_stg23 <= #TCQ 1'b1; po_stg23_incdec <= #TCQ 1'b1; stg2_tap_cnt <= #TCQ stg2_tap_cnt + 1; if (stg2_inc2_cnt > 2'b00) begin stg2_inc2_cnt <= stg2_inc2_cnt - 1; ocal_state_r <= #TCQ OCAL_STG2_WAIT; end else if (stg2_tap_cnt == 6'd62) begin ocal_state_r <= #TCQ OCAL_STG2_WAIT; end else begin oclk_calib_resume <= #TCQ 1'b1; ocal_state_r <= #TCQ OCAL_NEW_DQS_WAIT; end end OCAL_STG2_DEC: begin po_en_stg23 <= #TCQ 1'b1; po_stg23_incdec <= #TCQ 1'b0; stg2_tap_cnt <= #TCQ stg2_tap_cnt - 1; if (stg2_dec_cnt > 6'd0) begin stg2_dec_cnt <= #TCQ stg2_dec_cnt - 1; ocal_state_r <= #TCQ OCAL_STG2_DEC_WAIT; end else if (stg2_dec2_cnt > 2'b00) begin stg2_dec2_cnt <= stg2_dec2_cnt - 1; ocal_state_r <= #TCQ OCAL_STG2_WAIT; end else if (stg2_tap_cnt == 6'd1) ocal_state_r <= #TCQ OCAL_STG2_WAIT; else begin oclk_calib_resume <= #TCQ 1'b1; ocal_state_r <= #TCQ OCAL_NEW_DQS_WAIT; end end OCAL_STG2_DEC_WAIT: begin po_en_stg23 <= #TCQ 1'b0; po_stg23_incdec <= #TCQ 1'b0; if (cnt_next_state) begin if (stg2_dec_cnt > 6'd0) begin ocal_state_r <= #TCQ OCAL_STG2_DEC; end else if (ocal_byte_done) begin ocal_state_r <= #TCQ OCAL_NEXT_DQS; ocal_byte_done <= #TCQ 1'b0; end else if (prech_done_r && stg3_dec2inc) begin stg3_dec2inc <= #TCQ 1'b0; if (stg3_tap_cnt_eq_63) ocal_state_r <= #TCQ OCAL_STG3_CALC; else begin ocal_state_r <= #TCQ OCAL_NEW_DQS_READ; oclk_calib_resume <= #TCQ 1'b1; end end end end OCAL_STG3_CALC: begin if (ocal_final_cnt_r_calc) begin ocal_state_r <= #TCQ OCAL_STG3_SEL; stg3_dec <= #TCQ 1'b1; ocal_stg3_inc_en <= #TCQ 1'b0; end else 
ocal_final_cnt_r_calc <= #TCQ 1'b1; end OCAL_STG3_INC: begin po_en_stg23 <= #TCQ 1'b1; stg3_tap_cnt <= #TCQ stg3_tap_cnt + 1; if (ocal_inc_cnt > 'd0) ocal_inc_cnt <= #TCQ ocal_inc_cnt - 1; if (ocal_inc_cnt == 1) ocal_state_r <= #TCQ OCAL_INC_DONE_WAIT; else ocal_state_r <= #TCQ OCAL_STG3_INC_WAIT; end OCAL_STG3_INC_WAIT: begin po_en_stg23 <= #TCQ 1'b0; po_stg23_incdec <= #TCQ 1'b1; if (cnt_next_state) begin if (ocal_inc_cnt > 'd0) ocal_state_r <= #TCQ OCAL_STG3_INC; else begin ocal_state_r <= #TCQ OCAL_STG2_SEL; po_stg23_incdec <= #TCQ 1'b0; end end end OCAL_INC_DONE_WAIT: begin // Required to make sure that po_stg23_incdec // de-asserts some time after de-assertion of // po_en_stg23 po_en_stg23 <= #TCQ 1'b0; oclk_prech_req <= #TCQ 1'b0; if (cnt_next_state) begin ocal_state_r <= #TCQ OCAL_STG2_SEL; po_stg23_incdec <= #TCQ 1'b0; end end OCAL_NEXT_DQS: begin ocal_final_cnt_r_calc <= #TCQ 1'b0; po_en_stg23 <= #TCQ 1'b0; po_stg23_incdec <= #TCQ 1'b0; stg3_tap_cnt <= #TCQ 6'd0; ocal_rise_edge1_found <= #TCQ 1'b0; ocal_rise_edge2_found <= #TCQ 1'b0; ocal_rise_edge1_found_timing <= #TCQ 1'b0; ocal_rise_edge2_found_timing <= #TCQ 1'b0; ocal_rise_edge1_taps <= #TCQ 'd0; ocal_rise_edge2_taps <= #TCQ 'd0; ocal_rise_right_edge <= #TCQ 'd0; ocal_fall_edge1_found <= #TCQ 1'b0; ocal_fall_edge2_found <= #TCQ 1'b0; ocal_fall_edge1_taps <= #TCQ 'd0; ocal_fall_edge2_taps <= #TCQ 'd0; stg3_incdec_limit <= #TCQ 'd0; oclk_prech_req <= #TCQ 1'b1; if (cnt_dqs_r == DQS_WIDTH-1) wrlvl_final <= #TCQ 1'b1; if (prech_done) begin if (cnt_dqs_r == DQS_WIDTH-1) // If the last DQS group was just finished, // then end of calibration ocal_state_r <= #TCQ OCAL_DONE; else begin // Continue to next DQS group cnt_dqs_r <= #TCQ cnt_dqs_r + 1; ocal_state_r <= #TCQ OCAL_NEW_DQS_READ; stg3_tap_cnt <= #TCQ oclkdelay_init_val; stg2_tap_cnt <= #TCQ wl_po_fine_cnt_w[cnt_dqs_r + 1'b1]; end end end OCAL_DONE: begin ocal_final_cnt_r_calc <= #TCQ 1'b0; oclk_prech_req <= #TCQ 1'b0; po_stg23_sel <= #TCQ 1'b0; ocal_done_r <= #TCQ 1'b1; end endcase end end endmodule
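// Illustrative only: one possible way the calibration block above could be
// instantiated from a PHY-level wrapper. The wrapper name, the parameter
// values (e.g. tCK = 1250) and the prech_done tie-off are assumptions made
// for this sketch; in the MIG-generated hierarchy these connections come
// from the calibration top level.
module example_oclkdelay_cal_wrapper #(
  parameter nCK_PER_CLK   = 4,
  parameter DRAM_WIDTH    = 8,
  parameter DQS_WIDTH     = 8,
  parameter DQ_WIDTH      = 64,
  parameter DQS_CNT_WIDTH = 3
) (
  input                               clk,
  input                               rst,
  input                               start_cal,
  input                               rddata_en,
  input  [2*nCK_PER_CLK*DQ_WIDTH-1:0] rd_data,
  input  [6*DQS_WIDTH-1:0]            wl_po_fine_cnt,
  output                              cal_done
);

  // Outputs of the calibration block observed or left unused in this sketch
  wire                     wrlvl_final_w;
  wire                     po_stg3_incdec_w, po_en_stg3_w;
  wire                     po_stg23_sel_w, po_stg23_incdec_w, po_en_stg23_w;
  wire                     init_delay_done_w;
  wire [DQS_CNT_WIDTH:0]   cal_cnt_w;
  wire                     prech_req_w, calib_resume_w;
  wire [255:0]             dbg_w;
  wire [16*DRAM_WIDTH-1:0] dbg_rd_data_w;

  mig_7series_v1_9_ddr_phy_oclkdelay_cal #(
    .TCQ            (100),
    .tCK            (1250),          // assumed DDR3-1600 bit period
    .nCK_PER_CLK    (nCK_PER_CLK),
    .DRAM_TYPE      ("DDR3"),
    .DRAM_WIDTH     (DRAM_WIDTH),
    .DQS_CNT_WIDTH  (DQS_CNT_WIDTH),
    .DQS_WIDTH      (DQS_WIDTH),
    .DQ_WIDTH       (DQ_WIDTH),
    .SIM_CAL_OPTION ("NONE"),
    .OCAL_EN        ("ON")
  ) u_oclkdelay_cal (
    .clk                   (clk),
    .rst                   (rst),
    .oclk_init_delay_start (start_cal),
    .oclkdelay_calib_start (start_cal),
    .oclkdelay_init_val    (6'd6),   // assumed initial stage-3 tap value
    .phy_rddata_en         (rddata_en),
    .rd_data               (rd_data),
    .prech_done            (1'b1),   // tied off for illustration only
    .wl_po_fine_cnt        (wl_po_fine_cnt),
    .wrlvl_final           (wrlvl_final_w),
    .po_stg3_incdec        (po_stg3_incdec_w),
    .po_en_stg3            (po_en_stg3_w),
    .po_stg23_sel          (po_stg23_sel_w),
    .po_stg23_incdec       (po_stg23_incdec_w),
    .po_en_stg23           (po_en_stg23_w),
    .oclk_init_delay_done  (init_delay_done_w),
    .oclkdelay_calib_cnt   (cal_cnt_w),
    .oclk_prech_req        (prech_req_w),
    .oclk_calib_resume     (calib_resume_w),
    .oclkdelay_calib_done  (cal_done),
    .dbg_phy_oclkdelay_cal (dbg_w),
    .dbg_oclkdelay_rd_data (dbg_rd_data_w)
  );

endmodule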
//***************************************************************************** // (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. 
// //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor : Xilinx // \ \ \/ Version : %version // \ \ Application : MIG // / / Filename : ecc_merge_enc.v // /___/ /\ Date Last Modified : $date$ // \ \ / \ Date Created : Tue Jun 30 2009 // \___\/\___\ // //Device : 7-Series //Design Name : DDR3 SDRAM //Purpose : //Reference : //Revision History : //***************************************************************************** `timescale 1ps/1ps module mig_7series_v1_9_ecc_merge_enc #( parameter TCQ = 100, parameter PAYLOAD_WIDTH = 64, parameter CODE_WIDTH = 72, parameter DATA_BUF_ADDR_WIDTH = 4, parameter DATA_BUF_OFFSET_WIDTH = 1, parameter DATA_WIDTH = 64, parameter DQ_WIDTH = 72, parameter ECC_WIDTH = 8, parameter nCK_PER_CLK = 4 ) ( /*AUTOARG*/ // Outputs mc_wrdata, mc_wrdata_mask, // Inputs clk, rst, wr_data, wr_data_mask, rd_merge_data, h_rows, raw_not_ecc ); input clk; input rst; input [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] wr_data; input [2*nCK_PER_CLK*DATA_WIDTH/8-1:0] wr_data_mask; input [2*nCK_PER_CLK*DATA_WIDTH-1:0] rd_merge_data; reg [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] wr_data_r; reg [2*nCK_PER_CLK*DATA_WIDTH/8-1:0] wr_data_mask_r; reg [2*nCK_PER_CLK*DATA_WIDTH-1:0] rd_merge_data_r; always @(posedge clk) wr_data_r <= #TCQ wr_data; always @(posedge clk) wr_data_mask_r <= #TCQ wr_data_mask; always @(posedge clk) rd_merge_data_r <= #TCQ rd_merge_data; // Merge new data with memory read data. wire [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] merged_data; genvar h; genvar i; generate for (h=0; h<2*nCK_PER_CLK; h=h+1) begin : merge_data_outer for (i=0; i<DATA_WIDTH/8; i=i+1) begin : merge_data_inner assign merged_data[h*PAYLOAD_WIDTH+i*8+:8] = wr_data_mask[h*DATA_WIDTH/8+i] ? rd_merge_data[h*DATA_WIDTH+i*8+:8] : wr_data[h*PAYLOAD_WIDTH+i*8+:8]; end if (PAYLOAD_WIDTH > DATA_WIDTH) assign merged_data[(h+1)*PAYLOAD_WIDTH-1-:PAYLOAD_WIDTH-DATA_WIDTH]= wr_data[(h+1)*PAYLOAD_WIDTH-1-:PAYLOAD_WIDTH-DATA_WIDTH]; end endgenerate // Generate ECC and overlay onto mc_wrdata. input [CODE_WIDTH*ECC_WIDTH-1:0] h_rows; input [2*nCK_PER_CLK-1:0] raw_not_ecc; reg [2*nCK_PER_CLK-1:0] raw_not_ecc_r; always @(posedge clk) raw_not_ecc_r <= #TCQ raw_not_ecc; output reg [2*nCK_PER_CLK*DQ_WIDTH-1:0] mc_wrdata; reg [2*nCK_PER_CLK*DQ_WIDTH-1:0] mc_wrdata_c; genvar j; integer k; generate for (j=0; j<2*nCK_PER_CLK; j=j+1) begin : ecc_word always @(/*AS*/h_rows or merged_data or raw_not_ecc_r) begin mc_wrdata_c[j*DQ_WIDTH+:DQ_WIDTH] = {{DQ_WIDTH-PAYLOAD_WIDTH{1'b0}}, merged_data[j*PAYLOAD_WIDTH+:PAYLOAD_WIDTH]}; for (k=0; k<ECC_WIDTH; k=k+1) if (~raw_not_ecc_r[j]) mc_wrdata_c[j*DQ_WIDTH+CODE_WIDTH-k-1] = ^(merged_data[j*PAYLOAD_WIDTH+:DATA_WIDTH] & h_rows[k*CODE_WIDTH+:DATA_WIDTH]); end end endgenerate always @(posedge clk) mc_wrdata <= mc_wrdata_c; // Set all DRAM masks to zero. output wire[2*nCK_PER_CLK*DQ_WIDTH/8-1:0] mc_wrdata_mask; assign mc_wrdata_mask = {2*nCK_PER_CLK*DQ_WIDTH/8{1'b0}}; endmodule
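// Illustrative only: the ecc_word generate block above computes each check
// bit as a parity over the data bits selected by one row of the H matrix.
// The hypothetical single-word, purely combinational module below restates
// that relation (check-bit ordering within the code word is simplified here).
module example_ecc_encode_word #(
  parameter DATA_WIDTH = 64,
  parameter CODE_WIDTH = 72,
  parameter ECC_WIDTH  = 8
) (
  input  [DATA_WIDTH-1:0]           data,    // one payload word
  input  [CODE_WIDTH*ECC_WIDTH-1:0] h_rows,  // H-matrix rows, as in ecc_merge_enc
  output [ECC_WIDTH-1:0]            ecc      // generated check bits
);
  genvar k;
  generate
    for (k = 0; k < ECC_WIDTH; k = k + 1) begin : gen_ecc_bit
      // Check bit k is the XOR of the data bits picked out by H-row k,
      // mirroring the inner loop of the ecc_word generate block above.
      assign ecc[k] = ^(data & h_rows[k*CODE_WIDTH+:DATA_WIDTH]);
    end
  endgenerate
endmodule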
module main_pll (inclk0, c0); input inclk0; output c0; wire [4:0] sub_wire0; wire [0:0] sub_wire4 = 1'h0; wire [0:0] sub_wire1 = sub_wire0[0:0]; wire c0 = sub_wire1; wire sub_wire2 = inclk0; wire [1:0] sub_wire3 = {sub_wire4, sub_wire2}; altpll altpll_component ( .inclk (sub_wire3), .clk (sub_wire0), .activeclock (), .areset (1'b0), .clkbad (), .clkena ({6{1'b1}}), .clkloss (), .clkswitch (1'b0), .configupdate (1'b0), .enable0 (), .enable1 (), .extclk (), .extclkena ({4{1'b1}}), .fbin (1'b1), .fbmimicbidir (), .fbout (), // synopsys translate_off .fref (), .icdrclk (), // synopsys translate_on .locked (), .pfdena (1'b1), .phasecounterselect ({4{1'b1}}), .phasedone (), .phasestep (1'b1), .phaseupdown (1'b1), .pllena (1'b1), .scanaclr (1'b0), .scanclk (1'b0), .scanclkena (1'b1), .scandata (1'b0), .scandataout (), .scandone (), .scanread (1'b0), .scanwrite (1'b0), .sclkout0 (), .sclkout1 (), .vcooverrange (), .vcounderrange ()); defparam altpll_component.bandwidth_type = "AUTO", altpll_component.clk0_divide_by = 5, altpll_component.clk0_duty_cycle = 50, altpll_component.clk0_multiply_by = 5, altpll_component.clk0_phase_shift = "0", altpll_component.compensate_clock = "CLK0", altpll_component.inclk0_input_frequency = 20000, altpll_component.intended_device_family = "Cyclone III", altpll_component.lpm_hint = "CBX_MODULE_PREFIX=main_pll", altpll_component.lpm_type = "altpll", altpll_component.operation_mode = "NORMAL", altpll_component.pll_type = "AUTO", altpll_component.port_activeclock = "PORT_UNUSED", altpll_component.port_areset = "PORT_UNUSED", altpll_component.port_clkbad0 = "PORT_UNUSED", altpll_component.port_clkbad1 = "PORT_UNUSED", altpll_component.port_clkloss = "PORT_UNUSED", altpll_component.port_clkswitch = "PORT_UNUSED", altpll_component.port_configupdate = "PORT_UNUSED", altpll_component.port_fbin = "PORT_UNUSED", altpll_component.port_inclk0 = "PORT_USED", altpll_component.port_inclk1 = "PORT_UNUSED", altpll_component.port_locked = "PORT_UNUSED", altpll_component.port_pfdena = "PORT_UNUSED", altpll_component.port_phasecounterselect = "PORT_UNUSED", altpll_component.port_phasedone = "PORT_UNUSED", altpll_component.port_phasestep = "PORT_UNUSED", altpll_component.port_phaseupdown = "PORT_UNUSED", altpll_component.port_pllena = "PORT_UNUSED", altpll_component.port_scanaclr = "PORT_UNUSED", altpll_component.port_scanclk = "PORT_UNUSED", altpll_component.port_scanclkena = "PORT_UNUSED", altpll_component.port_scandata = "PORT_UNUSED", altpll_component.port_scandataout = "PORT_UNUSED", altpll_component.port_scandone = "PORT_UNUSED", altpll_component.port_scanread = "PORT_UNUSED", altpll_component.port_scanwrite = "PORT_UNUSED", altpll_component.port_clk0 = "PORT_USED", altpll_component.port_clk1 = "PORT_UNUSED", altpll_component.port_clk2 = "PORT_UNUSED", altpll_component.port_clk3 = "PORT_UNUSED", altpll_component.port_clk4 = "PORT_UNUSED", altpll_component.port_clk5 = "PORT_UNUSED", altpll_component.port_clkena0 = "PORT_UNUSED", altpll_component.port_clkena1 = "PORT_UNUSED", altpll_component.port_clkena2 = "PORT_UNUSED", altpll_component.port_clkena3 = "PORT_UNUSED", altpll_component.port_clkena4 = "PORT_UNUSED", altpll_component.port_clkena5 = "PORT_UNUSED", altpll_component.port_extclk0 = "PORT_UNUSED", altpll_component.port_extclk1 = "PORT_UNUSED", altpll_component.port_extclk2 = "PORT_UNUSED", altpll_component.port_extclk3 = "PORT_UNUSED", altpll_component.width_clock = 5; endmodule
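// Illustrative only: a minimal usage sketch for the PLL wrapper above. The
// wrapper and signal names here are assumptions. With clk0_multiply_by and
// clk0_divide_by both set to 5, c0 tracks the input frequency; the
// inclk0_input_frequency setting of 20000 appears to describe a 20 ns
// (50 MHz) reference, so c0 would be a PLL-conditioned clock at the same rate.
module example_main_pll_use (
  input  clk_50mhz_in,   // assumed 50 MHz board oscillator
  output sys_clk         // PLL output used as the system clock
);
  main_pll u_main_pll (
    .inclk0 (clk_50mhz_in),
    .c0     (sys_clk)
  );
endmodule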
// ---------------------------------------------------------------------- // Copyright (c) 2016, The Regents of the University of California All // rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents of the University of California // nor the names of its contributors may be used to endorse or // promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE // UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS // OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // ---------------------------------------------------------------------- //---------------------------------------------------------------------------- // Filename: cross_domain_signal.v // Version: 1.00.a // Verilog Standard: Verilog-2001 // Description: Send a signal from clock domain A into clock domain B // and get the signal back into clock domain A. Domain A can know roughly when // the signal is received domain B. // Author: Matt Jacobsen // History: @mattj: Version 2.0 //----------------------------------------------------------------------------- `timescale 1ns/1ns module cross_domain_signal ( input CLK_A, // Clock for domain A input CLK_A_SEND, // Signal from domain A to domain B output CLK_A_RECV, // Signal from domain B received in domain A input CLK_B, // Clock for domain B output CLK_B_RECV, // Signal from domain A received in domain B input CLK_B_SEND // Signal from domain B to domain A ); // Sync level signals across domains. syncff sigAtoB (.CLK(CLK_B), .IN_ASYNC(CLK_A_SEND), .OUT_SYNC(CLK_B_RECV)); syncff sigBtoA (.CLK(CLK_A), .IN_ASYNC(CLK_B_SEND), .OUT_SYNC(CLK_A_RECV)); endmodule
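// Illustrative only: the syncff module instantiated above is not part of
// this excerpt. A conventional two-flop level synchronizer with the same
// port names might look like the sketch below; the actual RIFFA syncff may
// differ (e.g. in reset handling or vendor attributes), so this sketch is
// named example_syncff to avoid any claim about the original.
module example_syncff (
  input  CLK,       // destination-domain clock
  input  IN_ASYNC,  // level signal arriving from the other clock domain
  output OUT_SYNC   // synchronized level in the CLK domain
);
  (* ASYNC_REG = "TRUE" *) reg rSync0 = 0;
  (* ASYNC_REG = "TRUE" *) reg rSync1 = 0;

  // Double-register the asynchronous level to resolve metastability.
  always @(posedge CLK) begin
    rSync0 <= IN_ASYNC;
    rSync1 <= rSync0;
  end

  assign OUT_SYNC = rSync1;
endmodule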
//----------------------------------------------------------------------------- // // Jonathan Westhues, April 2006 //----------------------------------------------------------------------------- module hi_read_rx_xcorr( pck0, ck_1356meg, ck_1356megb, pwr_lo, pwr_hi, pwr_oe1, pwr_oe2, pwr_oe3, pwr_oe4, adc_d, adc_clk, ssp_frame, ssp_din, ssp_dout, ssp_clk, cross_hi, cross_lo, dbg, xcorr_is_848, snoop, xcorr_quarter_freq ); input pck0, ck_1356meg, ck_1356megb; output pwr_lo, pwr_hi, pwr_oe1, pwr_oe2, pwr_oe3, pwr_oe4; input [7:0] adc_d; output adc_clk; input ssp_dout; output ssp_frame, ssp_din, ssp_clk; input cross_hi, cross_lo; output dbg; input xcorr_is_848, snoop, xcorr_quarter_freq; // Carrier is steady on through this, unless we're snooping. assign pwr_hi = ck_1356megb & (~snoop); assign pwr_oe1 = 1'b0; assign pwr_oe2 = 1'b0; assign pwr_oe3 = 1'b0; assign pwr_oe4 = 1'b0; reg ssp_clk; reg ssp_frame; reg fc_div_2; always @(posedge ck_1356meg) fc_div_2 = ~fc_div_2; reg fc_div_4; always @(posedge fc_div_2) fc_div_4 = ~fc_div_4; reg fc_div_8; always @(posedge fc_div_4) fc_div_8 = ~fc_div_8; reg adc_clk; always @(xcorr_is_848 or xcorr_quarter_freq or ck_1356meg) if(~xcorr_quarter_freq) begin if(xcorr_is_848) // The subcarrier frequency is fc/16; we will sample at fc, so that // means the subcarrier is 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 1 1 ... adc_clk <= ck_1356meg; else // The subcarrier frequency is fc/32; we will sample at fc/2, and // the subcarrier will look identical. adc_clk <= fc_div_2; end else begin if(xcorr_is_848) // The subcarrier frequency is fc/64 adc_clk <= fc_div_4; else // The subcarrier frequency is fc/128 adc_clk <= fc_div_8; end // When we're a reader, we just need to do the BPSK demod; but when we're an // eavesdropper, we also need to pick out the commands sent by the reader, // using AM. Do this the same way that we do it for the simulated tag. reg after_hysteresis, after_hysteresis_prev; reg [11:0] has_been_low_for; always @(negedge adc_clk) begin if(& adc_d[7:0]) after_hysteresis <= 1'b1; else if(~(| adc_d[7:0])) after_hysteresis <= 1'b0; if(after_hysteresis) begin has_been_low_for <= 7'b0; end else begin if(has_been_low_for == 12'd4095) begin has_been_low_for <= 12'd0; after_hysteresis <= 1'b1; end else has_been_low_for <= has_been_low_for + 1; end end // Let us report a correlation every 4 subcarrier cycles, or 4*16 samples, // so we need a 6-bit counter. reg [5:0] corr_i_cnt; reg [5:0] corr_q_cnt; // And a couple of registers in which to accumulate the correlations. reg signed [15:0] corr_i_accum; reg signed [15:0] corr_q_accum; reg signed [7:0] corr_i_out; reg signed [7:0] corr_q_out; // ADC data appears on the rising edge, so sample it on the falling edge always @(negedge adc_clk) begin // These are the correlators: we correlate against in-phase and quadrature // versions of our reference signal, and keep the (signed) result to // send out later over the SSP. if(corr_i_cnt == 7'd63) begin if(snoop) begin corr_i_out <= {corr_i_accum[12:6], after_hysteresis_prev}; corr_q_out <= {corr_q_accum[12:6], after_hysteresis}; end else begin // Only correlations need to be delivered. 
corr_i_out <= corr_i_accum[13:6]; corr_q_out <= corr_q_accum[13:6]; end corr_i_accum <= adc_d; corr_q_accum <= adc_d; corr_q_cnt <= 4; corr_i_cnt <= 0; end else begin if(corr_i_cnt[3]) corr_i_accum <= corr_i_accum - adc_d; else corr_i_accum <= corr_i_accum + adc_d; if(corr_q_cnt[3]) corr_q_accum <= corr_q_accum - adc_d; else corr_q_accum <= corr_q_accum + adc_d; corr_i_cnt <= corr_i_cnt + 1; corr_q_cnt <= corr_q_cnt + 1; end // The logic in hi_simulate.v reports 4 samples per bit. We report two // (I, Q) pairs per bit, so we should do 2 samples per pair. if(corr_i_cnt == 6'd31) after_hysteresis_prev <= after_hysteresis; // Then the result from last time is serialized and send out to the ARM. // We get one report each cycle, and each report is 16 bits, so the // ssp_clk should be the adc_clk divided by 64/16 = 4. if(corr_i_cnt[1:0] == 2'b10) ssp_clk <= 1'b0; if(corr_i_cnt[1:0] == 2'b00) begin ssp_clk <= 1'b1; // Don't shift if we just loaded new data, obviously. if(corr_i_cnt != 7'd0) begin corr_i_out[7:0] <= {corr_i_out[6:0], corr_q_out[7]}; corr_q_out[7:1] <= corr_q_out[6:0]; end end if(corr_i_cnt[5:2] == 4'b000 || corr_i_cnt[5:2] == 4'b1000) ssp_frame = 1'b1; else ssp_frame = 1'b0; end assign ssp_din = corr_i_out[7]; assign dbg = corr_i_cnt[3]; // Unused. assign pwr_lo = 1'b0; endmodule
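// Illustrative only: a hypothetical top-level hook-up of the correlator
// above. The port names on the instance are the module's own; the wrapper
// name and the constant mode settings (xcorr_is_848 = 1 for the fc/16
// subcarrier, snoop = 0 for reader mode, xcorr_quarter_freq = 0 for
// full-rate sampling) are assumptions chosen for this sketch.
module example_hi_read_rx_xcorr_top (
  input        pck0,
  input        ck_1356meg,
  input        ck_1356megb,
  input  [7:0] adc_d,
  output       adc_clk,
  input        ssp_dout,
  output       ssp_frame,
  output       ssp_din,
  output       ssp_clk,
  input        cross_hi,
  input        cross_lo,
  output       dbg,
  output       pwr_lo,
  output       pwr_hi,
  output       pwr_oe1,
  output       pwr_oe2,
  output       pwr_oe3,
  output       pwr_oe4
);
  hi_read_rx_xcorr u_xcorr (
    .pck0               (pck0),
    .ck_1356meg         (ck_1356meg),
    .ck_1356megb        (ck_1356megb),
    .pwr_lo             (pwr_lo),
    .pwr_hi             (pwr_hi),
    .pwr_oe1            (pwr_oe1),
    .pwr_oe2            (pwr_oe2),
    .pwr_oe3            (pwr_oe3),
    .pwr_oe4            (pwr_oe4),
    .adc_d              (adc_d),
    .adc_clk            (adc_clk),
    .ssp_frame          (ssp_frame),
    .ssp_din            (ssp_din),
    .ssp_dout           (ssp_dout),
    .ssp_clk            (ssp_clk),
    .cross_hi           (cross_hi),
    .cross_lo           (cross_lo),
    .dbg                (dbg),
    .xcorr_is_848       (1'b1),  // assumed: fc/16 (~848 kHz) subcarrier
    .snoop              (1'b0),  // assumed: reader mode, carrier driven
    .xcorr_quarter_freq (1'b0)   // assumed: full-rate sampling
  );
endmodule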
(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* <O___,, * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2015 *) (* \VV/ **************************************************************) (* // * This file is distributed under the terms of the *) (* * GNU Lesser General Public License Version 2.1 *) (************************************************************************) Require Export NumPrelude NZAxioms. Require Import NZBase NZOrder NZAddOrder Plus Minus. (** In this file, we investigate the shape of domains satisfying the [NZDomainSig] interface. In particular, we define a translation from Peano numbers [nat] into NZ. *) Local Notation "f ^ n" := (fun x => nat_rect _ x (fun _ => f) n). Instance nat_rect_wd n {A} (R:relation A) : Proper (R==>(R==>R)==>R) (fun x f => nat_rect (fun _ => _) x (fun _ => f) n). Proof. intros x y eq_xy f g eq_fg; induction n; [assumption | now apply eq_fg]. Qed. Module NZDomainProp (Import NZ:NZDomainSig'). Include NZBaseProp NZ. (** * Relationship between points thanks to [succ] and [pred]. *) (** For any two points, one is an iterated successor of the other. *) Lemma itersucc_or_itersucc n m : exists k, n == (S^k) m \/ m == (S^k) n. Proof. revert n. apply central_induction with (z:=m). { intros x y eq_xy; apply ex_iff_morphism. intros n; apply or_iff_morphism. + split; intros; etransitivity; try eassumption; now symmetry. + split; intros; (etransitivity; [eassumption|]); [|symmetry]; (eapply nat_rect_wd; [eassumption|apply succ_wd]). } exists 0%nat. now left. intros n. split; intros [k [L|R]]. exists (Datatypes.S k). left. now apply succ_wd. destruct k as [|k]. simpl in R. exists 1%nat. left. now apply succ_wd. rewrite nat_rect_succ_r in R. exists k. now right. destruct k as [|k]; simpl in L. exists 1%nat. now right. apply succ_inj in L. exists k. now left. exists (Datatypes.S k). right. now rewrite nat_rect_succ_r. Qed. (** Generalized version of [pred_succ] when iterating *) Lemma succ_swap_pred : forall k n m, n == (S^k) m -> m == (P^k) n. Proof. induction k. simpl; auto with *. simpl; intros. apply pred_wd in H. rewrite pred_succ in H. apply IHk in H; auto. rewrite <- nat_rect_succ_r in H; auto. Qed. (** From a given point, all others are iterated successors or iterated predecessors. *) Lemma itersucc_or_iterpred : forall n m, exists k, n == (S^k) m \/ n == (P^k) m. Proof. intros n m. destruct (itersucc_or_itersucc n m) as (k,[H|H]). exists k; left; auto. exists k; right. apply succ_swap_pred; auto. Qed. (** In particular, all points are either iterated successors of [0] or iterated predecessors of [0] (or both). *) Lemma itersucc0_or_iterpred0 : forall n, exists p:nat, n == (S^p) 0 \/ n == (P^p) 0. Proof. intros n. exact (itersucc_or_iterpred n 0). Qed. (** * Study of initial point w.r.t. [succ] (if any). *) Definition initial n := forall m, n ~= S m. Lemma initial_alt : forall n, initial n <-> S (P n) ~= n. Proof. split. intros Bn EQ. symmetry in EQ. destruct (Bn _ EQ). intros NEQ m EQ. apply NEQ. rewrite EQ, pred_succ; auto with *. Qed. Lemma initial_alt2 : forall n, initial n <-> ~exists m, n == S m. Proof. firstorder. Qed. (** First case: let's assume such an initial point exists (i.e. [S] isn't surjective)... *) Section InitialExists. Hypothesis init : t. Hypothesis Initial : initial init. (** ... then we have unicity of this initial point. *) Lemma initial_unique : forall m, initial m -> m == init. Proof. intros m Im. 
destruct (itersucc_or_itersucc init m) as (p,[H|H]). destruct p. now simpl in *. destruct (Initial _ H). destruct p. now simpl in *. destruct (Im _ H). Qed. (** ... then all other points are descendant of it. *) Lemma initial_ancestor : forall m, exists p, m == (S^p) init. Proof. intros m. destruct (itersucc_or_itersucc init m) as (p,[H|H]). destruct p; simpl in *; auto. exists O; auto with *. destruct (Initial _ H). exists p; auto. Qed. (** NB : We would like to have [pred n == n] for the initial element, but nothing forces that. For instance we can have -3 as initial point, and P(-3) = 2. A bit odd indeed, but legal according to [NZDomainSig]. We can hence have [n == (P^k) m] without [exists k', m == (S^k') n]. *) (** We need decidability of [eq] (or classical reasoning) for this: *) Section SuccPred. Hypothesis eq_decidable : forall n m, n==m \/ n~=m. Lemma succ_pred_approx : forall n, ~initial n -> S (P n) == n. Proof. intros n NB. rewrite initial_alt in NB. destruct (eq_decidable (S (P n)) n); auto. elim NB; auto. Qed. End SuccPred. End InitialExists. (** Second case : let's suppose now [S] surjective, i.e. no initial point. *) Section InitialDontExists. Hypothesis succ_onto : forall n, exists m, n == S m. Lemma succ_onto_gives_succ_pred : forall n, S (P n) == n. Proof. intros n. destruct (succ_onto n) as (m,H). rewrite H, pred_succ; auto with *. Qed. Lemma succ_onto_pred_injective : forall n m, P n == P m -> n == m. Proof. intros n m. intros H; apply succ_wd in H. rewrite !succ_onto_gives_succ_pred in H; auto. Qed. End InitialDontExists. (** To summarize: S is always injective, P is always surjective (thanks to [pred_succ]). I) If S is not surjective, we have an initial point, which is unique. This bottom is below zero: we have N shifted (or not) to the left. P cannot be injective: P init = P (S (P init)). (P init) can be arbitrary. II) If S is surjective, we have [forall n, S (P n) = n], S and P are bijective and reciprocal. IIa) if [exists k<>O, 0 == S^k 0], then we have a cyclic structure Z/nZ IIb) otherwise, we have Z *) (** * An alternative induction principle using [S] and [P]. *) (** It is weaker than [bi_induction]. For instance it cannot prove that we can go from one point by many [S] _or_ many [P], but only by many [S] mixed with many [P]. Think of a model with two copies of N: 0, 1=S 0, 2=S 1, ... 0', 1'=S 0', 2'=S 1', ... and P 0 = 0' and P 0' = 0. *) Lemma bi_induction_pred : forall A : t -> Prop, Proper (eq==>iff) A -> A 0 -> (forall n, A n -> A (S n)) -> (forall n, A n -> A (P n)) -> forall n, A n. Proof. intros. apply bi_induction; auto. clear n. intros n; split; auto. intros G; apply H2 in G. rewrite pred_succ in G; auto. Qed. Lemma central_induction_pred : forall A : t -> Prop, Proper (eq==>iff) A -> forall n0, A n0 -> (forall n, A n -> A (S n)) -> (forall n, A n -> A (P n)) -> forall n, A n. Proof. intros. assert (A 0). destruct (itersucc_or_iterpred 0 n0) as (k,[Hk|Hk]); rewrite Hk; clear Hk. clear H2. induction k; simpl in *; auto. clear H1. induction k; simpl in *; auto. apply bi_induction_pred; auto. Qed. End NZDomainProp. (** We now focus on the translation from [nat] into [NZ]. First, relationship with [0], [succ], [pred]. *) Module NZOfNat (Import NZ:NZDomainSig'). Definition ofnat (n : nat) : t := (S^n) 0. Notation "[ n ]" := (ofnat n) (at level 7) : ofnat. Local Open Scope ofnat. Lemma ofnat_zero : [O] == 0. Proof. reflexivity. Qed. Lemma ofnat_succ : forall n, [Datatypes.S n] == succ [n]. Proof. now unfold ofnat. Qed. 
Lemma ofnat_pred : forall n, n<>O -> [Peano.pred n] == P [n]. Proof. unfold ofnat. destruct n. destruct 1; auto. intros _. simpl. symmetry. apply pred_succ. Qed. (** Since [P 0] can be anything in NZ (either [-1], [0], or even other numbers, we cannot state previous lemma for [n=O]. *) End NZOfNat. (** If we require in addition a strict order on NZ, we can prove that [ofnat] is injective, and hence that NZ is infinite (i.e. we ban Z/nZ models) *) Module NZOfNatOrd (Import NZ:NZOrdSig'). Include NZOfNat NZ. Include NZBaseProp NZ <+ NZOrderProp NZ. Local Open Scope ofnat. Theorem ofnat_S_gt_0 : forall n : nat, 0 < [Datatypes.S n]. Proof. unfold ofnat. intros n; induction n as [| n IH]; simpl in *. apply lt_succ_diag_r. apply lt_trans with (S 0). apply lt_succ_diag_r. now rewrite <- succ_lt_mono. Qed. Theorem ofnat_S_neq_0 : forall n : nat, 0 ~= [Datatypes.S n]. Proof. intros. apply lt_neq, ofnat_S_gt_0. Qed. Lemma ofnat_injective : forall n m, [n]==[m] -> n = m. Proof. induction n as [|n IH]; destruct m; auto. intros H; elim (ofnat_S_neq_0 _ H). intros H; symmetry in H; elim (ofnat_S_neq_0 _ H). intros. f_equal. apply IH. now rewrite <- succ_inj_wd. Qed. Lemma ofnat_eq : forall n m, [n]==[m] <-> n = m. Proof. split. apply ofnat_injective. intros; now subst. Qed. (* In addition, we can prove that [ofnat] preserves order. *) Lemma ofnat_lt : forall n m : nat, [n]<[m] <-> (n<m)%nat. Proof. induction n as [|n IH]; destruct m; repeat rewrite ofnat_zero; split. intro H; elim (lt_irrefl _ H). inversion 1. auto with arith. intros; apply ofnat_S_gt_0. intro H; elim (lt_asymm _ _ H); apply ofnat_S_gt_0. inversion 1. rewrite !ofnat_succ, <- succ_lt_mono, IH; auto with arith. rewrite !ofnat_succ, <- succ_lt_mono, IH; auto with arith. Qed. Lemma ofnat_le : forall n m : nat, [n]<=[m] <-> (n<=m)%nat. Proof. intros. rewrite lt_eq_cases, ofnat_lt, ofnat_eq. split. destruct 1; subst; auto with arith. apply Lt.le_lt_or_eq. Qed. End NZOfNatOrd. (** For basic operations, we can prove correspondance with their counterpart in [nat]. *) Module NZOfNatOps (Import NZ:NZAxiomsSig'). Include NZOfNat NZ. Local Open Scope ofnat. Lemma ofnat_add_l : forall n m, [n]+m == (S^n) m. Proof. induction n; intros. apply add_0_l. rewrite ofnat_succ, add_succ_l. simpl. now f_equiv. Qed. Lemma ofnat_add : forall n m, [n+m] == [n]+[m]. Proof. intros. rewrite ofnat_add_l. induction n; simpl. reflexivity. now f_equiv. Qed. Lemma ofnat_mul : forall n m, [n*m] == [n]*[m]. Proof. induction n; simpl; intros. symmetry. apply mul_0_l. rewrite plus_comm. rewrite ofnat_add, mul_succ_l. now f_equiv. Qed. Lemma ofnat_sub_r : forall n m, n-[m] == (P^m) n. Proof. induction m; simpl; intros. apply sub_0_r. rewrite sub_succ_r. now f_equiv. Qed. Lemma ofnat_sub : forall n m, m<=n -> [n-m] == [n]-[m]. Proof. intros n m H. rewrite ofnat_sub_r. revert n H. induction m. intros. rewrite <- minus_n_O. now simpl. intros. destruct n. inversion H. rewrite nat_rect_succ_r. simpl. etransitivity. apply IHm. auto with arith. eapply nat_rect_wd; [symmetry;apply pred_succ|apply pred_wd]. Qed. End NZOfNatOps.
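(* Editor's note (illustrative only, not part of the original development):
   unfolding the [ofnat] translation defined above on a small constant
   makes the iterated-successor notation concrete: [3] is by definition
   S (S (S 0)), so [ofnat_add] instantiated with n := 2 and m := 1 states
   S (S (S 0)) == S (S 0) + S 0. *)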
// DESCRIPTION: Verilator: Verilog Test module // // This file ONLY is placed into the Public Domain, for any use, // without warranty, 2009 by Iztok Jeras. module t (/*AUTOARG*/ // Inputs clk ); input clk; `define checkh(gotv,expv) do if ((gotv) !== (expv)) begin $write("%%Error: %s:%0d: got='h%x exp='h%x\n", `__FILE__,`__LINE__, (gotv), (expv)); $stop; end while(0); // parameters for array sizes localparam WA = 4; localparam WB = 6; localparam WC = 8; // 2D packed arrays logic [WA+1:2] [WB+1:2] [WC+1:2] array_bg; // big endian array /* verilator lint_off LITENDIAN */ logic [2:WA+1] [2:WB+1] [2:WC+1] array_lt; // little endian array /* verilator lint_on LITENDIAN */ logic [1:0] array_unpk [3:2][1:0]; integer cnt = 0; integer slc = 0; // slice type integer dim = 0; // dimension integer wdt = 0; // width initial begin `checkh($dimensions (array_unpk), 3); `ifndef VCS `checkh($unpacked_dimensions (array_unpk), 2); // IEEE 2009 `endif `checkh($bits (array_unpk), 2*2*2); `checkh($low (array_unpk), 2); `checkh($high (array_unpk), 3); `checkh($left (array_unpk), 3); `checkh($right(array_unpk), 2); `checkh($increment(array_unpk), 1); `checkh($size (array_unpk), 2); end // event counter always @ (posedge clk) begin cnt <= cnt + 1; end // finish report always @ (posedge clk) if ( (cnt[30:4]==3) && (cnt[3:2]==2'd3) && (cnt[1:0]==2'd3) ) begin $write("*-* All Finished *-*\n"); $finish; end integer slc_next; // calculation of dimention sizes always @ (posedge clk) begin // slicing type counter case (cnt[3:2]) 2'd0 : begin slc_next = 0; end // full array 2'd1 : begin slc_next = 1; end // single array element 2'd2 : begin slc_next = 2; end // half array default: begin slc_next = 0; end endcase slc <= slc_next; // dimension counter case (cnt[1:0]) 2'd0 : begin dim <= 1; wdt <= (slc_next==1) ? WA/2 : (slc_next==2) ? 
WA/2 : WA; end 2'd1 : begin dim <= 2; wdt <= WB; end 2'd2 : begin dim <= 3; wdt <= WC; end default: begin dim <= 0; wdt <= 0; end endcase end always @ (posedge clk) begin `ifdef TEST_VERBOSE $write("cnt[30:4]=%0d slc=%0d dim=%0d wdt=%0d\n", cnt[30:4], slc, dim, wdt); `endif if (cnt[30:4]==1) begin // big endian if (slc==0) begin // full array `checkh($dimensions (array_bg), 3); `checkh($bits (array_bg), WA*WB*WC); if ((dim>=1)&&(dim<=3)) begin `checkh($left (array_bg, dim), wdt+1); `checkh($right (array_bg, dim), 2 ); `checkh($low (array_bg, dim), 2 ); `checkh($high (array_bg, dim), wdt+1); `checkh($increment (array_bg, dim), 1 ); `checkh($size (array_bg, dim), wdt ); end end else if (slc==1) begin // single array element `checkh($dimensions (array_bg[2]), 2); `checkh($bits (array_bg[2]), WB*WC); if ((dim>=2)&&(dim<=3)) begin `checkh($left (array_bg[2], dim-1), wdt+1); `checkh($right (array_bg[2], dim-1), 2 ); `checkh($low (array_bg[2], dim-1), 2 ); `checkh($high (array_bg[2], dim-1), wdt+1); `checkh($increment (array_bg[2], dim-1), 1 ); `checkh($size (array_bg[2], dim-1), wdt ); end `ifndef VERILATOR // Unsupported slices don't maintain size correctly end else if (slc==2) begin // half array `checkh($dimensions (array_bg[WA/2+1:2]), 3); `checkh($bits (array_bg[WA/2+1:2]), WA/2*WB*WC); if ((dim>=1)&&(dim<=3)) begin `checkh($left (array_bg[WA/2+1:2], dim), wdt+1); `checkh($right (array_bg[WA/2+1:2], dim), 2 ); `checkh($low (array_bg[WA/2+1:2], dim), 2 ); `checkh($high (array_bg[WA/2+1:2], dim), wdt+1); `checkh($increment (array_bg[WA/2+1:2], dim), 1 ); `checkh($size (array_bg[WA/2+1:2], dim), wdt); end `endif end end else if (cnt[30:4]==2) begin // little endian if (slc==0) begin // full array `checkh($dimensions (array_lt), 3); `checkh($bits (array_lt), WA*WB*WC); if ((dim>=1)&&(dim<=3)) begin `checkh($left (array_lt, dim), 2 ); `checkh($right (array_lt, dim), wdt+1); `checkh($low (array_lt, dim), 2 ); `checkh($high (array_lt, dim), wdt+1); `checkh($increment (array_lt, dim), -1 ); `checkh($size (array_lt, dim), wdt ); end end else if (slc==1) begin // single array element `checkh($dimensions (array_lt[2]), 2); `checkh($bits (array_lt[2]), WB*WC); if ((dim>=2)&&(dim<=3)) begin `checkh($left (array_lt[2], dim-1), 2 ); `checkh($right (array_lt[2], dim-1), wdt+1); `checkh($low (array_lt[2], dim-1), 2 ); `checkh($high (array_lt[2], dim-1), wdt+1); `checkh($increment (array_lt[2], dim-1), -1 ); `checkh($size (array_lt[2], dim-1), wdt ); end `ifndef VERILATOR // Unsupported slices don't maintain size correctly end else if (slc==2) begin // half array `checkh($dimensions (array_lt[2:WA/2+1]), 3); `checkh($bits (array_lt[2:WA/2+1]), WA/2*WB*WC); if ((dim>=1)&&(dim<=3)) begin `checkh($left (array_lt[2:WA/2+1], dim), 2 ); `checkh($right (array_lt[2:WA/2+1], dim), wdt+1); `checkh($low (array_lt[2:WA/2+1], dim), 2 ); `checkh($high (array_lt[2:WA/2+1], dim), wdt+1); `checkh($increment (array_lt[2:WA/2+1], dim), -1 ); `checkh($size (array_lt[2:WA/2+1], dim), wdt ); end `endif end end end endmodule
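// ----------------------------------------------------------------------
// Editor's illustration (not part of the test above): for a single
// big-endian packed dimension declared as [WA+1:2], the array query
// functions relate as checked below. The module name demo_array_query is
// invented for this sketch; with one argument the query functions refer
// to the first (leftmost) dimension.
module demo_array_query;
    localparam WA = 4;
    logic [WA+1:2] vec;   // declared bounds: left = 5, right = 2
    initial begin
        // $left/$right return the declared bounds, $low/$high the numeric
        // minimum/maximum, $size the number of elements, and $increment
        // the direction of the range (+1 when left >= right, as here).
        if ($left(vec)      != WA+1) $stop;
        if ($right(vec)     != 2)    $stop;
        if ($low(vec)       != 2)    $stop;
        if ($high(vec)      != WA+1) $stop;
        if ($size(vec)      != WA)   $stop;
        if ($increment(vec) != 1)    $stop;
        $display("demo_array_query: all checks passed");
    end
endmodule
// ----------------------------------------------------------------------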
//***************************************************************************** // (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor : Xilinx // \ \ \/ Version : %version // \ \ Application : MIG // / / Filename : ecc_gen.v // /___/ /\ Date Last Modified : $date$ // \ \ / \ Date Created : Tue Jun 30 2009 // \___\/\___\ // //Device : 7-Series //Design Name : DDR3 SDRAM //Purpose : //Reference : //Revision History : //***************************************************************************** `timescale 1ps/1ps // Generate the ecc code. Note that the synthesizer should // generate this as a static logic. Code in this block should // never run during simulation phase, or directly impact timing. // // The code generated is a single correct, double detect code. // It is the classic Hamming code. Instead, the code is // optimized for minimal/balanced tree depth and size. See // Hsiao IBM Technial Journal 1970. // // The code is returned as a single bit vector, h_rows. This was // the only way to "subroutinize" this with the restrictions of // disallowed include files and that matrices cannot be passed // in ports. // // Factorial and the combos functions are defined. 
Combos // simply computes the number of combinations from the set // size and elements at a time. // // The function next_combo computes the next combination in // lexicographical order given the "current" combination. Its // output is undefined if given the last combination in the // lexicographical order. // // next_combo is insensitive to the number of elements in the // combinations. // // An H transpose matrix is generated because that's the easiest // way to do it. The H transpose matrix is generated by taking // the one at a time combinations, then the 3 at a time, then // the 5 at a time. The number combinations used is equal to // the width of the code (CODE_WIDTH). The boundaries between // the 1, 3 and 5 groups are hardcoded in the for loop. // // At the same time the h_rows vector is generated from the // H transpose matrix. module mig_7series_v1_9_ecc_gen #( parameter CODE_WIDTH = 72, parameter ECC_WIDTH = 8, parameter DATA_WIDTH = 64 ) ( /*AUTOARG*/ // Outputs h_rows ); function integer factorial (input integer i); integer index; if (i == 1) factorial = 1; else begin factorial = 1; for (index=2; index<=i; index=index+1) factorial = factorial * index; end endfunction // factorial function integer combos (input integer n, k); combos = factorial(n)/(factorial(k)*factorial(n-k)); endfunction // combinations // function next_combo // Given a combination, return the next combo in lexicographical // order. Scans from right to left. Assumes the first combination // is k ones all of the way to the left. // // Upon entry, initialize seen0, trig1, and ones. "seen0" means // that a zero has been observed while scanning from right to left. // "trig1" means that a one have been observed _after_ seen0 is set. // "ones" counts the number of ones observed while scanning the input. // // If trig1 is one, just copy the input bit to the output and increment // to the next bit. Otherwise set the the output bit to zero, if the // input is a one, increment ones. If the input bit is a one and seen0 // is true, dump out the accumulated ones. Set seen0 to the complement // of the input bit. Note that seen0 is not used subsequent to trig1 // getting set. function [ECC_WIDTH-1:0] next_combo (input [ECC_WIDTH-1:0] i); integer index; integer dump_index; reg seen0; reg trig1; // integer ones; reg [ECC_WIDTH-1:0] ones; begin seen0 = 1'b0; trig1 = 1'b0; ones = 0; for (index=0; index<ECC_WIDTH; index=index+1) begin // The "== 1'bx" is so this will converge at time zero. // XST assumes false, which should be OK. 
if ((&i == 1'bx) || trig1) next_combo[index] = i[index]; else begin next_combo[index] = 1'b0; ones = ones + i[index]; if (i[index] && seen0) begin trig1 = 1'b1; for (dump_index=index-1; dump_index>=0;dump_index=dump_index-1) if (dump_index>=index-ones) next_combo[dump_index] = 1'b1; end seen0 = ~i[index]; end // else: !if(trig1) end end // function endfunction // next_combo wire [ECC_WIDTH-1:0] ht_matrix [CODE_WIDTH-1:0]; output wire [CODE_WIDTH*ECC_WIDTH-1:0] h_rows; localparam COMBOS_3 = combos(ECC_WIDTH, 3); localparam COMBOS_5 = combos(ECC_WIDTH, 5); genvar n; genvar s; generate for (n=0; n<CODE_WIDTH; n=n+1) begin : ht if (n == 0) assign ht_matrix[n] = {{3{1'b1}}, {ECC_WIDTH-3{1'b0}}}; else if (n == COMBOS_3 && n < DATA_WIDTH) assign ht_matrix[n] = {{5{1'b1}}, {ECC_WIDTH-5{1'b0}}}; else if ((n == COMBOS_3+COMBOS_5) && n < DATA_WIDTH) assign ht_matrix[n] = {{7{1'b1}}, {ECC_WIDTH-7{1'b0}}}; else if (n == DATA_WIDTH) assign ht_matrix[n] = {{1{1'b1}}, {ECC_WIDTH-1{1'b0}}}; else assign ht_matrix[n] = next_combo(ht_matrix[n-1]); for (s=0; s<ECC_WIDTH; s=s+1) begin : h_row assign h_rows[s*CODE_WIDTH+n] = ht_matrix[n][s]; end end endgenerate endmodule // ecc_gen
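// ----------------------------------------------------------------------
// Editor's worked example (not part of the original core): with the
// default parameters above (CODE_WIDTH=72, ECC_WIDTH=8, DATA_WIDTH=64),
// the generate loop produces combos(8,3) = 56 weight-3 columns, then
// 64 - 56 = 8 columns taken from the weight-5 combinations, and finally
// 8 weight-1 columns for the check bits: 56 + 8 + 8 = 72 = CODE_WIDTH.
// The standalone module below (the name demo_hsiao_counts is invented
// here) re-derives those counts so the split can be confirmed in
// simulation.
module demo_hsiao_counts;
    function automatic integer factorial (input integer i);
        integer k;
        begin
            factorial = 1;
            for (k = 2; k <= i; k = k + 1)
                factorial = factorial * k;
        end
    endfunction

    function automatic integer combos (input integer n, k);
        combos = factorial(n) / (factorial(k) * factorial(n - k));
    endfunction

    initial begin
        if (combos(8, 3) != 56) $stop;
        if (combos(8, 5) != 56) $stop;
        $display("weight-3 columns: %0d, weight-5 columns used: %0d, weight-1 columns: %0d",
                 combos(8, 3), 64 - combos(8, 3), 8);
    end
endmodule
// ----------------------------------------------------------------------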
//***************************************************************************** // (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor : Xilinx // \ \ \/ Version : %version // \ \ Application : MIG // / / Filename : ui_cmd.v // /___/ /\ Date Last Modified : $date$ // \ \ / \ Date Created : Tue Jun 30 2009 // \___\/\___\ // //Device : 7-Series //Design Name : DDR3 SDRAM //Purpose : //Reference : //Revision History : //***************************************************************************** `timescale 1 ps / 1 ps // User interface command port. 
module mig_7series_v1_9_ui_cmd # ( parameter TCQ = 100, parameter ADDR_WIDTH = 33, parameter BANK_WIDTH = 3, parameter COL_WIDTH = 12, parameter DATA_BUF_ADDR_WIDTH = 5, parameter RANK_WIDTH = 2, parameter ROW_WIDTH = 16, parameter RANKS = 4, parameter MEM_ADDR_ORDER = "BANK_ROW_COLUMN" ) (/*AUTOARG*/ // Outputs app_rdy, use_addr, rank, bank, row, col, size, cmd, hi_priority, rd_accepted, wr_accepted, data_buf_addr, // Inputs rst, clk, accept_ns, rd_buf_full, wr_req_16, app_addr, app_cmd, app_sz, app_hi_pri, app_en, wr_data_buf_addr, rd_data_buf_addr_r ); input rst; input clk; input accept_ns; input rd_buf_full; input wr_req_16; wire app_rdy_ns = accept_ns && ~rd_buf_full && ~wr_req_16; (* keep = "true", max_fanout = 10 *) reg app_rdy_r = 1'b0 /* synthesis syn_maxfan = 10 */; always @(posedge clk) app_rdy_r <= #TCQ app_rdy_ns; output wire app_rdy; assign app_rdy = app_rdy_r; input [ADDR_WIDTH-1:0] app_addr; input [2:0] app_cmd; input app_sz; input app_hi_pri; input app_en; reg [ADDR_WIDTH-1:0] app_addr_r1 = {ADDR_WIDTH{1'b0}}; reg [ADDR_WIDTH-1:0] app_addr_r2 = {ADDR_WIDTH{1'b0}}; reg [2:0] app_cmd_r1; reg [2:0] app_cmd_r2; reg app_sz_r1; reg app_sz_r2; reg app_hi_pri_r1; reg app_hi_pri_r2; reg app_en_r1; reg app_en_r2; wire [ADDR_WIDTH-1:0] app_addr_ns1 = app_rdy_r && app_en ? app_addr : app_addr_r1; wire [ADDR_WIDTH-1:0] app_addr_ns2 = app_rdy_r ? app_addr_r1 : app_addr_r2; wire [2:0] app_cmd_ns1 = app_rdy_r ? app_cmd : app_cmd_r1; wire [2:0] app_cmd_ns2 = app_rdy_r ? app_cmd_r1 : app_cmd_r2; wire app_sz_ns1 = app_rdy_r ? app_sz : app_sz_r1; wire app_sz_ns2 = app_rdy_r ? app_sz_r1 : app_sz_r2; wire app_hi_pri_ns1 = app_rdy_r ? app_hi_pri : app_hi_pri_r1; wire app_hi_pri_ns2 = app_rdy_r ? app_hi_pri_r1 : app_hi_pri_r2; wire app_en_ns1 = ~rst && (app_rdy_r ? app_en : app_en_r1); wire app_en_ns2 = ~rst && (app_rdy_r ? app_en_r1 : app_en_r2); always @(posedge clk) begin if (rst) begin app_addr_r1 <= #TCQ {ADDR_WIDTH{1'b0}}; app_addr_r2 <= #TCQ {ADDR_WIDTH{1'b0}}; end else begin app_addr_r1 <= #TCQ app_addr_ns1; app_addr_r2 <= #TCQ app_addr_ns2; end app_cmd_r1 <= #TCQ app_cmd_ns1; app_cmd_r2 <= #TCQ app_cmd_ns2; app_sz_r1 <= #TCQ app_sz_ns1; app_sz_r2 <= #TCQ app_sz_ns2; app_hi_pri_r1 <= #TCQ app_hi_pri_ns1; app_hi_pri_r2 <= #TCQ app_hi_pri_ns2; app_en_r1 <= #TCQ app_en_ns1; app_en_r2 <= #TCQ app_en_ns2; end // always @ (posedge clk) wire use_addr_lcl = app_en_r2 && app_rdy_r; output wire use_addr; assign use_addr = use_addr_lcl; output wire [RANK_WIDTH-1:0] rank; output wire [BANK_WIDTH-1:0] bank; output wire [ROW_WIDTH-1:0] row; output wire [COL_WIDTH-1:0] col; output wire size; output wire [2:0] cmd; output wire hi_priority; /* assign col = app_rdy_r ? app_addr_r1[0+:COL_WIDTH] : app_addr_r2[0+:COL_WIDTH];*/ generate begin if (MEM_ADDR_ORDER == "TG_TEST") begin assign col[4:0] = app_rdy_r ? app_addr_r1[0+:5] : app_addr_r2[0+:5]; if (RANKS==1) begin assign col[COL_WIDTH-1:COL_WIDTH-2] = app_rdy_r ? app_addr_r1[5+3+BANK_WIDTH+:2] : app_addr_r2[5+3+BANK_WIDTH+:2]; assign col[COL_WIDTH-3:5] = app_rdy_r ? app_addr_r1[5+3+BANK_WIDTH+2+2+:COL_WIDTH-7] : app_addr_r2[5+3+BANK_WIDTH+2+2+:COL_WIDTH-7]; end else begin assign col[COL_WIDTH-1:COL_WIDTH-2] = app_rdy_r ? app_addr_r1[5+3+BANK_WIDTH+RANK_WIDTH+:2] : app_addr_r2[5+3+BANK_WIDTH+RANK_WIDTH+:2]; assign col[COL_WIDTH-3:5] = app_rdy_r ? app_addr_r1[5+3+BANK_WIDTH+RANK_WIDTH+2+2+:COL_WIDTH-7] : app_addr_r2[5+3+BANK_WIDTH+RANK_WIDTH+2+2+:COL_WIDTH-7]; end assign row[2:0] = app_rdy_r ? 
app_addr_r1[5+:3] : app_addr_r2[5+:3]; if (RANKS==1) begin assign row[ROW_WIDTH-1:ROW_WIDTH-2] = app_rdy_r ? app_addr_r1[5+3+BANK_WIDTH+2+:2] : app_addr_r2[5+3+BANK_WIDTH+2+:2]; assign row[ROW_WIDTH-3:3] = app_rdy_r ? app_addr_r1[5+3+BANK_WIDTH+2+2+COL_WIDTH-7+:ROW_WIDTH-5] : app_addr_r2[5+3+BANK_WIDTH+2+2+COL_WIDTH-7+:ROW_WIDTH-5]; end else begin assign row[ROW_WIDTH-1:ROW_WIDTH-2] = app_rdy_r ? app_addr_r1[5+3+BANK_WIDTH+RANK_WIDTH+2+:2] : app_addr_r2[5+3+BANK_WIDTH+RANK_WIDTH+2+:2]; assign row[ROW_WIDTH-3:3] = app_rdy_r ? app_addr_r1[5+3+BANK_WIDTH+RANK_WIDTH+2+2+COL_WIDTH-7+:ROW_WIDTH-5] : app_addr_r2[5+3+BANK_WIDTH+RANK_WIDTH+2+2+COL_WIDTH-7+:ROW_WIDTH-5]; end assign bank = app_rdy_r ? app_addr_r1[5+3+:BANK_WIDTH] : app_addr_r2[5+3+:BANK_WIDTH]; assign rank = (RANKS == 1) ? 1'b0 : app_rdy_r ? app_addr_r1[5+3+BANK_WIDTH+:RANK_WIDTH] : app_addr_r2[5+3+BANK_WIDTH+:RANK_WIDTH]; end else if (MEM_ADDR_ORDER == "ROW_BANK_COLUMN") begin assign col = app_rdy_r ? app_addr_r1[0+:COL_WIDTH] : app_addr_r2[0+:COL_WIDTH]; assign row = app_rdy_r ? app_addr_r1[COL_WIDTH+BANK_WIDTH+:ROW_WIDTH] : app_addr_r2[COL_WIDTH+BANK_WIDTH+:ROW_WIDTH]; assign bank = app_rdy_r ? app_addr_r1[COL_WIDTH+:BANK_WIDTH] : app_addr_r2[COL_WIDTH+:BANK_WIDTH]; assign rank = (RANKS == 1) ? 1'b0 : app_rdy_r ? app_addr_r1[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH] : app_addr_r2[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH]; end else begin assign col = app_rdy_r ? app_addr_r1[0+:COL_WIDTH] : app_addr_r2[0+:COL_WIDTH]; assign row = app_rdy_r ? app_addr_r1[COL_WIDTH+:ROW_WIDTH] : app_addr_r2[COL_WIDTH+:ROW_WIDTH]; assign bank = app_rdy_r ? app_addr_r1[COL_WIDTH+ROW_WIDTH+:BANK_WIDTH] : app_addr_r2[COL_WIDTH+ROW_WIDTH+:BANK_WIDTH]; assign rank = (RANKS == 1) ? 1'b0 : app_rdy_r ? app_addr_r1[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH] : app_addr_r2[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH]; end end endgenerate /* assign rank = (RANKS == 1) ? 1'b0 : app_rdy_r ? app_addr_r1[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH] : app_addr_r2[COL_WIDTH+ROW_WIDTH+BANK_WIDTH+:RANK_WIDTH];*/ assign size = app_rdy_r ? app_sz_r1 : app_sz_r2; assign cmd = app_rdy_r ? app_cmd_r1 : app_cmd_r2; assign hi_priority = app_rdy_r ? app_hi_pri_r1 : app_hi_pri_r2; wire request_accepted = use_addr_lcl && app_rdy_r; wire rd = app_cmd_r2[1:0] == 2'b01; wire wr = app_cmd_r2[1:0] == 2'b00; wire wr_bytes = app_cmd_r2[1:0] == 2'b11; wire write = wr || wr_bytes; output wire rd_accepted; assign rd_accepted = request_accepted && rd; output wire wr_accepted; assign wr_accepted = request_accepted && write; input [DATA_BUF_ADDR_WIDTH-1:0] wr_data_buf_addr; input [DATA_BUF_ADDR_WIDTH-1:0] rd_data_buf_addr_r; output wire [DATA_BUF_ADDR_WIDTH-1:0] data_buf_addr; assign data_buf_addr = ~write ? rd_data_buf_addr_r : wr_data_buf_addr; endmodule // ui_cmd // Local Variables: // verilog-library-directories:(".") // End:
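// ----------------------------------------------------------------------
// Editor's illustration (not part of the original core): with the default
// widths above (COL_WIDTH=12, ROW_WIDTH=16, BANK_WIDTH=3, RANK_WIDTH=2,
// ADDR_WIDTH=33), the final else branch of the generate block, which
// handles the default MEM_ADDR_ORDER = "BANK_ROW_COLUMN", slices the flat
// app_addr as {rank, bank, row, col} from MSB down: col = addr[11:0],
// row = addr[27:12], bank = addr[30:28], rank = addr[32:31]. The module
// below (the name demo_addr_decode is invented here) applies the same
// slicing to a constant address so the bit positions can be checked in
// simulation.
module demo_addr_decode;
    localparam COL_WIDTH  = 12;
    localparam ROW_WIDTH  = 16;
    localparam BANK_WIDTH = 3;
    localparam RANK_WIDTH = 2;
    localparam ADDR_WIDTH = COL_WIDTH + ROW_WIDTH + BANK_WIDTH + RANK_WIDTH; // 33

    reg  [ADDR_WIDTH-1:0]  addr;
    wire [COL_WIDTH-1:0]   col  = addr[0 +: COL_WIDTH];
    wire [ROW_WIDTH-1:0]   row  = addr[COL_WIDTH +: ROW_WIDTH];
    wire [BANK_WIDTH-1:0]  bank = addr[COL_WIDTH+ROW_WIDTH +: BANK_WIDTH];
    wire [RANK_WIDTH-1:0]  rank = addr[COL_WIDTH+ROW_WIDTH+BANK_WIDTH +: RANK_WIDTH];

    initial begin
        // rank = 1, bank = 5, row = 0xBEEF, col = 0xABC
        addr = {2'd1, 3'd5, 16'hBEEF, 12'hABC};
        #1;
        if (col != 12'hABC || row != 16'hBEEF || bank != 3'd5 || rank != 2'd1)
            $stop;
        $display("col=%h row=%h bank=%0d rank=%0d", col, row, bank, rank);
    end
endmodule
// ----------------------------------------------------------------------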
//***************************************************************************** // (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor : Xilinx // \ \ \/ Version : %version // \ \ Application : MIG // / / Filename : mc.v // /___/ /\ Date Last Modified : $date$ // \ \ / \ Date Created : Tue Jun 30 2009 // \___\/\___\ // //Device : 7-Series //Design Name : DDR3 SDRAM //Purpose : //Reference : //Revision History : //***************************************************************************** //***************************************************************************** // Top level memory sequencer structural block. This block // instantiates the rank, bank, and column machines. //***************************************************************************** `timescale 1ps/1ps module mig_7series_v1_9_mc # ( parameter TCQ = 100, // clk->out delay(sim only) parameter ADDR_CMD_MODE = "1T", // registered or // 1Tfered mem? 
parameter BANK_WIDTH = 3, // bank address width parameter BM_CNT_WIDTH = 2, // # BM counter width // i.e., log2(nBANK_MACHS) parameter BURST_MODE = "8", // Burst length parameter CL = 5, // Read CAS latency // (in clk cyc) parameter CMD_PIPE_PLUS1 = "ON", // add register stage // between MC and PHY parameter COL_WIDTH = 12, // column address width parameter CS_WIDTH = 4, // # of unique CS outputs parameter CWL = 5, // Write CAS latency // (in clk cyc) parameter DATA_BUF_ADDR_WIDTH = 8, // User request tag (e.g. // user src/dest buf addr) parameter DATA_BUF_OFFSET_WIDTH = 1, // User buffer offset width parameter DATA_WIDTH = 64, // Data bus width parameter DQ_WIDTH = 64, // # of DQ (data) parameter DQS_WIDTH = 8, // # of DQS (strobe) parameter DRAM_TYPE = "DDR3", // Memory I/F type: // "DDR3", "DDR2" parameter ECC = "OFF", // ECC ON/OFF? parameter ECC_WIDTH = 8, // # of ECC bits parameter MAINT_PRESCALER_PERIOD= 200000, // maintenance period (ps) parameter MC_ERR_ADDR_WIDTH = 31, // # of error address bits parameter nBANK_MACHS = 4, // # of bank machines (BM) parameter nCK_PER_CLK = 4, // DRAM clock : MC clock // frequency ratio parameter nCS_PER_RANK = 1, // # of unique CS outputs // per rank parameter nREFRESH_BANK = 1, // # of REF cmds to pull-in parameter nSLOTS = 1, // # DIMM slots in system parameter ORDERING = "NORM", // request ordering mode parameter PAYLOAD_WIDTH = 64, // Width of data payload // from PHY parameter RANK_WIDTH = 2, // # of bits to count ranks parameter RANKS = 4, // # of ranks of DRAM parameter REG_CTRL = "ON", // "ON" for registered DIMM parameter ROW_WIDTH = 16, // row address width parameter RTT_NOM = "40", // Nominal ODT value parameter RTT_WR = "120", // Write ODT value parameter SLOT_0_CONFIG = 8'b0000_0101, // ranks allowed in slot 0 parameter SLOT_1_CONFIG = 8'b0000_1010, // ranks allowed in slot 1 parameter STARVE_LIMIT = 2, // max # of times a user // request is allowed to // lose arbitration when // reordering is enabled parameter tCK = 2500, // memory clk period(ps) parameter tCKE = 10000, // CKE minimum pulse (ps) parameter tFAW = 40000, // four activate window(ps) parameter tRAS = 37500, // ACT->PRE cmd period (ps) parameter tRCD = 12500, // ACT->R/W delay (ps) parameter tREFI = 7800000, // average periodic // refresh interval(ps) parameter CKE_ODT_AUX = "FALSE", //Parameter to turn on/off the aux_out signal parameter tRFC = 110000, // REF->ACT/REF delay (ps) parameter tRP = 12500, // PRE cmd period (ps) parameter tRRD = 10000, // ACT->ACT period (ps) parameter tRTP = 7500, // Read->PRE cmd delay (ps) parameter tWTR = 7500, // Internal write->read // delay (ps) // requiring DLL lock (CKs) parameter tZQCS = 64, // ZQCS cmd period (CKs) parameter tZQI = 128_000_000, // ZQCS interval (ps) parameter tPRDI = 1_000_000, // pS parameter USER_REFRESH = "OFF" // Whether user manages REF ) ( // System inputs input clk, input rst, // Physical memory slot presence input [7:0] slot_0_present, input [7:0] slot_1_present, // Native Interface input [2:0] cmd, input [DATA_BUF_ADDR_WIDTH-1:0] data_buf_addr, input hi_priority, input size, input [BANK_WIDTH-1:0] bank, input [COL_WIDTH-1:0] col, input [RANK_WIDTH-1:0] rank, input [ROW_WIDTH-1:0] row, input use_addr, input [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] wr_data, input [2*nCK_PER_CLK*DATA_WIDTH/8-1:0] wr_data_mask, output accept, output accept_ns, output [BM_CNT_WIDTH-1:0] bank_mach_next, output wire [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] rd_data, output [DATA_BUF_ADDR_WIDTH-1:0] rd_data_addr, output rd_data_en, output 
rd_data_end, output [DATA_BUF_OFFSET_WIDTH-1:0] rd_data_offset, (* keep = "true", max_fanout = 30 *) output reg [DATA_BUF_ADDR_WIDTH-1:0] wr_data_addr /* synthesis syn_maxfan = 30 */, output reg wr_data_en, (* keep = "true", max_fanout = 30 *) output reg [DATA_BUF_OFFSET_WIDTH-1:0] wr_data_offset /* synthesis syn_maxfan = 30 */, output mc_read_idle, output mc_ref_zq_wip, // ECC interface input correct_en, input [2*nCK_PER_CLK-1:0] raw_not_ecc, output [MC_ERR_ADDR_WIDTH-1:0] ecc_err_addr, output [2*nCK_PER_CLK-1:0] ecc_single, output [2*nCK_PER_CLK-1:0] ecc_multiple, // User maintenance requests input app_periodic_rd_req, input app_ref_req, input app_zq_req, input app_sr_req, output app_sr_active, output app_ref_ack, output app_zq_ack, // MC <==> PHY Interface output reg [nCK_PER_CLK-1:0] mc_ras_n, output reg [nCK_PER_CLK-1:0] mc_cas_n, output reg [nCK_PER_CLK-1:0] mc_we_n, output reg [nCK_PER_CLK*ROW_WIDTH-1:0] mc_address, output reg [nCK_PER_CLK*BANK_WIDTH-1:0] mc_bank, output reg [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] mc_cs_n, output reg [1:0] mc_odt, output reg [nCK_PER_CLK-1:0] mc_cke, output wire mc_reset_n, output wire [2*nCK_PER_CLK*DQ_WIDTH-1:0] mc_wrdata, output wire [2*nCK_PER_CLK*DQ_WIDTH/8-1:0]mc_wrdata_mask, output reg mc_wrdata_en, output wire mc_cmd_wren, output wire mc_ctl_wren, output reg [2:0] mc_cmd, output reg [5:0] mc_data_offset, output reg [5:0] mc_data_offset_1, output reg [5:0] mc_data_offset_2, output reg [1:0] mc_cas_slot, output reg [3:0] mc_aux_out0, output reg [3:0] mc_aux_out1, output reg [1:0] mc_rank_cnt, input phy_mc_ctl_full, input phy_mc_cmd_full, input phy_mc_data_full, input [2*nCK_PER_CLK*DQ_WIDTH-1:0] phy_rd_data, input phy_rddata_valid, input init_calib_complete, input [6*RANKS-1:0] calib_rd_data_offset, input [6*RANKS-1:0] calib_rd_data_offset_1, input [6*RANKS-1:0] calib_rd_data_offset_2 ); assign mc_reset_n = 1'b1; // never reset memory assign mc_cmd_wren = 1'b1; // always write CMD FIFO(issue DSEL when idle) assign mc_ctl_wren = 1'b1; // always write CTL FIFO(issue nondata when idle) // Ensure there is always at least one rank present during operation `ifdef MC_SVA ranks_present: assert property (@(posedge clk) (rst || (|(slot_0_present | slot_1_present)))); `endif // Reserved. Do not change. localparam nPHY_WRLAT = 2; // always delay write data control unless ECC mode is enabled localparam DELAY_WR_DATA_CNTRL = ECC == "ON" ? 0 : 1; // Ensure that write control is delayed for appropriate CWL /*`ifdef MC_SVA delay_wr_data_zero_CWL_le_6: assert property (@(posedge clk) ((CWL > 6) || (DELAY_WR_DATA_CNTRL == 0))); `endif*/ // Never retrieve WR_DATA_ADDR early localparam EARLY_WR_DATA_ADDR = "OFF"; //*************************************************************************** // Convert timing parameters from time to clock cycles //*************************************************************************** localparam nCKE = cdiv(tCKE, tCK); localparam nRP = cdiv(tRP, tCK); localparam nRCD = cdiv(tRCD, tCK); localparam nRAS = cdiv(tRAS, tCK); localparam nFAW = cdiv(tFAW, tCK); localparam nRFC = cdiv(tRFC, tCK); // Convert tWR. As per specification, write recover for autoprecharge // cycles doesn't support values of 9 and 11. Round up 9 to 10 and 11 to 12 localparam nWR_CK = cdiv(15000, tCK) ; localparam nWR = (nWR_CK == 9) ? 10 : (nWR_CK == 11) ? 12 : nWR_CK; // tRRD, tWTR at tRTP have a 4 cycle floor in DDR3 and 2 cycle floor in DDR2 localparam nRRD_CK = cdiv(tRRD, tCK); localparam nRRD = (DRAM_TYPE == "DDR3") ? (nRRD_CK < 4) ? 
4 : nRRD_CK : (nRRD_CK < 2) ? 2 : nRRD_CK; localparam nWTR_CK = cdiv(tWTR, tCK); localparam nWTR = (DRAM_TYPE == "DDR3") ? (nWTR_CK < 4) ? 4 : nWTR_CK : (nWTR_CK < 2) ? 2 : nWTR_CK; localparam nRTP_CK = cdiv(tRTP, tCK); localparam nRTP = (DRAM_TYPE == "DDR3") ? (nRTP_CK < 4) ? 4 : nRTP_CK : (nRTP_CK < 2) ? 2 : nRTP_CK; // Add a cycle to CL/CWL for the register in RDIMM devices localparam CWL_M = (REG_CTRL == "ON") ? CWL + 1 : CWL; localparam CL_M = (REG_CTRL == "ON") ? CL + 1 : CL; // Tuneable delay between read and write data on the DQ bus localparam DQRD2DQWR_DLY = 4; // CKE minimum pulse width for self-refresh (SRE->SRX minimum time) localparam nCKESR = nCKE + 1; // Delay from SRE to command requiring locked DLL. Currently fixed at 512 for // all devices per JEDEC spec. localparam tXSDLL = 512; //*************************************************************************** // Set up maintenance counter dividers //*************************************************************************** // CK clock divisor to generate maintenance prescaler period (round down) localparam MAINT_PRESCALER_DIV = MAINT_PRESCALER_PERIOD / (tCK*nCK_PER_CLK); // Maintenance prescaler divisor for refresh timer. Essentially, this is // just (tREFI / MAINT_PRESCALER_PERIOD), but we must account for the worst // case delay from the time we get a tick from the refresh counter to the // time that we can actually issue the REF command. Thus, subtract tRCD, CL, // data burst time and tRP for each implemented bank machine to ensure that // all transactions can complete before tREFI expires localparam REFRESH_TIMER_DIV = USER_REFRESH == "ON" ? 0 : (tREFI-((tRCD+((CL+4)*tCK)+tRP)*nBANK_MACHS)) / MAINT_PRESCALER_PERIOD; // Periodic read (RESERVED - not currently required or supported in 7 series) // tPRDI should only be set to 0 // localparam tPRDI = 0; // Do NOT change. localparam PERIODIC_RD_TIMER_DIV = tPRDI / MAINT_PRESCALER_PERIOD; // Convert maintenance prescaler from ps to ns localparam MAINT_PRESCALER_PERIOD_NS = MAINT_PRESCALER_PERIOD / 1000; // Maintenance prescaler divisor for ZQ calibration (ZQCS) timer localparam ZQ_TIMER_DIV = tZQI / MAINT_PRESCALER_PERIOD_NS; // Bus width required to broadcast a single bit rank signal among all the // bank machines - 1 bit per rank, per bank localparam RANK_BM_BV_WIDTH = nBANK_MACHS * RANKS; //*************************************************************************** // Define 2T, CWL-even mode to enable multi-fabric-cycle 2T commands //*************************************************************************** localparam EVEN_CWL_2T_MODE = ((ADDR_CMD_MODE == "2T") && (!(CWL % 2))) ? "ON" : "OFF"; //*************************************************************************** // Reserved feature control. //*************************************************************************** // Open page wait mode is reserved. // nOP_WAIT is the number of states a bank machine will park itself // on an otherwise inactive open page before closing the page. If // nOP_WAIT == 0, open page wait mode is disabled. If nOP_WAIT == -1, // the bank machine will remain parked until the pool of idle bank machines // are less than LOW_IDLE_CNT. At which point parked bank machines // are selected to exit until the number of idle bank machines exceeds the // LOW_IDLE_CNT. 
localparam nOP_WAIT = 0; // Open page mode localparam LOW_IDLE_CNT = 0; // Low idle bank machine threshold //*************************************************************************** // Internal wires //*************************************************************************** wire [RANK_BM_BV_WIDTH-1:0] act_this_rank_r; wire [ROW_WIDTH-1:0] col_a; wire [BANK_WIDTH-1:0] col_ba; wire [DATA_BUF_ADDR_WIDTH-1:0] col_data_buf_addr; wire col_periodic_rd; wire [RANK_WIDTH-1:0] col_ra; wire col_rmw; wire col_rd_wr; wire [ROW_WIDTH-1:0] col_row; wire col_size; wire [DATA_BUF_ADDR_WIDTH-1:0] col_wr_data_buf_addr; wire dq_busy_data; wire ecc_status_valid; wire [RANKS-1:0] inhbt_act_faw_r; wire [RANKS-1:0] inhbt_rd; wire [RANKS-1:0] inhbt_wr; wire insert_maint_r1; wire [RANK_WIDTH-1:0] maint_rank_r; wire maint_req_r; wire maint_wip_r; wire maint_zq_r; wire maint_sre_r; wire maint_srx_r; wire periodic_rd_ack_r; wire periodic_rd_r; wire [RANK_WIDTH-1:0] periodic_rd_rank_r; wire [(RANKS*nBANK_MACHS)-1:0] rank_busy_r; wire rd_rmw; wire [RANK_BM_BV_WIDTH-1:0] rd_this_rank_r; wire [nBANK_MACHS-1:0] sending_col; wire [nBANK_MACHS-1:0] sending_row; wire sent_col; wire sent_col_r; wire wr_ecc_buf; wire [RANK_BM_BV_WIDTH-1:0] wr_this_rank_r; // MC/PHY optional pipeline stage support wire [nCK_PER_CLK-1:0] mc_ras_n_ns; wire [nCK_PER_CLK-1:0] mc_cas_n_ns; wire [nCK_PER_CLK-1:0] mc_we_n_ns; wire [nCK_PER_CLK*ROW_WIDTH-1:0] mc_address_ns; wire [nCK_PER_CLK*BANK_WIDTH-1:0] mc_bank_ns; wire [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] mc_cs_n_ns; wire [1:0] mc_odt_ns; wire [nCK_PER_CLK-1:0] mc_cke_ns; wire [3:0] mc_aux_out0_ns; wire [3:0] mc_aux_out1_ns; wire [1:0] mc_rank_cnt_ns = col_ra; wire [2:0] mc_cmd_ns; wire [5:0] mc_data_offset_ns; wire [5:0] mc_data_offset_1_ns; wire [5:0] mc_data_offset_2_ns; wire [1:0] mc_cas_slot_ns; wire mc_wrdata_en_ns; wire [DATA_BUF_ADDR_WIDTH-1:0] wr_data_addr_ns; wire wr_data_en_ns; wire [DATA_BUF_OFFSET_WIDTH-1:0] wr_data_offset_ns; integer i; // MC Read idle support wire col_read_fifo_empty; wire mc_read_idle_ns; reg mc_read_idle_r; // MC Maintenance in progress with bus idle indication wire maint_ref_zq_wip; wire mc_ref_zq_wip_ns; reg mc_ref_zq_wip_r; //*************************************************************************** // Function cdiv // Description: // This function performs ceiling division (divide and round-up) // Inputs: // num: integer to be divided // div: divisor // Outputs: // cdiv: result of ceiling division (num/div, rounded up) //*************************************************************************** function integer cdiv (input integer num, input integer div); begin // perform division, then add 1 if and only if remainder is non-zero cdiv = (num/div) + (((num%div)>0) ? 
1 : 0); end endfunction // cdiv //*************************************************************************** // Optional pipeline register stage on MC/PHY interface //*************************************************************************** generate if (CMD_PIPE_PLUS1 == "ON") begin : cmd_pipe_plus // register interface always @(posedge clk) begin mc_address <= #TCQ mc_address_ns; mc_bank <= #TCQ mc_bank_ns; mc_cas_n <= #TCQ mc_cas_n_ns; mc_cs_n <= #TCQ mc_cs_n_ns; mc_odt <= #TCQ mc_odt_ns; mc_cke <= #TCQ mc_cke_ns; mc_aux_out0 <= #TCQ mc_aux_out0_ns; mc_aux_out1 <= #TCQ mc_aux_out1_ns; mc_cmd <= #TCQ mc_cmd_ns; mc_ras_n <= #TCQ mc_ras_n_ns; mc_we_n <= #TCQ mc_we_n_ns; mc_data_offset <= #TCQ mc_data_offset_ns; mc_data_offset_1 <= #TCQ mc_data_offset_1_ns; mc_data_offset_2 <= #TCQ mc_data_offset_2_ns; mc_cas_slot <= #TCQ mc_cas_slot_ns; mc_wrdata_en <= #TCQ mc_wrdata_en_ns; mc_rank_cnt <= #TCQ mc_rank_cnt_ns; wr_data_addr <= #TCQ wr_data_addr_ns; wr_data_en <= #TCQ wr_data_en_ns; wr_data_offset <= #TCQ wr_data_offset_ns; end // always @ (posedge clk) end // block: cmd_pipe_plus else begin : cmd_pipe_plus0 // don't register interface always @( mc_address_ns or mc_aux_out0_ns or mc_aux_out1_ns or mc_bank_ns or mc_cas_n_ns or mc_cmd_ns or mc_cs_n_ns or mc_odt_ns or mc_cke_ns or mc_data_offset_ns or mc_data_offset_1_ns or mc_data_offset_2_ns or mc_rank_cnt_ns or mc_ras_n_ns or mc_we_n_ns or mc_wrdata_en_ns or wr_data_addr_ns or wr_data_en_ns or wr_data_offset_ns or mc_cas_slot_ns) begin mc_address = #TCQ mc_address_ns; mc_bank = #TCQ mc_bank_ns; mc_cas_n = #TCQ mc_cas_n_ns; mc_cs_n = #TCQ mc_cs_n_ns; mc_odt = #TCQ mc_odt_ns; mc_cke = #TCQ mc_cke_ns; mc_aux_out0 = #TCQ mc_aux_out0_ns; mc_aux_out1 = #TCQ mc_aux_out1_ns; mc_cmd = #TCQ mc_cmd_ns; mc_ras_n = #TCQ mc_ras_n_ns; mc_we_n = #TCQ mc_we_n_ns; mc_data_offset = #TCQ mc_data_offset_ns; mc_data_offset_1 = #TCQ mc_data_offset_1_ns; mc_data_offset_2 = #TCQ mc_data_offset_2_ns; mc_cas_slot = #TCQ mc_cas_slot_ns; mc_wrdata_en = #TCQ mc_wrdata_en_ns; mc_rank_cnt = #TCQ mc_rank_cnt_ns; wr_data_addr = #TCQ wr_data_addr_ns; wr_data_en = #TCQ wr_data_en_ns; wr_data_offset = #TCQ wr_data_offset_ns; end // always @ (... 
end // block: cmd_pipe_plus0 endgenerate //*************************************************************************** // Indicate when there are no pending reads so that input features can be // powered down //*************************************************************************** assign mc_read_idle_ns = col_read_fifo_empty & init_calib_complete; always @(posedge clk) mc_read_idle_r <= #TCQ mc_read_idle_ns; assign mc_read_idle = mc_read_idle_r; //*************************************************************************** // Indicate when there is a refresh in progress and the bus is idle so that // tap adjustments can be made //*************************************************************************** assign mc_ref_zq_wip_ns = maint_ref_zq_wip && col_read_fifo_empty; always @(posedge clk) mc_ref_zq_wip_r <= mc_ref_zq_wip_ns; assign mc_ref_zq_wip = mc_ref_zq_wip_r; //*************************************************************************** // Manage rank-level timing and maintanence //*************************************************************************** mig_7series_v1_9_rank_mach # ( // Parameters .BURST_MODE (BURST_MODE), .CL (CL), .CWL (CWL), .CS_WIDTH (CS_WIDTH), .DQRD2DQWR_DLY (DQRD2DQWR_DLY), .DRAM_TYPE (DRAM_TYPE), .MAINT_PRESCALER_DIV (MAINT_PRESCALER_DIV), .nBANK_MACHS (nBANK_MACHS), .nCKESR (nCKESR), .nCK_PER_CLK (nCK_PER_CLK), .nFAW (nFAW), .nREFRESH_BANK (nREFRESH_BANK), .nRRD (nRRD), .nWTR (nWTR), .PERIODIC_RD_TIMER_DIV (PERIODIC_RD_TIMER_DIV), .RANK_BM_BV_WIDTH (RANK_BM_BV_WIDTH), .RANK_WIDTH (RANK_WIDTH), .RANKS (RANKS), .REFRESH_TIMER_DIV (REFRESH_TIMER_DIV), .ZQ_TIMER_DIV (ZQ_TIMER_DIV) ) rank_mach0 ( // Outputs .inhbt_act_faw_r (inhbt_act_faw_r[RANKS-1:0]), .inhbt_rd (inhbt_rd[RANKS-1:0]), .inhbt_wr (inhbt_wr[RANKS-1:0]), .maint_rank_r (maint_rank_r[RANK_WIDTH-1:0]), .maint_req_r (maint_req_r), .maint_zq_r (maint_zq_r), .maint_sre_r (maint_sre_r), .maint_srx_r (maint_srx_r), .maint_ref_zq_wip (maint_ref_zq_wip), .periodic_rd_r (periodic_rd_r), .periodic_rd_rank_r (periodic_rd_rank_r[RANK_WIDTH-1:0]), // Inputs .act_this_rank_r (act_this_rank_r[RANK_BM_BV_WIDTH-1:0]), .app_periodic_rd_req (app_periodic_rd_req), .app_ref_req (app_ref_req), .app_ref_ack (app_ref_ack), .app_zq_req (app_zq_req), .app_zq_ack (app_zq_ack), .app_sr_req (app_sr_req), .app_sr_active (app_sr_active), .col_rd_wr (col_rd_wr), .clk (clk), .init_calib_complete (init_calib_complete), .insert_maint_r1 (insert_maint_r1), .maint_wip_r (maint_wip_r), .periodic_rd_ack_r (periodic_rd_ack_r), .rank_busy_r (rank_busy_r[(RANKS*nBANK_MACHS)-1:0]), .rd_this_rank_r (rd_this_rank_r[RANK_BM_BV_WIDTH-1:0]), .rst (rst), .sending_col (sending_col[nBANK_MACHS-1:0]), .sending_row (sending_row[nBANK_MACHS-1:0]), .slot_0_present (slot_0_present[7:0]), .slot_1_present (slot_1_present[7:0]), .wr_this_rank_r (wr_this_rank_r[RANK_BM_BV_WIDTH-1:0]) ); //*************************************************************************** // Manage requests, reordering and bank timing //*************************************************************************** mig_7series_v1_9_bank_mach # ( // Parameters .TCQ (TCQ), .EVEN_CWL_2T_MODE (EVEN_CWL_2T_MODE), .ADDR_CMD_MODE (ADDR_CMD_MODE), .BANK_WIDTH (BANK_WIDTH), .BM_CNT_WIDTH (BM_CNT_WIDTH), .BURST_MODE (BURST_MODE), .COL_WIDTH (COL_WIDTH), .CS_WIDTH (CS_WIDTH), .CL (CL_M), .CWL (CWL_M), .CKE_ODT_AUX (CKE_ODT_AUX), .DATA_BUF_ADDR_WIDTH (DATA_BUF_ADDR_WIDTH), .DRAM_TYPE (DRAM_TYPE), .EARLY_WR_DATA_ADDR (EARLY_WR_DATA_ADDR), .ECC (ECC), .LOW_IDLE_CNT (LOW_IDLE_CNT), .nBANK_MACHS 
(nBANK_MACHS), .nCK_PER_CLK (nCK_PER_CLK), .nCS_PER_RANK (nCS_PER_RANK), .nOP_WAIT (nOP_WAIT), .nRAS (nRAS), .nRCD (nRCD), .nRFC (nRFC), .nRP (nRP), .nRTP (nRTP), .nSLOTS (nSLOTS), .nWR (nWR), .nXSDLL (tXSDLL), .ORDERING (ORDERING), .RANK_BM_BV_WIDTH (RANK_BM_BV_WIDTH), .RANK_WIDTH (RANK_WIDTH), .RANKS (RANKS), .ROW_WIDTH (ROW_WIDTH), .RTT_NOM (RTT_NOM), .RTT_WR (RTT_WR), .SLOT_0_CONFIG (SLOT_0_CONFIG), .SLOT_1_CONFIG (SLOT_1_CONFIG), .STARVE_LIMIT (STARVE_LIMIT), .tZQCS (tZQCS) ) bank_mach0 ( // Outputs .accept (accept), .accept_ns (accept_ns), .act_this_rank_r (act_this_rank_r[RANK_BM_BV_WIDTH-1:0]), .bank_mach_next (bank_mach_next[BM_CNT_WIDTH-1:0]), .col_a (col_a[ROW_WIDTH-1:0]), .col_ba (col_ba[BANK_WIDTH-1:0]), .col_data_buf_addr (col_data_buf_addr[DATA_BUF_ADDR_WIDTH-1:0]), .col_periodic_rd (col_periodic_rd), .col_ra (col_ra[RANK_WIDTH-1:0]), .col_rmw (col_rmw), .col_rd_wr (col_rd_wr), .col_row (col_row[ROW_WIDTH-1:0]), .col_size (col_size), .col_wr_data_buf_addr (col_wr_data_buf_addr[DATA_BUF_ADDR_WIDTH-1:0]), .mc_bank (mc_bank_ns), .mc_address (mc_address_ns), .mc_ras_n (mc_ras_n_ns), .mc_cas_n (mc_cas_n_ns), .mc_we_n (mc_we_n_ns), .mc_cs_n (mc_cs_n_ns), .mc_odt (mc_odt_ns), .mc_cke (mc_cke_ns), .mc_aux_out0 (mc_aux_out0_ns), .mc_aux_out1 (mc_aux_out1_ns), .mc_cmd (mc_cmd_ns), .mc_data_offset (mc_data_offset_ns), .mc_data_offset_1 (mc_data_offset_1_ns), .mc_data_offset_2 (mc_data_offset_2_ns), .mc_cas_slot (mc_cas_slot_ns), .insert_maint_r1 (insert_maint_r1), .maint_wip_r (maint_wip_r), .periodic_rd_ack_r (periodic_rd_ack_r), .rank_busy_r (rank_busy_r[(RANKS*nBANK_MACHS)-1:0]), .rd_this_rank_r (rd_this_rank_r[RANK_BM_BV_WIDTH-1:0]), .sending_row (sending_row[nBANK_MACHS-1:0]), .sending_col (sending_col[nBANK_MACHS-1:0]), .sent_col (sent_col), .sent_col_r (sent_col_r), .wr_this_rank_r (wr_this_rank_r[RANK_BM_BV_WIDTH-1:0]), // Inputs .bank (bank[BANK_WIDTH-1:0]), .calib_rddata_offset (calib_rd_data_offset), .calib_rddata_offset_1 (calib_rd_data_offset_1), .calib_rddata_offset_2 (calib_rd_data_offset_2), .clk (clk), .cmd (cmd[2:0]), .col (col[COL_WIDTH-1:0]), .data_buf_addr (data_buf_addr[DATA_BUF_ADDR_WIDTH-1:0]), .init_calib_complete (init_calib_complete), .phy_rddata_valid (phy_rddata_valid), .dq_busy_data (dq_busy_data), .hi_priority (hi_priority), .inhbt_act_faw_r (inhbt_act_faw_r[RANKS-1:0]), .inhbt_rd (inhbt_rd[RANKS-1:0]), .inhbt_wr (inhbt_wr[RANKS-1:0]), .maint_rank_r (maint_rank_r[RANK_WIDTH-1:0]), .maint_req_r (maint_req_r), .maint_zq_r (maint_zq_r), .maint_sre_r (maint_sre_r), .maint_srx_r (maint_srx_r), .periodic_rd_r (periodic_rd_r), .periodic_rd_rank_r (periodic_rd_rank_r[RANK_WIDTH-1:0]), .phy_mc_cmd_full (phy_mc_cmd_full), .phy_mc_ctl_full (phy_mc_ctl_full), .phy_mc_data_full (phy_mc_data_full), .rank (rank[RANK_WIDTH-1:0]), .rd_data_addr (rd_data_addr[DATA_BUF_ADDR_WIDTH-1:0]), .rd_rmw (rd_rmw), .row (row[ROW_WIDTH-1:0]), .rst (rst), .size (size), .slot_0_present (slot_0_present[7:0]), .slot_1_present (slot_1_present[7:0]), .use_addr (use_addr) ); //*************************************************************************** // Manage DQ bus //*************************************************************************** mig_7series_v1_9_col_mach # ( // Parameters .TCQ (TCQ), .BANK_WIDTH (BANK_WIDTH), .BURST_MODE (BURST_MODE), .COL_WIDTH (COL_WIDTH), .CS_WIDTH (CS_WIDTH), .DATA_BUF_ADDR_WIDTH (DATA_BUF_ADDR_WIDTH), .DATA_BUF_OFFSET_WIDTH (DATA_BUF_OFFSET_WIDTH), .DELAY_WR_DATA_CNTRL (DELAY_WR_DATA_CNTRL), .DQS_WIDTH (DQS_WIDTH), .DRAM_TYPE (DRAM_TYPE), 
.EARLY_WR_DATA_ADDR (EARLY_WR_DATA_ADDR), .ECC (ECC), .MC_ERR_ADDR_WIDTH (MC_ERR_ADDR_WIDTH), .nCK_PER_CLK (nCK_PER_CLK), .nPHY_WRLAT (nPHY_WRLAT), .RANK_WIDTH (RANK_WIDTH), .ROW_WIDTH (ROW_WIDTH) ) col_mach0 ( // Outputs .mc_wrdata_en (mc_wrdata_en_ns), .dq_busy_data (dq_busy_data), .ecc_err_addr (ecc_err_addr[MC_ERR_ADDR_WIDTH-1:0]), .ecc_status_valid (ecc_status_valid), .rd_data_addr (rd_data_addr[DATA_BUF_ADDR_WIDTH-1:0]), .rd_data_en (rd_data_en), .rd_data_end (rd_data_end), .rd_data_offset (rd_data_offset), .rd_rmw (rd_rmw), .wr_data_addr (wr_data_addr_ns), .wr_data_en (wr_data_en_ns), .wr_data_offset (wr_data_offset_ns), .wr_ecc_buf (wr_ecc_buf), .col_read_fifo_empty (col_read_fifo_empty), // Inputs .clk (clk), .rst (rst), .col_a (col_a[ROW_WIDTH-1:0]), .col_ba (col_ba[BANK_WIDTH-1:0]), .col_data_buf_addr (col_data_buf_addr[DATA_BUF_ADDR_WIDTH-1:0]), .col_periodic_rd (col_periodic_rd), .col_ra (col_ra[RANK_WIDTH-1:0]), .col_rmw (col_rmw), .col_rd_wr (col_rd_wr), .col_row (col_row[ROW_WIDTH-1:0]), .col_size (col_size), .col_wr_data_buf_addr (col_wr_data_buf_addr[DATA_BUF_ADDR_WIDTH-1:0]), .phy_rddata_valid (phy_rddata_valid), .sent_col (EVEN_CWL_2T_MODE == "ON" ? sent_col_r : sent_col) ); //*************************************************************************** // Implement ECC //*************************************************************************** // Total ECC word length = ECC code width + Data width localparam CODE_WIDTH = DATA_WIDTH + ECC_WIDTH; generate if (ECC == "OFF") begin : ecc_off assign rd_data = phy_rd_data; assign mc_wrdata = wr_data; assign mc_wrdata_mask = wr_data_mask; assign ecc_single = 4'b0; assign ecc_multiple = 4'b0; end else begin : ecc_on wire [CODE_WIDTH*ECC_WIDTH-1:0] h_rows; wire [2*nCK_PER_CLK*DATA_WIDTH-1:0] rd_merge_data; // Merge and encode mig_7series_v1_9_ecc_merge_enc # ( // Parameters .TCQ (TCQ), .CODE_WIDTH (CODE_WIDTH), .DATA_BUF_ADDR_WIDTH (DATA_BUF_ADDR_WIDTH), .DATA_WIDTH (DATA_WIDTH), .DQ_WIDTH (DQ_WIDTH), .ECC_WIDTH (ECC_WIDTH), .PAYLOAD_WIDTH (PAYLOAD_WIDTH), .nCK_PER_CLK (nCK_PER_CLK) ) ecc_merge_enc0 ( // Outputs .mc_wrdata (mc_wrdata), .mc_wrdata_mask (mc_wrdata_mask), // Inputs .clk (clk), .rst (rst), .h_rows (h_rows), .rd_merge_data (rd_merge_data), .raw_not_ecc (raw_not_ecc), .wr_data (wr_data), .wr_data_mask (wr_data_mask) ); // Decode and fix mig_7series_v1_9_ecc_dec_fix # ( // Parameters .TCQ (TCQ), .CODE_WIDTH (CODE_WIDTH), .DATA_WIDTH (DATA_WIDTH), .DQ_WIDTH (DQ_WIDTH), .ECC_WIDTH (ECC_WIDTH), .PAYLOAD_WIDTH (PAYLOAD_WIDTH), .nCK_PER_CLK (nCK_PER_CLK) ) ecc_dec_fix0 ( // Outputs .ecc_multiple (ecc_multiple), .ecc_single (ecc_single), .rd_data (rd_data), // Inputs .clk (clk), .rst (rst), .correct_en (correct_en), .phy_rddata (phy_rd_data), .ecc_status_valid (ecc_status_valid), .h_rows (h_rows) ); // ECC Buffer mig_7series_v1_9_ecc_buf # ( // Parameters .TCQ (TCQ), .DATA_BUF_ADDR_WIDTH (DATA_BUF_ADDR_WIDTH), .DATA_BUF_OFFSET_WIDTH (DATA_BUF_OFFSET_WIDTH), .DATA_WIDTH (DATA_WIDTH), .PAYLOAD_WIDTH (PAYLOAD_WIDTH), .nCK_PER_CLK (nCK_PER_CLK) ) ecc_buf0 ( // Outputs .rd_merge_data (rd_merge_data), // Inputs .clk (clk), .rst (rst), .rd_data (rd_data), .rd_data_addr (rd_data_addr), .rd_data_offset (rd_data_offset), .wr_data_addr (wr_data_addr), .wr_data_offset (wr_data_offset), .wr_ecc_buf (wr_ecc_buf) ); // Generate ECC table mig_7series_v1_9_ecc_gen # ( // Parameters .CODE_WIDTH (CODE_WIDTH), .DATA_WIDTH (DATA_WIDTH), .ECC_WIDTH (ECC_WIDTH) ) ecc_gen0 ( // Outputs .h_rows (h_rows) ); `ifdef DISPLAY_H_MATRIX integer i; always 
@(negedge rst) begin $display ("**********************************************"); $display ("H Matrix:"); for (i=0; i<ECC_WIDTH; i=i+1) $display ("%b", h_rows[i*CODE_WIDTH+:CODE_WIDTH]); $display ("**********************************************"); end `endif end endgenerate endmodule // mc
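// ---------------------------------------------------------------------------
// Hedged, simulation-only sketch (not part of the MIG sources) of the
// ps-to-CK conversion performed in mc.v with cdiv(): each timing parameter in
// ps is divided by tCK and rounded up, and DDR3 parameters such as tRTP
// additionally get a 4-CK floor. The tCK value below (1250 ps, i.e. an assumed
// DDR3-1600 configuration) is an example only.
// ---------------------------------------------------------------------------
module mc_timing_conversion_example;
   localparam tCK  = 1250;   // assumed memory clock period (ps)
   localparam tRP  = 12500;  // PRE command period (ps)
   localparam tRTP = 7500;   // read-to-PRE delay (ps)

   // Same ceiling division as the cdiv function in mc.v.
   function integer cdiv (input integer num, input integer div);
      begin
         cdiv = (num/div) + (((num%div)>0) ? 1 : 0);
      end
   endfunction

   localparam nRP     = cdiv(tRP, tCK);              // ceil(12500/1250) = 10 CK
   localparam nRTP_CK = cdiv(tRTP, tCK);             // ceil(7500/1250)  = 6 CK
   localparam nRTP    = (nRTP_CK < 4) ? 4 : nRTP_CK; // DDR3 4-CK floor -> 6 CK

   initial $display("nRP=%0d CK, nRTP=%0d CK", nRP, nRTP);
endmodule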
// ---------------------------------------------------------------------- // Copyright (c) 2016, The Regents of the University of California All // rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents of the University of California // nor the names of its contributors may be used to endorse or // promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE // UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS // OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // ---------------------------------------------------------------------- //---------------------------------------------------------------------------- // Filename: async_fifo_fwft.v // Version: 1.00.a // Verilog Standard: Verilog-2001 // Description: An asynchronous capable parameterized FIFO. As with all // first word fall through FIFOs, the RD_DATA will be valid when RD_EMPTY is // low. Asserting RD_EN will consume the current RD_DATA value and cause the // next value (if it exists) to appear on RD_DATA on the following cycle. Be sure // to check if RD_EMPTY is low each cycle to determine if RD_DATA is valid. 
// Author: Matt Jacobsen // History: @mattj: Version 2.0 //----------------------------------------------------------------------------- `timescale 1ns/1ns module async_fifo_fwft #( parameter C_WIDTH = 32, // Data bus width parameter C_DEPTH = 1024, // Depth of the FIFO // Local parameters parameter C_REAL_DEPTH = 2**clog2(C_DEPTH), parameter C_DEPTH_BITS = clog2s(C_REAL_DEPTH), parameter C_DEPTH_P1_BITS = clog2s(C_REAL_DEPTH+1) ) ( input RD_CLK, // Read clock input RD_RST, // Read synchronous reset input WR_CLK, // Write clock input WR_RST, // Write synchronous reset input [C_WIDTH-1:0] WR_DATA, // Write data input (WR_CLK) input WR_EN, // Write enable, high active (WR_CLK) output [C_WIDTH-1:0] RD_DATA, // Read data output (RD_CLK) input RD_EN, // Read enable, high active (RD_CLK) output WR_FULL, // Full condition (WR_CLK) output RD_EMPTY // Empty condition (RD_CLK) ); `include "functions.vh" reg [C_WIDTH-1:0] rData=0; reg [C_WIDTH-1:0] rCache=0; reg [1:0] rCount=0; reg rFifoDataValid=0; reg rDataValid=0; reg rCacheValid=0; wire [C_WIDTH-1:0] wData; wire wEmpty; wire wRen = RD_EN || (rCount < 2'd2); assign RD_DATA = rData; assign RD_EMPTY = !rDataValid; // Wrapped non-FWFT FIFO (synthesis attributes applied to this module will // determine the memory option). async_fifo #(.C_WIDTH(C_WIDTH), .C_DEPTH(C_DEPTH)) fifo ( .WR_CLK(WR_CLK), .WR_RST(WR_RST), .RD_CLK(RD_CLK), .RD_RST(RD_RST), .WR_EN(WR_EN), .WR_DATA(WR_DATA), .WR_FULL(WR_FULL), .RD_EN(wRen), .RD_DATA(wData), .RD_EMPTY(wEmpty) ); always @ (posedge RD_CLK) begin if (RD_RST) begin rCount <= #1 0; rDataValid <= #1 0; rCacheValid <= #1 0; rFifoDataValid <= #1 0; end else begin // Keep track of the count rCount <= #1 rCount + (wRen & !wEmpty) - (!RD_EMPTY & RD_EN); // Signals when wData from FIFO is valid rFifoDataValid <= #1 (wRen & !wEmpty); // Keep rData up to date if (rFifoDataValid) begin if (RD_EN | !rDataValid) begin rData <= #1 wData; rDataValid <= #1 1'd1; rCacheValid <= #1 1'd0; end else begin rCacheValid <= #1 1'd1; end rCache <= #1 wData; end else begin if (RD_EN | !rDataValid) begin rData <= #1 rCache; rDataValid <= #1 rCacheValid; rCacheValid <= #1 1'd0; end end end end endmodule
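// ---------------------------------------------------------------------------
// Minimal usage sketch (an illustrative consumer, not part of the original
// sources): with a first-word-fall-through FIFO the read data is already valid
// whenever RD_EMPTY is low, so a consumer qualifies RD_EN with !RD_EMPTY and
// can register RD_DATA in the same cycle it pops the word.
// ---------------------------------------------------------------------------
module async_fifo_fwft_consumer_example #(
   parameter C_WIDTH = 32                    // Data bus width
) (
   input                    RD_CLK,          // Read clock (same as the FIFO's)
   input                    RD_EMPTY,        // From async_fifo_fwft
   input  [C_WIDTH-1:0]     RD_DATA,         // From async_fifo_fwft
   input                    READY,           // Downstream can take a word this cycle
   output                   RD_EN,           // To async_fifo_fwft
   output reg [C_WIDTH-1:0] WORD,            // Captured word
   output reg               WORD_VALID       // WORD holds a freshly popped value
);

// Pop only when data is present and the downstream is ready.
assign RD_EN = !RD_EMPTY & READY;

always @ (posedge RD_CLK) begin
   WORD_VALID <= #1 RD_EN;
   if (RD_EN)
      WORD <= #1 RD_DATA; // RD_DATA is valid in the same cycle RD_EN is asserted
end

endmodule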
//***************************************************************************** // (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor : Xilinx // \ \ \/ Version : %version // \ \ Application : MIG // / / Filename : ecc_dec_fix.v // /___/ /\ Date Last Modified : $date$ // \ \ / \ Date Created : Tue Jun 30 2009 // \___\/\___\ // //Device : 7-Series //Design Name : DDR3 SDRAM //Purpose : //Reference : //Revision History : //***************************************************************************** `timescale 1ps/1ps module mig_7series_v1_9_ecc_dec_fix #( parameter TCQ = 100, parameter PAYLOAD_WIDTH = 64, parameter CODE_WIDTH = 72, parameter DATA_WIDTH = 64, parameter DQ_WIDTH = 72, parameter ECC_WIDTH = 8, parameter nCK_PER_CLK = 4 ) ( /*AUTOARG*/ // Outputs rd_data, ecc_single, ecc_multiple, // Inputs clk, rst, h_rows, phy_rddata, correct_en, ecc_status_valid ); input clk; input rst; // Compute syndromes. 
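// Each syndrome bit computed below is the parity of the received
// CODE_WIDTH-bit code word ANDed with one row of the H matrix, i.e.
//   syndrome[m] = ^(code_word & h_rows[m*CODE_WIDTH +: CODE_WIDTH]);
// an all-zero syndrome indicates that no error was detected.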
input [CODE_WIDTH*ECC_WIDTH-1:0] h_rows; input [2*nCK_PER_CLK*DQ_WIDTH-1:0] phy_rddata; wire [2*nCK_PER_CLK*ECC_WIDTH-1:0] syndrome_ns; genvar k; genvar m; generate for (k=0; k<2*nCK_PER_CLK; k=k+1) begin : ecc_word for (m=0; m<ECC_WIDTH; m=m+1) begin : ecc_bit assign syndrome_ns[k*ECC_WIDTH+m] = ^(phy_rddata[k*DQ_WIDTH+:CODE_WIDTH] & h_rows[m*CODE_WIDTH+:CODE_WIDTH]); end end endgenerate reg [2*nCK_PER_CLK*ECC_WIDTH-1:0] syndrome_r; always @(posedge clk) syndrome_r <= #TCQ syndrome_ns; // Extract payload bits from raw DRAM bits and register. wire [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] ecc_rddata_ns; genvar i; generate for (i=0; i<2*nCK_PER_CLK; i=i+1) begin : extract_payload assign ecc_rddata_ns[i*PAYLOAD_WIDTH+:PAYLOAD_WIDTH] = phy_rddata[i*DQ_WIDTH+:PAYLOAD_WIDTH]; end endgenerate reg [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] ecc_rddata_r; always @(posedge clk) ecc_rddata_r <= #TCQ ecc_rddata_ns; // Regenerate h_matrix from h_rows leaving out the identity part // since we're not going to correct the ECC bits themselves. genvar n; genvar p; wire [ECC_WIDTH-1:0] h_matrix [DATA_WIDTH-1:0]; generate for (n=0; n<DATA_WIDTH; n=n+1) begin : h_col for (p=0; p<ECC_WIDTH; p=p+1) begin : h_bit assign h_matrix [n][p] = h_rows [p*CODE_WIDTH+n]; end end endgenerate // Compute flip bits. wire [2*nCK_PER_CLK*DATA_WIDTH-1:0] flip_bits; genvar q; genvar r; generate for (q=0; q<2*nCK_PER_CLK; q=q+1) begin : flip_word for (r=0; r<DATA_WIDTH; r=r+1) begin : flip_bit assign flip_bits[q*DATA_WIDTH+r] = h_matrix[r] == syndrome_r[q*ECC_WIDTH+:ECC_WIDTH]; end end endgenerate // Correct data. output reg [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] rd_data; input correct_en; integer s; always @(/*AS*/correct_en or ecc_rddata_r or flip_bits) for (s=0; s<2*nCK_PER_CLK; s=s+1) if (correct_en) rd_data[s*PAYLOAD_WIDTH+:DATA_WIDTH] = ecc_rddata_r[s*PAYLOAD_WIDTH+:DATA_WIDTH] ^ flip_bits[s*DATA_WIDTH+:DATA_WIDTH]; else rd_data[s*PAYLOAD_WIDTH+:DATA_WIDTH] = ecc_rddata_r[s*PAYLOAD_WIDTH+:DATA_WIDTH]; // Copy raw payload bits if ECC_TEST is ON. localparam RAW_BIT_WIDTH = PAYLOAD_WIDTH - DATA_WIDTH; genvar t; generate if (RAW_BIT_WIDTH > 0) for (t=0; t<2*nCK_PER_CLK; t=t+1) begin : copy_raw_bits always @(/*AS*/ecc_rddata_r) rd_data[(t+1)*PAYLOAD_WIDTH-1-:RAW_BIT_WIDTH] = ecc_rddata_r[(t+1)*PAYLOAD_WIDTH-1-:RAW_BIT_WIDTH]; end endgenerate // Generate status information. input ecc_status_valid; output wire [2*nCK_PER_CLK-1:0] ecc_single; output wire [2*nCK_PER_CLK-1:0] ecc_multiple; genvar v; generate for (v=0; v<2*nCK_PER_CLK; v=v+1) begin : compute_status wire zero = ~|syndrome_r[v*ECC_WIDTH+:ECC_WIDTH]; wire odd = ^syndrome_r[v*ECC_WIDTH+:ECC_WIDTH]; assign ecc_single[v] = ecc_status_valid && ~zero && odd; assign ecc_multiple[v] = ecc_status_valid && ~zero && ~odd; end endgenerate endmodule
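// ---------------------------------------------------------------------------
// Hedged, simulation-only illustration (not part of the MIG sources) of the
// SEC-DED status decode used in ecc_dec_fix: a zero syndrome means no detected
// error, a non-zero syndrome with odd parity flags a correctable single-bit
// error, and a non-zero syndrome with even parity flags an uncorrectable
// multi-bit error. In the real module these terms are additionally gated by
// ecc_status_valid. The syndrome values below are arbitrary examples.
// ---------------------------------------------------------------------------
module ecc_syndrome_status_example;
   localparam ECC_WIDTH = 8;
   reg [ECC_WIDTH-1:0] syndrome;
   wire zero         = ~|syndrome;
   wire odd          = ^syndrome;
   wire ecc_single   = ~zero &&  odd;
   wire ecc_multiple = ~zero && ~odd;
   initial begin
      syndrome = 8'h00; #1 $display("syndrome=%b single=%b multiple=%b", syndrome, ecc_single, ecc_multiple);
      syndrome = 8'h01; #1 $display("syndrome=%b single=%b multiple=%b", syndrome, ecc_single, ecc_multiple);
      syndrome = 8'h03; #1 $display("syndrome=%b single=%b multiple=%b", syndrome, ecc_single, ecc_multiple);
   end
endmodule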
//***************************************************************************** // (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor : Xilinx // \ \ \/ Version : %version // \ \ Application : MIG // / / Filename : col_mach.v // /___/ /\ Date Last Modified : $date$ // \ \ / \ Date Created : Tue Jun 30 2009 // \___\/\___\ // //Device : 7-Series //Design Name : DDR3 SDRAM //Purpose : //Reference : //Revision History : //***************************************************************************** // The column machine manages the dq bus. Since there is a single DQ // bus, and the column part of the DRAM is tightly coupled to this DQ // bus, conceptually, the DQ bus and all of the column hardware in // a multi rank DRAM array are managed as a single unit. // // // The column machine does not "enforce" the column timing directly. // It generates information and sends it to the bank machines. If the // bank machines incorrectly make a request, the column machine will // simply overwrite the existing request with the new request even // if this would result in a timing or protocol violation. // // The column machine // hosts the block that controls read and write data transfer // to and from the dq bus. 
// // And if configured, there is provision for tracking the address // of a command as it moves through the column pipeline. This // address will be logged for detected ECC errors. `timescale 1 ps / 1 ps module mig_7series_v1_9_col_mach # ( parameter TCQ = 100, parameter BANK_WIDTH = 3, parameter BURST_MODE = "8", parameter COL_WIDTH = 12, parameter CS_WIDTH = 4, parameter DATA_BUF_ADDR_WIDTH = 8, parameter DATA_BUF_OFFSET_WIDTH = 1, parameter DELAY_WR_DATA_CNTRL = 0, parameter DQS_WIDTH = 8, parameter DRAM_TYPE = "DDR3", parameter EARLY_WR_DATA_ADDR = "OFF", parameter ECC = "OFF", parameter MC_ERR_ADDR_WIDTH = 31, parameter nCK_PER_CLK = 2, parameter nPHY_WRLAT = 0, parameter RANK_WIDTH = 2, parameter ROW_WIDTH = 16 ) (/*AUTOARG*/ // Outputs dq_busy_data, wr_data_offset, mc_wrdata_en, wr_data_en, wr_data_addr, rd_rmw, ecc_err_addr, ecc_status_valid, wr_ecc_buf, rd_data_end, rd_data_addr, rd_data_offset, rd_data_en, col_read_fifo_empty, // Inputs clk, rst, sent_col, col_size, col_wr_data_buf_addr, phy_rddata_valid, col_periodic_rd, col_data_buf_addr, col_rmw, col_rd_wr, col_ra, col_ba, col_row, col_a ); input clk; input rst; input sent_col; input col_rd_wr; output reg dq_busy_data = 1'b0; // The following generates a column command disable based mostly on the type // of DRAM and the fabric to DRAM CK ratio. generate if ((nCK_PER_CLK == 1) && ((BURST_MODE == "8") || (DRAM_TYPE == "DDR3"))) begin : three_bumps reg [1:0] granted_col_d_r; wire [1:0] granted_col_d_ns = {sent_col, granted_col_d_r[1]}; always @(posedge clk) granted_col_d_r <= #TCQ granted_col_d_ns; always @(/*AS*/granted_col_d_r or sent_col) dq_busy_data = sent_col || |granted_col_d_r; end if (((nCK_PER_CLK == 2) && ((BURST_MODE == "8") || (DRAM_TYPE == "DDR3"))) || ((nCK_PER_CLK == 1) && ((BURST_MODE == "4") || (DRAM_TYPE == "DDR2")))) begin : one_bump always @(/*AS*/sent_col) dq_busy_data = sent_col; end endgenerate // This generates a data offset based on fabric clock to DRAM CK ratio and // the size bit. Note that this is different that the dq_busy_data signal // generated above. reg [1:0] offset_r = 2'b0; reg [1:0] offset_ns = 2'b0; input col_size; wire data_end; generate if(nCK_PER_CLK == 4) begin : data_valid_4_1 // For 4:1 mode all data is transfered in a single beat so the default // values of 0 for offset_r/offset_ns suffice - just tie off data_end assign data_end = 1'b1; end else begin if(DATA_BUF_OFFSET_WIDTH == 2) begin : data_valid_1_1 always @(col_size or offset_r or rst or sent_col) begin if (rst) offset_ns = 2'b0; else begin offset_ns = offset_r; if (sent_col) offset_ns = 2'b1; else if (|offset_r && (offset_r != {col_size, 1'b1})) offset_ns = offset_r + 2'b1; else offset_ns = 2'b0; end end always @(posedge clk) offset_r <= #TCQ offset_ns; assign data_end = col_size ? (offset_r == 2'b11) : offset_r[0]; end else begin : data_valid_2_1 always @(col_size or rst or sent_col) offset_ns[0] = rst ? 1'b0 : sent_col && col_size; always @(posedge clk) offset_r[0] <= #TCQ offset_ns[0]; assign data_end = col_size ? 
offset_r[0] : 1'b1; end end endgenerate reg [DATA_BUF_OFFSET_WIDTH-1:0] offset_r1 = {DATA_BUF_OFFSET_WIDTH{1'b0}}; reg [DATA_BUF_OFFSET_WIDTH-1:0] offset_r2 = {DATA_BUF_OFFSET_WIDTH{1'b0}}; reg col_rd_wr_r1; reg col_rd_wr_r2; generate if ((nPHY_WRLAT >= 1) || (DELAY_WR_DATA_CNTRL == 1)) begin : offset_pipe_0 always @(posedge clk) offset_r1 <= #TCQ offset_r[DATA_BUF_OFFSET_WIDTH-1:0]; always @(posedge clk) col_rd_wr_r1 <= #TCQ col_rd_wr; end if(nPHY_WRLAT == 2) begin : offset_pipe_1 always @(posedge clk) offset_r2 <= #TCQ offset_r1[DATA_BUF_OFFSET_WIDTH-1:0]; always @(posedge clk) col_rd_wr_r2 <= #TCQ col_rd_wr_r1; end endgenerate output wire [DATA_BUF_OFFSET_WIDTH-1:0] wr_data_offset; assign wr_data_offset = (DELAY_WR_DATA_CNTRL == 1) ? offset_r1[DATA_BUF_OFFSET_WIDTH-1:0] : (EARLY_WR_DATA_ADDR == "OFF") ? offset_r[DATA_BUF_OFFSET_WIDTH-1:0] : offset_ns[DATA_BUF_OFFSET_WIDTH-1:0]; reg sent_col_r1; reg sent_col_r2; always @(posedge clk) sent_col_r1 <= #TCQ sent_col; always @(posedge clk) sent_col_r2 <= #TCQ sent_col_r1; wire wrdata_en = (nPHY_WRLAT == 0) ? (sent_col || |offset_r) & ~col_rd_wr : (nPHY_WRLAT == 1) ? (sent_col_r1 || |offset_r1) & ~col_rd_wr_r1 : //(nPHY_WRLAT >= 2) ? (sent_col_r2 || |offset_r2) & ~col_rd_wr_r2; output wire mc_wrdata_en; assign mc_wrdata_en = wrdata_en; output wire wr_data_en; assign wr_data_en = (DELAY_WR_DATA_CNTRL == 1) ? ((sent_col_r1 || |offset_r1) && ~col_rd_wr_r1) : ((sent_col || |offset_r) && ~col_rd_wr); input [DATA_BUF_ADDR_WIDTH-1:0] col_wr_data_buf_addr; output wire [DATA_BUF_ADDR_WIDTH-1:0] wr_data_addr; generate if (DELAY_WR_DATA_CNTRL == 1) begin : delay_wr_data_cntrl_eq_1 reg [DATA_BUF_ADDR_WIDTH-1:0] col_wr_data_buf_addr_r; always @(posedge clk) col_wr_data_buf_addr_r <= #TCQ col_wr_data_buf_addr; assign wr_data_addr = col_wr_data_buf_addr_r; end else begin : delay_wr_data_cntrl_ne_1 assign wr_data_addr = col_wr_data_buf_addr; end endgenerate // CAS-RD to mc_rddata_en wire read_data_valid = (sent_col || |offset_r) && col_rd_wr; function integer clogb2 (input integer size); // ceiling logb2 begin size = size - 1; for (clogb2=1; size>1; clogb2=clogb2+1) size = size >> 1; end endfunction // clogb2 // Implement FIFO that records reads as they are sent to the DRAM. // When phy_rddata_valid is returned some unknown time later, the // FIFO output is used to control how the data is interpreted. input phy_rddata_valid; output wire rd_rmw; output reg [MC_ERR_ADDR_WIDTH-1:0] ecc_err_addr; output reg ecc_status_valid; output reg wr_ecc_buf; output reg rd_data_end; output reg [DATA_BUF_ADDR_WIDTH-1:0] rd_data_addr; output reg [DATA_BUF_OFFSET_WIDTH-1:0] rd_data_offset; (* keep = "true", max_fanout = 10 *) output reg rd_data_en /* synthesis syn_maxfan = 10 */; output col_read_fifo_empty; input col_periodic_rd; input [DATA_BUF_ADDR_WIDTH-1:0] col_data_buf_addr; input col_rmw; input [RANK_WIDTH-1:0] col_ra; input [BANK_WIDTH-1:0] col_ba; input [ROW_WIDTH-1:0] col_row; input [ROW_WIDTH-1:0] col_a; // Real column address (skip A10/AP and A12/BC#). The maximum width is 12; // the width will be tailored for the target DRAM downstream. 
wire [11:0] col_a_full; // Minimum row width is 12; take remaining 11 bits after omitting A10/AP assign col_a_full[10:0] = {col_a[11], col_a[9:0]}; // Get the 12th bit when row address width accommodates it; omit A12/BC# generate if (ROW_WIDTH >= 14) begin : COL_A_FULL_11_1 assign col_a_full[11] = col_a[13]; end else begin : COL_A_FULL_11_0 assign col_a_full[11] = 0; end endgenerate // Extract only the width of the target DRAM wire [COL_WIDTH-1:0] col_a_extracted = col_a_full[COL_WIDTH-1:0]; localparam MC_ERR_LINE_WIDTH = MC_ERR_ADDR_WIDTH-DATA_BUF_OFFSET_WIDTH; localparam FIFO_WIDTH = 1 /*data_end*/ + 1 /*periodic_rd*/ + DATA_BUF_ADDR_WIDTH + DATA_BUF_OFFSET_WIDTH + ((ECC == "OFF") ? 0 : 1+MC_ERR_LINE_WIDTH); localparam FULL_RAM_CNT = (FIFO_WIDTH/6); localparam REMAINDER = FIFO_WIDTH % 6; localparam RAM_CNT = FULL_RAM_CNT + ((REMAINDER == 0 ) ? 0 : 1); localparam RAM_WIDTH = (RAM_CNT*6); generate begin : read_fifo wire [MC_ERR_LINE_WIDTH:0] ecc_line; if (CS_WIDTH == 1) assign ecc_line = {col_rmw, col_ba, col_row, col_a_extracted}; else assign ecc_line = {col_rmw, col_ra, col_ba, col_row, col_a_extracted}; wire [FIFO_WIDTH-1:0] real_fifo_data; if (ECC == "OFF") assign real_fifo_data = {data_end, col_periodic_rd, col_data_buf_addr, offset_r[DATA_BUF_OFFSET_WIDTH-1:0]}; else assign real_fifo_data = {data_end, col_periodic_rd, col_data_buf_addr, offset_r[DATA_BUF_OFFSET_WIDTH-1:0], ecc_line}; wire [RAM_WIDTH-1:0] fifo_in_data; if (REMAINDER == 0) assign fifo_in_data = real_fifo_data; else assign fifo_in_data = {{6-REMAINDER{1'b0}}, real_fifo_data}; wire [RAM_WIDTH-1:0] fifo_out_data_ns; reg [4:0] head_r; wire [4:0] head_ns = rst ? 5'b0 : read_data_valid ? (head_r + 5'b1) : head_r; always @(posedge clk) head_r <= #TCQ head_ns; reg [4:0] tail_r; wire [4:0] tail_ns = rst ? 5'b0 : phy_rddata_valid ? (tail_r + 5'b1) : tail_r; always @(posedge clk) tail_r <= #TCQ tail_ns; assign col_read_fifo_empty = head_r == tail_r ? 1'b1 : 1'b0; genvar i; for (i=0; i<RAM_CNT; i=i+1) begin : fifo_ram RAM32M #(.INIT_A(64'h0000000000000000), .INIT_B(64'h0000000000000000), .INIT_C(64'h0000000000000000), .INIT_D(64'h0000000000000000) ) RAM32M0 ( .DOA(fifo_out_data_ns[((i*6)+4)+:2]), .DOB(fifo_out_data_ns[((i*6)+2)+:2]), .DOC(fifo_out_data_ns[((i*6)+0)+:2]), .DOD(), .DIA(fifo_in_data[((i*6)+4)+:2]), .DIB(fifo_in_data[((i*6)+2)+:2]), .DIC(fifo_in_data[((i*6)+0)+:2]), .DID(2'b0), .ADDRA(tail_ns), .ADDRB(tail_ns), .ADDRC(tail_ns), .ADDRD(head_r), .WE(1'b1), .WCLK(clk) ); end // block: fifo_ram reg [RAM_WIDTH-1:0] fifo_out_data_r; always @(posedge clk) fifo_out_data_r <= #TCQ fifo_out_data_ns; // When ECC is ON, most of the FIFO output is delayed // by one state. 
if (ECC == "OFF") begin reg periodic_rd; always @(/*AS*/phy_rddata_valid or fifo_out_data_r) begin {rd_data_end, periodic_rd, rd_data_addr, rd_data_offset} = fifo_out_data_r[FIFO_WIDTH-1:0]; ecc_err_addr = {MC_ERR_ADDR_WIDTH{1'b0}}; rd_data_en = phy_rddata_valid && ~periodic_rd; ecc_status_valid = 1'b0; wr_ecc_buf = 1'b0; end assign rd_rmw = 1'b0; end else begin wire rd_data_end_ns; wire periodic_rd; wire [DATA_BUF_ADDR_WIDTH-1:0] rd_data_addr_ns; wire [DATA_BUF_OFFSET_WIDTH-1:0] rd_data_offset_ns; wire [MC_ERR_ADDR_WIDTH-1:0] ecc_err_addr_ns; assign {rd_data_end_ns, periodic_rd, rd_data_addr_ns, rd_data_offset_ns, rd_rmw, ecc_err_addr_ns[DATA_BUF_OFFSET_WIDTH+:MC_ERR_LINE_WIDTH]} = {fifo_out_data_r[FIFO_WIDTH-1:0]}; assign ecc_err_addr_ns[0+:DATA_BUF_OFFSET_WIDTH] = rd_data_offset_ns; always @(posedge clk) rd_data_end <= #TCQ rd_data_end_ns; always @(posedge clk) rd_data_addr <= #TCQ rd_data_addr_ns; always @(posedge clk) rd_data_offset <= #TCQ rd_data_offset_ns; always @(posedge clk) ecc_err_addr <= #TCQ ecc_err_addr_ns; wire rd_data_en_ns = phy_rddata_valid && ~(periodic_rd || rd_rmw); always @(posedge clk) rd_data_en <= rd_data_en_ns; wire ecc_status_valid_ns = phy_rddata_valid && ~periodic_rd; always @(posedge clk) ecc_status_valid <= #TCQ ecc_status_valid_ns; wire wr_ecc_buf_ns = phy_rddata_valid && ~periodic_rd && rd_rmw; always @(posedge clk) wr_ecc_buf <= #TCQ wr_ecc_buf_ns; end end endgenerate endmodule
/***************************************************************** -- (c) Copyright 2011 - 2013 Xilinx, Inc. All rights reserved. -- -- This file contains confidential and proprietary information -- of Xilinx, Inc. and is protected under U.S. and -- international copyright and other intellectual property -- laws. -- -- DISCLAIMER -- This disclaimer is not a license and does not grant any -- rights to the materials distributed herewith. Except as -- otherwise provided in a valid license issued to you by -- Xilinx, and to the maximum extent permitted by applicable -- law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND -- WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES -- AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING -- BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- -- INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and -- (2) Xilinx shall not be liable (whether in contract or tort, -- including negligence, or under any other theory of -- liability) for any loss or damage of any kind or nature -- related to, arising under or in connection with these -- materials, including for any direct, or any indirect, -- special, incidental, or consequential loss or damage -- (including loss of data, profits, goodwill, or any type of -- loss or damage suffered as a result of any action brought -- by a third party) even if such damage or loss was -- reasonably foreseeable or Xilinx had been advised of the -- possibility of the same. -- -- CRITICAL APPLICATIONS -- Xilinx products are not designed or intended to be fail- -- safe, or for use in any application requiring fail-safe -- performance, such as life-support or safety devices or -- systems, Class III medical devices, nuclear facilities, -- applications related to the deployment of airbags, or any -- other applications that could lead to death, personal -- injury, or severe property or environmental damage -- (individually and collectively, "Critical -- Applications"). A Customer assumes the sole risk and -- liability of any use of Xilinx products in Critical -- Applications, subject only to applicable laws and -- regulations governing limitations on product liability. -- -- THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS -- PART OF THIS FILE AT ALL TIMES. // // // Owner: Gary Martin // Revision: $Id: //depot/icm/proj/common/head/rtl/v32_cmt/rtl/phy/byte_group_io.v#4 $ // $Author: $ // $DateTime: $ // $Change: $ // Description: // This verilog file is a paramertizable I/O termination for // the single byte lane. // to create a N byte-lane wide phy. // // History: // Date Engineer Description // 04/01/2010 G. Martin Initial Checkin. 
// ////////////////////////////////////////////////////////////////// *****************************************************************/ `timescale 1ps/1ps module mig_7series_v1_9_ddr_byte_group_io #( // bit lane existance parameter BITLANES = 12'b1111_1111_1111, parameter BITLANES_OUTONLY = 12'b0000_0000_0000, parameter PO_DATA_CTL = "FALSE", parameter OSERDES_DATA_RATE = "DDR", parameter OSERDES_DATA_WIDTH = 4, parameter IDELAYE2_IDELAY_TYPE = "VARIABLE", parameter IDELAYE2_IDELAY_VALUE = 00, parameter IODELAY_GRP = "IODELAY_MIG", // local usage only, don't pass down parameter BUS_WIDTH = 12, parameter SYNTHESIS = "FALSE" ) ( input [9:0] mem_dq_in, output [BUS_WIDTH-1:0] mem_dq_out, output [BUS_WIDTH-1:0] mem_dq_ts, input mem_dqs_in, output mem_dqs_out, output mem_dqs_ts, output [(4*10)-1:0] iserdes_dout, // 2 extra 12-bit lanes not used output dqs_to_phaser, input iserdes_clk, input iserdes_clkb, input iserdes_clkdiv, input phy_clk, input rst, input oserdes_rst, input iserdes_rst, input [1:0] oserdes_dqs, input [1:0] oserdes_dqsts, input [(4*BUS_WIDTH)-1:0] oserdes_dq, input [1:0] oserdes_dqts, input oserdes_clk, input oserdes_clk_delayed, input oserdes_clkdiv, input idelay_inc, input idelay_ce, input idelay_ld, input idelayctrl_refclk ); /// INSTANCES localparam ISERDES_DQ_DATA_RATE = "DDR"; localparam ISERDES_DQ_DATA_WIDTH = 4; localparam ISERDES_DQ_DYN_CLKDIV_INV_EN = "FALSE"; localparam ISERDES_DQ_DYN_CLK_INV_EN = "FALSE"; localparam ISERDES_DQ_INIT_Q1 = 1'b0; localparam ISERDES_DQ_INIT_Q2 = 1'b0; localparam ISERDES_DQ_INIT_Q3 = 1'b0; localparam ISERDES_DQ_INIT_Q4 = 1'b0; localparam ISERDES_DQ_INTERFACE_TYPE = "MEMORY_DDR3"; localparam ISERDES_NUM_CE = 2; localparam ISERDES_DQ_IOBDELAY = "IFD"; localparam ISERDES_DQ_OFB_USED = "FALSE"; localparam ISERDES_DQ_SERDES_MODE = "MASTER"; localparam ISERDES_DQ_SRVAL_Q1 = 1'b0; localparam ISERDES_DQ_SRVAL_Q2 = 1'b0; localparam ISERDES_DQ_SRVAL_Q3 = 1'b0; localparam ISERDES_DQ_SRVAL_Q4 = 1'b0; wire [BUS_WIDTH-1:0] data_in_dly; wire [BUS_WIDTH-1:0] oserdes_dq_buf; wire [BUS_WIDTH-1:0] oserdes_dqts_buf; wire oserdes_dqs_buf; wire oserdes_dqsts_buf; wire [9:0] data_in; wire tbyte_out; assign mem_dq_out = oserdes_dq_buf; assign mem_dq_ts = oserdes_dqts_buf; assign data_in = mem_dq_in; assign mem_dqs_out = oserdes_dqs_buf; assign mem_dqs_ts = oserdes_dqsts_buf; assign dqs_to_phaser = mem_dqs_in; reg iserdes_clk_d; always @(*) iserdes_clk_d <= #(025) iserdes_clk; reg idelay_ld_rst; reg rst_r1; reg rst_r2; reg rst_r3; reg rst_r4; always @(posedge phy_clk) begin rst_r1 <= #1 rst; rst_r2 <= #1 rst_r1; rst_r3 <= #1 rst_r2; rst_r4 <= #1 rst_r3; end always @(posedge phy_clk) begin if (rst) idelay_ld_rst <= #1 1'b1; else if (rst_r4) idelay_ld_rst <= #1 1'b0; end genvar i; generate for ( i = 0; i != 10 && PO_DATA_CTL == "TRUE" ; i=i+1) begin : input_ if ( BITLANES[i] && !BITLANES_OUTONLY[i]) begin : iserdes_dq_ ISERDESE2 #( .DATA_RATE ( ISERDES_DQ_DATA_RATE), .DATA_WIDTH ( ISERDES_DQ_DATA_WIDTH), .DYN_CLKDIV_INV_EN ( ISERDES_DQ_DYN_CLKDIV_INV_EN), .DYN_CLK_INV_EN ( ISERDES_DQ_DYN_CLK_INV_EN), .INIT_Q1 ( ISERDES_DQ_INIT_Q1), .INIT_Q2 ( ISERDES_DQ_INIT_Q2), .INIT_Q3 ( ISERDES_DQ_INIT_Q3), .INIT_Q4 ( ISERDES_DQ_INIT_Q4), .INTERFACE_TYPE ( ISERDES_DQ_INTERFACE_TYPE), .NUM_CE ( ISERDES_NUM_CE), .IOBDELAY ( ISERDES_DQ_IOBDELAY), .OFB_USED ( ISERDES_DQ_OFB_USED), .SERDES_MODE ( ISERDES_DQ_SERDES_MODE), .SRVAL_Q1 ( ISERDES_DQ_SRVAL_Q1), .SRVAL_Q2 ( ISERDES_DQ_SRVAL_Q2), .SRVAL_Q3 ( ISERDES_DQ_SRVAL_Q3), .SRVAL_Q4 ( ISERDES_DQ_SRVAL_Q4) ) iserdesdq ( .O (), .Q1 
(iserdes_dout[4*i + 3]), .Q2 (iserdes_dout[4*i + 2]), .Q3 (iserdes_dout[4*i + 1]), .Q4 (iserdes_dout[4*i + 0]), .Q5 (), .Q6 (), .SHIFTOUT1 (), .SHIFTOUT2 (), .BITSLIP (1'b0), .CE1 (1'b1), .CE2 (1'b1), .CLK (iserdes_clk_d), .CLKB (!iserdes_clk_d), .CLKDIVP (iserdes_clkdiv), .CLKDIV (), .DDLY (data_in_dly[i]), .D (data_in[i]), // dedicated route to iob for debugging // or as needed, select with IOBDELAY .DYNCLKDIVSEL (1'b0), .DYNCLKSEL (1'b0), // NOTE: OCLK is not used in this design, but is required to meet // a design rule check in map and bitgen. Do not disconnect it. .OCLK (oserdes_clk), .OFB (), .RST (1'b0), // .RST (iserdes_rst), .SHIFTIN1 (1'b0), .SHIFTIN2 (1'b0) ); localparam IDELAYE2_CINVCTRL_SEL = "FALSE"; localparam IDELAYE2_DELAY_SRC = "IDATAIN"; localparam IDELAYE2_HIGH_PERFORMANCE_MODE = "TRUE"; localparam IDELAYE2_PIPE_SEL = "FALSE"; localparam IDELAYE2_ODELAY_TYPE = "FIXED"; localparam IDELAYE2_REFCLK_FREQUENCY = 200.0; localparam IDELAYE2_SIGNAL_PATTERN = "DATA"; (* IODELAY_GROUP = IODELAY_GRP *) IDELAYE2 #( .CINVCTRL_SEL ( IDELAYE2_CINVCTRL_SEL), .DELAY_SRC ( IDELAYE2_DELAY_SRC), .HIGH_PERFORMANCE_MODE ( IDELAYE2_HIGH_PERFORMANCE_MODE), .IDELAY_TYPE ( IDELAYE2_IDELAY_TYPE), .IDELAY_VALUE ( IDELAYE2_IDELAY_VALUE), .PIPE_SEL ( IDELAYE2_PIPE_SEL), .REFCLK_FREQUENCY ( IDELAYE2_REFCLK_FREQUENCY ), .SIGNAL_PATTERN ( IDELAYE2_SIGNAL_PATTERN) ) idelaye2 ( .CNTVALUEOUT (), .DATAOUT (data_in_dly[i]), .C (phy_clk), // automatically wired by ISE .CE (idelay_ce), .CINVCTRL (), .CNTVALUEIN (5'b00000), .DATAIN (1'b0), .IDATAIN (data_in[i]), .INC (idelay_inc), .LD (idelay_ld | idelay_ld_rst), .LDPIPEEN (1'b0), .REGRST (rst) ); end // iserdes_dq else begin assign iserdes_dout[4*i + 3] = 0; assign iserdes_dout[4*i + 2] = 0; assign iserdes_dout[4*i + 1] = 0; assign iserdes_dout[4*i + 0] = 0; end end // input_ endgenerate // iserdes_dq_ localparam OSERDES_DQ_DATA_RATE_OQ = OSERDES_DATA_RATE; localparam OSERDES_DQ_DATA_RATE_TQ = OSERDES_DQ_DATA_RATE_OQ; localparam OSERDES_DQ_DATA_WIDTH = OSERDES_DATA_WIDTH; localparam OSERDES_DQ_INIT_OQ = 1'b1; localparam OSERDES_DQ_INIT_TQ = 1'b1; localparam OSERDES_DQ_INTERFACE_TYPE = "DEFAULT"; localparam OSERDES_DQ_ODELAY_USED = 0; localparam OSERDES_DQ_SERDES_MODE = "MASTER"; localparam OSERDES_DQ_SRVAL_OQ = 1'b1; localparam OSERDES_DQ_SRVAL_TQ = 1'b1; // note: obuf used in control path case, no ts input so width irrelevant localparam OSERDES_DQ_TRISTATE_WIDTH = (OSERDES_DQ_DATA_RATE_OQ == "DDR") ? 4 : 1; localparam OSERDES_DQS_DATA_RATE_OQ = "DDR"; localparam OSERDES_DQS_DATA_RATE_TQ = "DDR"; localparam OSERDES_DQS_TRISTATE_WIDTH = 4; // this is always ddr localparam OSERDES_DQS_DATA_WIDTH = 4; localparam ODDR_CLK_EDGE = "SAME_EDGE"; localparam OSERDES_TBYTE_CTL = "TRUE"; generate localparam NUM_BITLANES = PO_DATA_CTL == "TRUE" ? 
10 : BUS_WIDTH; if ( PO_DATA_CTL == "TRUE" ) begin : slave_ts OSERDESE2 #( .DATA_RATE_OQ (OSERDES_DQ_DATA_RATE_OQ), .DATA_RATE_TQ (OSERDES_DQ_DATA_RATE_TQ), .DATA_WIDTH (OSERDES_DQ_DATA_WIDTH), .INIT_OQ (OSERDES_DQ_INIT_OQ), .INIT_TQ (OSERDES_DQ_INIT_TQ), .SERDES_MODE (OSERDES_DQ_SERDES_MODE), .SRVAL_OQ (OSERDES_DQ_SRVAL_OQ), .SRVAL_TQ (OSERDES_DQ_SRVAL_TQ), .TRISTATE_WIDTH (OSERDES_DQ_TRISTATE_WIDTH), .TBYTE_CTL ("TRUE"), .TBYTE_SRC ("TRUE") ) oserdes_slave_ts ( .OFB (), .OQ (), .SHIFTOUT1 (), // not extended .SHIFTOUT2 (), // not extended .TFB (), .TQ (), .CLK (oserdes_clk), .CLKDIV (oserdes_clkdiv), .D1 (), .D2 (), .D3 (), .D4 (), .D5 (), .D6 (), .OCE (1'b1), .RST (oserdes_rst), .SHIFTIN1 (), // not extended .SHIFTIN2 (), // not extended .T1 (oserdes_dqts[0]), .T2 (oserdes_dqts[0]), .T3 (oserdes_dqts[1]), .T4 (oserdes_dqts[1]), .TCE (1'b1), .TBYTEOUT (tbyte_out), .TBYTEIN (tbyte_out) ); end // slave_ts for (i = 0; i != NUM_BITLANES; i=i+1) begin : output_ if ( BITLANES[i]) begin : oserdes_dq_ if ( PO_DATA_CTL == "TRUE" ) begin : ddr OSERDESE2 #( .DATA_RATE_OQ (OSERDES_DQ_DATA_RATE_OQ), .DATA_RATE_TQ (OSERDES_DQ_DATA_RATE_TQ), .DATA_WIDTH (OSERDES_DQ_DATA_WIDTH), .INIT_OQ (OSERDES_DQ_INIT_OQ), .INIT_TQ (OSERDES_DQ_INIT_TQ), .SERDES_MODE (OSERDES_DQ_SERDES_MODE), .SRVAL_OQ (OSERDES_DQ_SRVAL_OQ), .SRVAL_TQ (OSERDES_DQ_SRVAL_TQ), .TRISTATE_WIDTH (OSERDES_DQ_TRISTATE_WIDTH), .TBYTE_CTL (OSERDES_TBYTE_CTL), .TBYTE_SRC ("FALSE") ) oserdes_dq_i ( .OFB (), .OQ (oserdes_dq_buf[i]), .SHIFTOUT1 (), // not extended .SHIFTOUT2 (), // not extended .TFB (), .TQ (oserdes_dqts_buf[i]), .CLK (oserdes_clk), .CLKDIV (oserdes_clkdiv), .D1 (oserdes_dq[4 * i + 0]), .D2 (oserdes_dq[4 * i + 1]), .D3 (oserdes_dq[4 * i + 2]), .D4 (oserdes_dq[4 * i + 3]), .D5 (), .D6 (), .OCE (1'b1), .RST (oserdes_rst), .SHIFTIN1 (), // not extended .SHIFTIN2 (), // not extended .T1 (/*oserdes_dqts[0]*/), .T2 (/*oserdes_dqts[0]*/), .T3 (/*oserdes_dqts[1]*/), .T4 (/*oserdes_dqts[1]*/), .TCE (1'b1), .TBYTEIN (tbyte_out) ); end else begin : sdr OSERDESE2 #( .DATA_RATE_OQ (OSERDES_DQ_DATA_RATE_OQ), .DATA_RATE_TQ (OSERDES_DQ_DATA_RATE_TQ), .DATA_WIDTH (OSERDES_DQ_DATA_WIDTH), .INIT_OQ (1'b0 /*OSERDES_DQ_INIT_OQ*/), .INIT_TQ (OSERDES_DQ_INIT_TQ), .SERDES_MODE (OSERDES_DQ_SERDES_MODE), .SRVAL_OQ (1'b0 /*OSERDES_DQ_SRVAL_OQ*/), .SRVAL_TQ (OSERDES_DQ_SRVAL_TQ), .TRISTATE_WIDTH (OSERDES_DQ_TRISTATE_WIDTH) ) oserdes_dq_i ( .OFB (), .OQ (oserdes_dq_buf[i]), .SHIFTOUT1 (), // not extended .SHIFTOUT2 (), // not extended .TFB (), .TQ (), .CLK (oserdes_clk), .CLKDIV (oserdes_clkdiv), .D1 (oserdes_dq[4 * i + 0]), .D2 (oserdes_dq[4 * i + 1]), .D3 (oserdes_dq[4 * i + 2]), .D4 (oserdes_dq[4 * i + 3]), .D5 (), .D6 (), .OCE (1'b1), .RST (oserdes_rst), .SHIFTIN1 (), // not extended .SHIFTIN2 (), // not extended .T1 (), .T2 (), .T3 (), .T4 (), .TCE (1'b1) ); end // ddr end // oserdes_dq_ end // output_ endgenerate generate if ( PO_DATA_CTL == "TRUE" ) begin : dqs_gen ODDR #(.DDR_CLK_EDGE (ODDR_CLK_EDGE)) oddr_dqs ( .Q (oserdes_dqs_buf), .D1 (oserdes_dqs[0]), .D2 (oserdes_dqs[1]), .C (oserdes_clk_delayed), .R (1'b0), .S (), .CE (1'b1) ); ODDR #(.DDR_CLK_EDGE (ODDR_CLK_EDGE)) oddr_dqsts ( .Q (oserdes_dqsts_buf), .D1 (oserdes_dqsts[0]), .D2 (oserdes_dqsts[0]), .C (oserdes_clk_delayed), .R (), .S (1'b0), .CE (1'b1) ); end // sdr rate else begin:null_dqs end endgenerate endmodule // byte_group_io
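// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the MIG core): one possible hookup of a
// single data (DQ/DQS) byte group using the module above with its default
// 12-bit BUS_WIDTH. In the shipped PHY this primitive is normally driven from
// the byte-lane wrapper with per-lane parameters; the wrapper module below,
// its signal names, and the BITLANES value chosen here are hypothetical and
// shown only to make the parameter/port mapping concrete.
// ---------------------------------------------------------------------------
module ddr_byte_group_io_example (
  input         phy_clk,
  input         rst,
  input         iserdes_clk,
  input         iserdes_clkb,
  input         iserdes_clkdiv,
  input         oserdes_clk,
  input         oserdes_clk_delayed,
  input         oserdes_clkdiv,
  input         oserdes_rst,
  input         iserdes_rst,
  input         idelayctrl_refclk,
  input         idelay_inc,
  input         idelay_ce,
  input         idelay_ld,
  input  [9:0]  dq_pins_in,    // captured DQ/DM inputs from the IOBs
  input         dqs_pin_in,    // captured DQS input from the IOB
  input  [47:0] wr_data,       // 4 bits of write data per output bit lane
  input  [1:0]  wr_dq_ts,      // DQ tristate control
  input  [1:0]  wr_dqs,        // DQS output pattern
  input  [1:0]  wr_dqs_ts,     // DQS tristate control
  output [11:0] dq_pins_out,
  output [11:0] dq_pins_ts,
  output        dqs_pin_out,
  output        dqs_pin_ts,
  output [39:0] rd_data,       // 4 bits of read data per input bit lane
  output        dqs_to_phaser
);

  mig_7series_v1_9_ddr_byte_group_io #(
    .PO_DATA_CTL          ("TRUE"),             // data lane, not address/control
    .BITLANES             (12'b0011_1111_1111), // assumed: lower 10 bit lanes used
    .BITLANES_OUTONLY     (12'b0000_0000_0000),
    .IDELAYE2_IDELAY_TYPE ("VARIABLE"),
    .IODELAY_GRP          ("IODELAY_MIG")
  ) u_byte_group_io (
    .mem_dq_in           (dq_pins_in),
    .mem_dq_out          (dq_pins_out),
    .mem_dq_ts           (dq_pins_ts),
    .mem_dqs_in          (dqs_pin_in),
    .mem_dqs_out         (dqs_pin_out),
    .mem_dqs_ts          (dqs_pin_ts),
    .iserdes_dout        (rd_data),
    .dqs_to_phaser       (dqs_to_phaser),
    .iserdes_clk         (iserdes_clk),
    .iserdes_clkb        (iserdes_clkb),
    .iserdes_clkdiv      (iserdes_clkdiv),
    .phy_clk             (phy_clk),
    .rst                 (rst),
    .oserdes_rst         (oserdes_rst),
    .iserdes_rst         (iserdes_rst),
    .oserdes_dqs         (wr_dqs),
    .oserdes_dqsts       (wr_dqs_ts),
    .oserdes_dq          (wr_data),
    .oserdes_dqts        (wr_dq_ts),
    .oserdes_clk         (oserdes_clk),
    .oserdes_clk_delayed (oserdes_clk_delayed),
    .oserdes_clkdiv      (oserdes_clkdiv),
    .idelay_inc          (idelay_inc),
    .idelay_ce           (idelay_ce),
    .idelay_ld           (idelay_ld),
    .idelayctrl_refclk   (idelayctrl_refclk)
  );

endmodule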
/********************************************************** -- (c) Copyright 2011 - 2013 Xilinx, Inc. All rights reserved. -- -- This file contains confidential and proprietary information -- of Xilinx, Inc. and is protected under U.S. and -- international copyright and other intellectual property -- laws. -- -- DISCLAIMER -- This disclaimer is not a license and does not grant any -- rights to the materials distributed herewith. Except as -- otherwise provided in a valid license issued to you by -- Xilinx, and to the maximum extent permitted by applicable -- law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND -- WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES -- AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING -- BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- -- INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and -- (2) Xilinx shall not be liable (whether in contract or tort, -- including negligence, or under any other theory of -- liability) for any loss or damage of any kind or nature -- related to, arising under or in connection with these -- materials, including for any direct, or any indirect, -- special, incidental, or consequential loss or damage -- (including loss of data, profits, goodwill, or any type of -- loss or damage suffered as a result of any action brought -- by a third party) even if such damage or loss was -- reasonably foreseeable or Xilinx had been advised of the -- possibility of the same. -- -- CRITICAL APPLICATIONS -- Xilinx products are not designed or intended to be fail- -- safe, or for use in any application requiring fail-safe -- performance, such as life-support or safety devices or -- systems, Class III medical devices, nuclear facilities, -- applications related to the deployment of airbags, or any -- other applications that could lead to death, personal -- injury, or severe property or environmental damage -- (individually and collectively, "Critical -- Applications"). A Customer assumes the sole risk and -- liability of any use of Xilinx products in Critical -- Applications, subject only to applicable laws and -- regulations governing limitations on product liability. -- -- THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS -- PART OF THIS FILE AT ALL TIMES. // // THIS NOTICE MUST BE RETAINED AS PART OF THIS FILE AT ALL TIMES. // // // Owner: Gary Martin // Revision: $Id: //depot/icm/proj/common/head/rtl/v32_cmt/rtl/phy/phy_4lanes.v#6 $ // $Author: gary $ // $DateTime: 2010/05/11 18:05:17 $ // $Change: 490882 $ // Description: // This verilog file is the parameterizable 4-byte lane phy primitive top // This module may be ganged to create an N-lane phy. // // History: // Date Engineer Description // 04/01/2010 G. Martin Initial Checkin. 
// /////////////////////////////////////////////////////////// **********************************************************/ `timescale 1ps/1ps `define PC_DATA_OFFSET_RANGE 22:17 module mig_7series_v1_9_ddr_phy_4lanes #( parameter GENERATE_IDELAYCTRL = "TRUE", parameter IODELAY_GRP = "IODELAY_MIG", parameter BANK_TYPE = "HP_IO", // # = "HP_IO", "HPL_IO", "HR_IO", "HRL_IO" parameter BYTELANES_DDR_CK = 24'b0010_0010_0010_0010_0010_0010, parameter NUM_DDR_CK = 1, // next three parameter fields correspond to byte lanes for lane order DCBA parameter BYTE_LANES = 4'b1111, // lane existence, one per lane parameter DATA_CTL_N = 4'b1111, // data or control, per lane parameter BITLANES = 48'hffff_ffff_ffff, parameter BITLANES_OUTONLY = 48'h0000_0000_0000, parameter LANE_REMAP = 16'h3210,// 4-bit index // used to rewire to one of four // input/output buss lanes // example: 0321 remaps lanes as: // D->A // C->D // B->C // A->B parameter LAST_BANK = "FALSE", parameter USE_PRE_POST_FIFO = "FALSE", parameter RCLK_SELECT_LANE = "B", parameter real TCK = 0.00, parameter SYNTHESIS = "FALSE", parameter PO_CTL_COARSE_BYPASS = "FALSE", parameter PO_FINE_DELAY = 0, parameter PI_SEL_CLK_OFFSET = 0, // phy_control paramter used in other paramsters parameter PC_CLK_RATIO = 4, //phaser_in parameters parameter A_PI_FREQ_REF_DIV = "NONE", parameter A_PI_CLKOUT_DIV = 2, parameter A_PI_BURST_MODE = "TRUE", parameter A_PI_OUTPUT_CLK_SRC = "DELAYED_REF" , //"DELAYED_REF", parameter A_PI_FINE_DELAY = 60, parameter A_PI_SYNC_IN_DIV_RST = "TRUE", parameter B_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV, parameter B_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV, parameter B_PI_BURST_MODE = A_PI_BURST_MODE, parameter B_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC, parameter B_PI_FINE_DELAY = A_PI_FINE_DELAY, parameter B_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST, parameter C_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV, parameter C_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV, parameter C_PI_BURST_MODE = A_PI_BURST_MODE, parameter C_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC, parameter C_PI_FINE_DELAY = 0, parameter C_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST, parameter D_PI_FREQ_REF_DIV = A_PI_FREQ_REF_DIV, parameter D_PI_CLKOUT_DIV = A_PI_CLKOUT_DIV, parameter D_PI_BURST_MODE = A_PI_BURST_MODE, parameter D_PI_OUTPUT_CLK_SRC = A_PI_OUTPUT_CLK_SRC, parameter D_PI_FINE_DELAY = 0, parameter D_PI_SYNC_IN_DIV_RST = A_PI_SYNC_IN_DIV_RST, //phaser_out parameters parameter A_PO_CLKOUT_DIV = (DATA_CTL_N[0] == 0) ? PC_CLK_RATIO : 2, parameter A_PO_FINE_DELAY = PO_FINE_DELAY, parameter A_PO_COARSE_DELAY = 0, parameter A_PO_OCLK_DELAY = 0, parameter A_PO_OCLKDELAY_INV = "FALSE", parameter A_PO_OUTPUT_CLK_SRC = "DELAYED_REF", parameter A_PO_SYNC_IN_DIV_RST = "TRUE", //parameter A_PO_SYNC_IN_DIV_RST = "FALSE", parameter B_PO_CLKOUT_DIV = (DATA_CTL_N[1] == 0) ? PC_CLK_RATIO : 2, parameter B_PO_FINE_DELAY = PO_FINE_DELAY, parameter B_PO_COARSE_DELAY = A_PO_COARSE_DELAY, parameter B_PO_OCLK_DELAY = A_PO_OCLK_DELAY, parameter B_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV, parameter B_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC, parameter B_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST, parameter C_PO_CLKOUT_DIV = (DATA_CTL_N[2] == 0) ? PC_CLK_RATIO : 2, parameter C_PO_FINE_DELAY = PO_FINE_DELAY, parameter C_PO_COARSE_DELAY = A_PO_COARSE_DELAY, parameter C_PO_OCLK_DELAY = A_PO_OCLK_DELAY, parameter C_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV, parameter C_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC, parameter C_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST, parameter D_PO_CLKOUT_DIV = (DATA_CTL_N[3] == 0) ? 
PC_CLK_RATIO : 2, parameter D_PO_FINE_DELAY = PO_FINE_DELAY, parameter D_PO_COARSE_DELAY = A_PO_COARSE_DELAY, parameter D_PO_OCLK_DELAY = A_PO_OCLK_DELAY, parameter D_PO_OCLKDELAY_INV = A_PO_OCLKDELAY_INV, parameter D_PO_OUTPUT_CLK_SRC = A_PO_OUTPUT_CLK_SRC, parameter D_PO_SYNC_IN_DIV_RST = A_PO_SYNC_IN_DIV_RST, parameter A_IDELAYE2_IDELAY_TYPE = "VARIABLE", parameter A_IDELAYE2_IDELAY_VALUE = 00, parameter B_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE, parameter B_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE, parameter C_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE, parameter C_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE, parameter D_IDELAYE2_IDELAY_TYPE = A_IDELAYE2_IDELAY_TYPE, parameter D_IDELAYE2_IDELAY_VALUE = A_IDELAYE2_IDELAY_VALUE, // phy_control parameters parameter PC_BURST_MODE = "TRUE", parameter PC_DATA_CTL_N = DATA_CTL_N, parameter PC_CMD_OFFSET = 0, parameter PC_RD_CMD_OFFSET_0 = 0, parameter PC_RD_CMD_OFFSET_1 = 0, parameter PC_RD_CMD_OFFSET_2 = 0, parameter PC_RD_CMD_OFFSET_3 = 0, parameter PC_CO_DURATION = 1, parameter PC_DI_DURATION = 1, parameter PC_DO_DURATION = 1, parameter PC_RD_DURATION_0 = 0, parameter PC_RD_DURATION_1 = 0, parameter PC_RD_DURATION_2 = 0, parameter PC_RD_DURATION_3 = 0, parameter PC_WR_CMD_OFFSET_0 = 5, parameter PC_WR_CMD_OFFSET_1 = 5, parameter PC_WR_CMD_OFFSET_2 = 5, parameter PC_WR_CMD_OFFSET_3 = 5, parameter PC_WR_DURATION_0 = 6, parameter PC_WR_DURATION_1 = 6, parameter PC_WR_DURATION_2 = 6, parameter PC_WR_DURATION_3 = 6, parameter PC_AO_WRLVL_EN = 0, parameter PC_AO_TOGGLE = 4'b0101, // odd bits are toggle (CKE) parameter PC_FOUR_WINDOW_CLOCKS = 63, parameter PC_EVENTS_DELAY = 18, parameter PC_PHY_COUNT_EN = "TRUE", parameter PC_SYNC_MODE = "TRUE", parameter PC_DISABLE_SEQ_MATCH = "TRUE", parameter PC_MULTI_REGION = "FALSE", // io fifo parameters parameter A_OF_ARRAY_MODE = (DATA_CTL_N[0] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4", parameter B_OF_ARRAY_MODE = (DATA_CTL_N[1] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4", parameter C_OF_ARRAY_MODE = (DATA_CTL_N[2] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4", parameter D_OF_ARRAY_MODE = (DATA_CTL_N[3] == 1) ? "ARRAY_MODE_8_X_4" : "ARRAY_MODE_4_X_4", parameter OF_ALMOST_EMPTY_VALUE = 1, parameter OF_ALMOST_FULL_VALUE = 1, parameter OF_OUTPUT_DISABLE = "TRUE", parameter OF_SYNCHRONOUS_MODE = PC_SYNC_MODE, parameter A_OS_DATA_RATE = "DDR", parameter A_OS_DATA_WIDTH = 4, parameter B_OS_DATA_RATE = A_OS_DATA_RATE, parameter B_OS_DATA_WIDTH = A_OS_DATA_WIDTH, parameter C_OS_DATA_RATE = A_OS_DATA_RATE, parameter C_OS_DATA_WIDTH = A_OS_DATA_WIDTH, parameter D_OS_DATA_RATE = A_OS_DATA_RATE, parameter D_OS_DATA_WIDTH = A_OS_DATA_WIDTH, parameter A_IF_ARRAY_MODE = "ARRAY_MODE_4_X_8", parameter B_IF_ARRAY_MODE = A_IF_ARRAY_MODE, parameter C_IF_ARRAY_MODE = A_IF_ARRAY_MODE, parameter D_IF_ARRAY_MODE = A_IF_ARRAY_MODE, parameter IF_ALMOST_EMPTY_VALUE = 1, parameter IF_ALMOST_FULL_VALUE = 1, parameter IF_SYNCHRONOUS_MODE = PC_SYNC_MODE, // this is used locally, not for external pushdown // NOTE: the 0+ is needed in each to coerce to integer for addition. // otherwise 4x 1'b values are added producing a 1'b value. parameter HIGHEST_LANE = LAST_BANK == "FALSE" ? 4 : (BYTE_LANES[3] ? 4 : BYTE_LANES[2] ? 3 : BYTE_LANES[1] ? 
2 : 1), parameter N_CTL_LANES = ((0+(!DATA_CTL_N[0]) & BYTE_LANES[0]) + (0+(!DATA_CTL_N[1]) & BYTE_LANES[1]) + (0+(!DATA_CTL_N[2]) & BYTE_LANES[2]) + (0+(!DATA_CTL_N[3]) & BYTE_LANES[3])), parameter N_BYTE_LANES = (0+BYTE_LANES[0]) + (0+BYTE_LANES[1]) + (0+BYTE_LANES[2]) + (0+BYTE_LANES[3]), parameter N_DATA_LANES = N_BYTE_LANES - N_CTL_LANES, // assume odt per rank + any declared cke's parameter AUXOUT_WIDTH = 4, parameter LP_DDR_CK_WIDTH = 2 ,parameter CKE_ODT_AUX = "FALSE" ) ( //`include "phy.vh" input rst, input phy_clk, input phy_ctl_clk, input freq_refclk, input mem_refclk, input mem_refclk_div4, input pll_lock, input sync_pulse, input idelayctrl_refclk, input [HIGHEST_LANE*80-1:0] phy_dout, input phy_cmd_wr_en, input phy_data_wr_en, input phy_rd_en, input phy_ctl_mstr_empty, input [31:0] phy_ctl_wd, input [`PC_DATA_OFFSET_RANGE] data_offset, input phy_ctl_wr, input if_empty_def, input phyGo, input input_sink, output [(LP_DDR_CK_WIDTH*24)-1:0] ddr_clk, // to memory output rclk, output if_a_empty, output if_empty, output byte_rd_en, output if_empty_or, output if_empty_and, output of_ctl_a_full, output of_data_a_full, output of_ctl_full, output of_data_full, output pre_data_a_full, output [HIGHEST_LANE*80-1:0]phy_din, // assume input bus same size as output bus output phy_ctl_empty, output phy_ctl_a_full, output phy_ctl_full, output [HIGHEST_LANE*12-1:0]mem_dq_out, output [HIGHEST_LANE*12-1:0]mem_dq_ts, input [HIGHEST_LANE*10-1:0]mem_dq_in, output [HIGHEST_LANE-1:0] mem_dqs_out, output [HIGHEST_LANE-1:0] mem_dqs_ts, input [HIGHEST_LANE-1:0] mem_dqs_in, input [1:0] byte_rd_en_oth_banks, output [AUXOUT_WIDTH-1:0] aux_out, output reg rst_out = 0, output reg mcGo=0, output phy_ctl_ready, output ref_dll_lock, input if_rst, input phy_read_calib, input phy_write_calib, input idelay_inc, input idelay_ce, input idelay_ld, input [2:0] calib_sel, input calib_zero_ctrl, input [HIGHEST_LANE-1:0] calib_zero_lanes, input calib_in_common, input po_fine_enable, input po_coarse_enable, input po_fine_inc, input po_coarse_inc, input po_counter_load_en, input po_counter_read_en, input [8:0] po_counter_load_val, input po_sel_fine_oclk_delay, output reg po_coarse_overflow, output reg po_fine_overflow, output reg [8:0] po_counter_read_val, input pi_rst_dqs_find, input pi_fine_enable, input pi_fine_inc, input pi_counter_load_en, input pi_counter_read_en, input [5:0] pi_counter_load_val, output reg pi_fine_overflow, output reg [5:0] pi_counter_read_val, output reg pi_dqs_found, output pi_dqs_found_all, output pi_dqs_found_any, output [HIGHEST_LANE-1:0] pi_phase_locked_lanes, output [HIGHEST_LANE-1:0] pi_dqs_found_lanes, output reg pi_dqs_out_of_range, output reg pi_phase_locked, output pi_phase_locked_all ); localparam DATA_CTL_A = (~DATA_CTL_N[0]); localparam DATA_CTL_B = (~DATA_CTL_N[1]); localparam DATA_CTL_C = (~DATA_CTL_N[2]); localparam DATA_CTL_D = (~DATA_CTL_N[3]); localparam PRESENT_CTL_A = BYTE_LANES[0] && ! DATA_CTL_N[0]; localparam PRESENT_CTL_B = BYTE_LANES[1] && ! DATA_CTL_N[1]; localparam PRESENT_CTL_C = BYTE_LANES[2] && ! DATA_CTL_N[2]; localparam PRESENT_CTL_D = BYTE_LANES[3] && ! DATA_CTL_N[3]; localparam PRESENT_DATA_A = BYTE_LANES[0] && DATA_CTL_N[0]; localparam PRESENT_DATA_B = BYTE_LANES[1] && DATA_CTL_N[1]; localparam PRESENT_DATA_C = BYTE_LANES[2] && DATA_CTL_N[2]; localparam PRESENT_DATA_D = BYTE_LANES[3] && DATA_CTL_N[3]; localparam PC_DATA_CTL_A = (DATA_CTL_A) ? "FALSE" : "TRUE"; localparam PC_DATA_CTL_B = (DATA_CTL_B) ? "FALSE" : "TRUE"; localparam PC_DATA_CTL_C = (DATA_CTL_C) ? 
"FALSE" : "TRUE"; localparam PC_DATA_CTL_D = (DATA_CTL_D) ? "FALSE" : "TRUE"; localparam A_PO_COARSE_BYPASS = (DATA_CTL_A) ? PO_CTL_COARSE_BYPASS : "FALSE"; localparam B_PO_COARSE_BYPASS = (DATA_CTL_B) ? PO_CTL_COARSE_BYPASS : "FALSE"; localparam C_PO_COARSE_BYPASS = (DATA_CTL_C) ? PO_CTL_COARSE_BYPASS : "FALSE"; localparam D_PO_COARSE_BYPASS = (DATA_CTL_D) ? PO_CTL_COARSE_BYPASS : "FALSE"; localparam IO_A_START = 41; localparam IO_A_END = 40; localparam IO_B_START = 43; localparam IO_B_END = 42; localparam IO_C_START = 45; localparam IO_C_END = 44; localparam IO_D_START = 47; localparam IO_D_END = 46; localparam IO_A_X_START = (HIGHEST_LANE * 10) + 1; localparam IO_A_X_END = (IO_A_X_START-1); localparam IO_B_X_START = (IO_A_X_START + 2); localparam IO_B_X_END = (IO_B_X_START -1); localparam IO_C_X_START = (IO_B_X_START + 2); localparam IO_C_X_END = (IO_C_X_START -1); localparam IO_D_X_START = (IO_C_X_START + 2); localparam IO_D_X_END = (IO_D_X_START -1); localparam MSB_BURST_PEND_PO = 3; localparam MSB_BURST_PEND_PI = 7; localparam MSB_RANK_SEL_I = MSB_BURST_PEND_PI + 8; localparam PHASER_CTL_BUS_WIDTH = MSB_RANK_SEL_I + 1; wire [1:0] oserdes_dqs; wire [1:0] oserdes_dqs_ts; wire [1:0] oserdes_dq_ts; wire [PHASER_CTL_BUS_WIDTH-1:0] phaser_ctl_bus; wire [7:0] in_rank; wire [11:0] IO_A; wire [11:0] IO_B; wire [11:0] IO_C; wire [11:0] IO_D; wire [319:0] phy_din_remap; reg A_po_counter_read_en; wire [8:0] A_po_counter_read_val; reg A_pi_counter_read_en; wire [5:0] A_pi_counter_read_val; wire A_pi_fine_overflow; wire A_po_coarse_overflow; wire A_po_fine_overflow; wire A_pi_dqs_found; wire A_pi_dqs_out_of_range; wire A_pi_phase_locked; wire A_pi_iserdes_rst; reg A_pi_fine_enable; reg A_pi_fine_inc; reg A_pi_counter_load_en; reg [5:0] A_pi_counter_load_val; reg A_pi_rst_dqs_find; reg A_po_fine_enable; reg A_po_coarse_enable; (* keep = "true", max_fanout = 3 *) reg A_po_fine_inc /* synthesis syn_maxfan = 3 */; reg A_po_sel_fine_oclk_delay; reg A_po_coarse_inc; reg A_po_counter_load_en; reg [8:0] A_po_counter_load_val; wire A_rclk; reg A_idelay_ce; reg A_idelay_ld; reg B_po_counter_read_en; wire [8:0] B_po_counter_read_val; reg B_pi_counter_read_en; wire [5:0] B_pi_counter_read_val; wire B_pi_fine_overflow; wire B_po_coarse_overflow; wire B_po_fine_overflow; wire B_pi_phase_locked; wire B_pi_iserdes_rst; wire B_pi_dqs_found; wire B_pi_dqs_out_of_range; reg B_pi_fine_enable; reg B_pi_fine_inc; reg B_pi_counter_load_en; reg [5:0] B_pi_counter_load_val; reg B_pi_rst_dqs_find; reg B_po_fine_enable; reg B_po_coarse_enable; (* keep = "true", max_fanout = 3 *) reg B_po_fine_inc /* synthesis syn_maxfan = 3 */; reg B_po_coarse_inc; reg B_po_sel_fine_oclk_delay; reg B_po_counter_load_en; reg [8:0] B_po_counter_load_val; wire B_rclk; reg B_idelay_ce; reg B_idelay_ld; reg C_pi_fine_inc; reg D_pi_fine_inc; reg C_pi_fine_enable; reg D_pi_fine_enable; reg C_po_counter_load_en; reg D_po_counter_load_en; reg C_po_coarse_inc; reg D_po_coarse_inc; (* keep = "true", max_fanout = 3 *) reg C_po_fine_inc /* synthesis syn_maxfan = 3 */; (* keep = "true", max_fanout = 3 *) reg D_po_fine_inc /* synthesis syn_maxfan = 3 */; reg C_po_sel_fine_oclk_delay; reg D_po_sel_fine_oclk_delay; reg [5:0] C_pi_counter_load_val; reg [5:0] D_pi_counter_load_val; reg [8:0] C_po_counter_load_val; reg [8:0] D_po_counter_load_val; reg C_po_coarse_enable; reg D_po_coarse_enable; reg C_po_fine_enable; reg D_po_fine_enable; wire C_po_coarse_overflow; wire D_po_coarse_overflow; wire C_po_fine_overflow; wire D_po_fine_overflow; wire [8:0] 
C_po_counter_read_val; wire [8:0] D_po_counter_read_val; reg C_po_counter_read_en; reg D_po_counter_read_en; wire C_pi_dqs_found; wire D_pi_dqs_found; wire C_pi_fine_overflow; wire D_pi_fine_overflow; reg C_pi_counter_read_en; reg D_pi_counter_read_en; reg C_pi_counter_load_en; reg D_pi_counter_load_en; wire C_pi_phase_locked; wire C_pi_iserdes_rst; wire D_pi_phase_locked; wire D_pi_iserdes_rst; wire C_pi_dqs_out_of_range; wire D_pi_dqs_out_of_range; wire [5:0] C_pi_counter_read_val; wire [5:0] D_pi_counter_read_val; wire C_rclk; wire D_rclk; reg C_idelay_ce; reg D_idelay_ce; reg C_idelay_ld; reg D_idelay_ld; reg C_pi_rst_dqs_find; reg D_pi_rst_dqs_find; wire pi_iserdes_rst; wire A_if_empty; wire B_if_empty; wire C_if_empty; wire D_if_empty; wire A_byte_rd_en; wire B_byte_rd_en; wire C_byte_rd_en; wire D_byte_rd_en; wire A_if_a_empty; wire B_if_a_empty; wire C_if_a_empty; wire D_if_a_empty; wire A_if_full; wire B_if_full; wire C_if_full; wire D_if_full; wire A_of_empty; wire B_of_empty; wire C_of_empty; wire D_of_empty; wire A_of_full; wire B_of_full; wire C_of_full; wire D_of_full; wire A_of_ctl_full; wire B_of_ctl_full; wire C_of_ctl_full; wire D_of_ctl_full; wire A_of_data_full; wire B_of_data_full; wire C_of_data_full; wire D_of_data_full; wire A_of_a_full; wire B_of_a_full; wire C_of_a_full; wire D_of_a_full; wire A_pre_fifo_a_full; wire B_pre_fifo_a_full; wire C_pre_fifo_a_full; wire D_pre_fifo_a_full; wire A_of_ctl_a_full; wire B_of_ctl_a_full; wire C_of_ctl_a_full; wire D_of_ctl_a_full; wire A_of_data_a_full; wire B_of_data_a_full; wire C_of_data_a_full; wire D_of_data_a_full; wire A_pre_data_a_full; wire B_pre_data_a_full; wire C_pre_data_a_full; wire D_pre_data_a_full; wire [LP_DDR_CK_WIDTH*6-1:0] A_ddr_clk; // for generation wire [LP_DDR_CK_WIDTH*6-1:0] B_ddr_clk; // wire [LP_DDR_CK_WIDTH*6-1:0] C_ddr_clk; // wire [LP_DDR_CK_WIDTH*6-1:0] D_ddr_clk; // wire [3:0] dummy_data; wire [31:0] _phy_ctl_wd; wire [1:0] phy_encalib; assign pi_dqs_found_all = (! PRESENT_DATA_A | A_pi_dqs_found) & (! PRESENT_DATA_B | B_pi_dqs_found) & (! PRESENT_DATA_C | C_pi_dqs_found) & (! PRESENT_DATA_D | D_pi_dqs_found) ; assign pi_dqs_found_any = ( PRESENT_DATA_A & A_pi_dqs_found) | ( PRESENT_DATA_B & B_pi_dqs_found) | ( PRESENT_DATA_C & C_pi_dqs_found) | ( PRESENT_DATA_D & D_pi_dqs_found) ; assign pi_phase_locked_all = (! PRESENT_DATA_A | A_pi_phase_locked) & (! PRESENT_DATA_B | B_pi_phase_locked) & (! PRESENT_DATA_C | C_pi_phase_locked) & (! PRESENT_DATA_D | D_pi_phase_locked); wire dangling_inputs = (& dummy_data) & input_sink & 1'b0; // this reduces all constant 0 values to 1 signal // which is combined into another signals such that // the other signal isn't changed. The purpose // is to fake the tools into ignoring dangling inputs. // Because it is anded with 1'b0, the contributing signals // are folded as constants or trimmed. assign if_empty = !if_empty_def ? (A_if_empty | B_if_empty | C_if_empty | D_if_empty) : (A_if_empty & B_if_empty & C_if_empty & D_if_empty); assign byte_rd_en = !if_empty_def ? 
(A_byte_rd_en & B_byte_rd_en & C_byte_rd_en & D_byte_rd_en) : (A_byte_rd_en | B_byte_rd_en | C_byte_rd_en | D_byte_rd_en); assign if_empty_or = (A_if_empty | B_if_empty | C_if_empty | D_if_empty); assign if_empty_and = (A_if_empty & B_if_empty & C_if_empty & D_if_empty); assign if_a_empty = A_if_a_empty | B_if_a_empty | C_if_a_empty | D_if_a_empty; assign if_full = A_if_full | B_if_full | C_if_full | D_if_full ; assign of_empty = A_of_empty & B_of_empty & C_of_empty & D_of_empty; assign of_ctl_full = A_of_ctl_full | B_of_ctl_full | C_of_ctl_full | D_of_ctl_full ; assign of_data_full = A_of_data_full | B_of_data_full | C_of_data_full | D_of_data_full ; assign of_ctl_a_full = A_of_ctl_a_full | B_of_ctl_a_full | C_of_ctl_a_full | D_of_ctl_a_full ; assign of_data_a_full = A_of_data_a_full | B_of_data_a_full | C_of_data_a_full | D_of_data_a_full | dangling_inputs ; assign pre_data_a_full = A_pre_data_a_full | B_pre_data_a_full | C_pre_data_a_full | D_pre_data_a_full; function [79:0] part_select_80; input [319:0] vector; input [1:0] select; begin case (select) 2'b00 : part_select_80[79:0] = vector[1*80-1:0*80]; 2'b01 : part_select_80[79:0] = vector[2*80-1:1*80]; 2'b10 : part_select_80[79:0] = vector[3*80-1:2*80]; 2'b11 : part_select_80[79:0] = vector[4*80-1:3*80]; endcase end endfunction wire [319:0] phy_dout_remap; reg rst_out_trig = 1'b0; reg [31:0] rclk_delay; reg rst_edge1 = 1'b0; reg rst_edge2 = 1'b0; reg rst_edge3 = 1'b0; reg rst_edge_detect = 1'b0; wire rclk_; reg rst_out_start = 1'b0 ; reg rst_primitives=0; reg A_rst_primitives=0; reg B_rst_primitives=0; reg C_rst_primitives=0; reg D_rst_primitives=0; `ifdef USE_PHY_CONTROL_TEST wire [15:0] test_output; wire [15:0] test_input; wire [2:0] test_select=0; wire scan_enable = 0; `endif generate genvar i; if (RCLK_SELECT_LANE == "A") begin assign rclk_ = A_rclk; assign pi_iserdes_rst = A_pi_iserdes_rst; end else if (RCLK_SELECT_LANE == "B") begin assign rclk_ = B_rclk; assign pi_iserdes_rst = B_pi_iserdes_rst; end else if (RCLK_SELECT_LANE == "C") begin assign rclk_ = C_rclk; assign pi_iserdes_rst = C_pi_iserdes_rst; end else if (RCLK_SELECT_LANE == "D") begin assign rclk_ = D_rclk; assign pi_iserdes_rst = D_pi_iserdes_rst; end else begin assign rclk_ = B_rclk; // default end endgenerate assign ddr_clk[LP_DDR_CK_WIDTH*6-1:0] = A_ddr_clk; assign ddr_clk[LP_DDR_CK_WIDTH*12-1:LP_DDR_CK_WIDTH*6] = B_ddr_clk; assign ddr_clk[LP_DDR_CK_WIDTH*18-1:LP_DDR_CK_WIDTH*12] = C_ddr_clk; assign ddr_clk[LP_DDR_CK_WIDTH*24-1:LP_DDR_CK_WIDTH*18] = D_ddr_clk; assign pi_phase_locked_lanes = {(! PRESENT_DATA_A[0] | A_pi_phase_locked), (! PRESENT_DATA_B[0] | B_pi_phase_locked) , (! PRESENT_DATA_C[0] | C_pi_phase_locked) , (! 
PRESENT_DATA_D[0] | D_pi_phase_locked)}; assign pi_dqs_found_lanes = {D_pi_dqs_found, C_pi_dqs_found, B_pi_dqs_found, A_pi_dqs_found}; // this block scrubs X from rclk_delay[11] reg rclk_delay_11; always @(rclk_delay[11]) begin : rclk_delay_11_blk if ( rclk_delay[11]) rclk_delay_11 = 1; else rclk_delay_11 = 0; end always @(posedge phy_clk or posedge rst ) begin // scrub 4-state values from rclk_delay[11] if ( rst) begin rst_out <= #1 0; end else begin if ( rclk_delay_11) rst_out <= #1 1; end end always @(posedge phy_clk ) begin // phy_ctl_ready drives reset of the system rst_primitives <= !phy_ctl_ready ; A_rst_primitives <= rst_primitives ; B_rst_primitives <= rst_primitives ; C_rst_primitives <= rst_primitives ; D_rst_primitives <= rst_primitives ; rclk_delay <= #1 (rclk_delay << 1) | (!rst_primitives && phyGo); mcGo <= #1 rst_out ; end generate if (BYTE_LANES[0]) begin assign dummy_data[0] = 0; end else begin assign dummy_data[0] = &phy_dout_remap[1*80-1:0*80]; end if (BYTE_LANES[1]) begin assign dummy_data[1] = 0; end else begin assign dummy_data[1] = &phy_dout_remap[2*80-1:1*80]; end if (BYTE_LANES[2]) begin assign dummy_data[2] = 0; end else begin assign dummy_data[2] = &phy_dout_remap[3*80-1:2*80]; end if (BYTE_LANES[3]) begin assign dummy_data[3] = 0; end else begin assign dummy_data[3] = &phy_dout_remap[4*80-1:3*80]; end if (PRESENT_DATA_A) begin assign A_of_data_full = A_of_full; assign A_of_ctl_full = 0; assign A_of_data_a_full = A_of_a_full; assign A_of_ctl_a_full = 0; assign A_pre_data_a_full = A_pre_fifo_a_full; end else begin assign A_of_ctl_full = A_of_full; assign A_of_data_full = 0; assign A_of_ctl_a_full = A_of_a_full; assign A_of_data_a_full = 0; assign A_pre_data_a_full = 0; end if (PRESENT_DATA_B) begin assign B_of_data_full = B_of_full; assign B_of_ctl_full = 0; assign B_of_data_a_full = B_of_a_full; assign B_of_ctl_a_full = 0; assign B_pre_data_a_full = B_pre_fifo_a_full; end else begin assign B_of_ctl_full = B_of_full; assign B_of_data_full = 0; assign B_of_ctl_a_full = B_of_a_full; assign B_of_data_a_full = 0; assign B_pre_data_a_full = 0; end if (PRESENT_DATA_C) begin assign C_of_data_full = C_of_full; assign C_of_ctl_full = 0; assign C_of_data_a_full = C_of_a_full; assign C_of_ctl_a_full = 0; assign C_pre_data_a_full = C_pre_fifo_a_full; end else begin assign C_of_ctl_full = C_of_full; assign C_of_data_full = 0; assign C_of_ctl_a_full = C_of_a_full; assign C_of_data_a_full = 0; assign C_pre_data_a_full = 0; end if (PRESENT_DATA_D) begin assign D_of_data_full = D_of_full; assign D_of_ctl_full = 0; assign D_of_data_a_full = D_of_a_full; assign D_of_ctl_a_full = 0; assign D_pre_data_a_full = D_pre_fifo_a_full; end else begin assign D_of_ctl_full = D_of_full; assign D_of_data_full = 0; assign D_of_ctl_a_full = D_of_a_full; assign D_of_data_a_full = 0; assign D_pre_data_a_full = 0; end // byte lane must exist and be data lane. 
if (PRESENT_DATA_A )
  case ( LANE_REMAP[1:0] )
    2'b00 : assign phy_din[1*80-1:0]   = phy_din_remap[79:0];
    2'b01 : assign phy_din[2*80-1:80]  = phy_din_remap[79:0];
    2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[79:0];
    2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[79:0];
  endcase
else
  case ( LANE_REMAP[1:0] )
    2'b00 : assign phy_din[1*80-1:0]   = 80'h0;
    2'b01 : assign phy_din[2*80-1:80]  = 80'h0;
    2'b10 : assign phy_din[3*80-1:160] = 80'h0;
    2'b11 : assign phy_din[4*80-1:240] = 80'h0;
  endcase
if (PRESENT_DATA_B )
  case ( LANE_REMAP[5:4] )
    2'b00 : assign phy_din[1*80-1:0]   = phy_din_remap[159:80];
    2'b01 : assign phy_din[2*80-1:80]  = phy_din_remap[159:80];
    2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[159:80];
    2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[159:80];
  endcase
else if (HIGHEST_LANE > 1)
  case ( LANE_REMAP[5:4] )
    2'b00 : assign phy_din[1*80-1:0]   = 80'h0;
    2'b01 : assign phy_din[2*80-1:80]  = 80'h0;
    2'b10 : assign phy_din[3*80-1:160] = 80'h0;
    2'b11 : assign phy_din[4*80-1:240] = 80'h0;
  endcase
if (PRESENT_DATA_C)
  case ( LANE_REMAP[9:8] )
    2'b00 : assign phy_din[1*80-1:0]   = phy_din_remap[239:160];
    2'b01 : assign phy_din[2*80-1:80]  = phy_din_remap[239:160];
    2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[239:160];
    2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[239:160];
  endcase
else if (HIGHEST_LANE > 2)
  case ( LANE_REMAP[9:8] )
    2'b00 : assign phy_din[1*80-1:0]   = 80'h0;
    2'b01 : assign phy_din[2*80-1:80]  = 80'h0;
    2'b10 : assign phy_din[3*80-1:160] = 80'h0;
    2'b11 : assign phy_din[4*80-1:240] = 80'h0;
  endcase
if (PRESENT_DATA_D )
  case ( LANE_REMAP[13:12] )
    2'b00 : assign phy_din[1*80-1:0]   = phy_din_remap[319:240];
    2'b01 : assign phy_din[2*80-1:80]  = phy_din_remap[319:240];
    2'b10 : assign phy_din[3*80-1:160] = phy_din_remap[319:240];
    2'b11 : assign phy_din[4*80-1:240] = phy_din_remap[319:240];
  endcase
else if (HIGHEST_LANE > 3)
  case ( LANE_REMAP[13:12] )
    2'b00 : assign phy_din[1*80-1:0]   = 80'h0;
    2'b01 : assign phy_din[2*80-1:80]  = 80'h0;
    2'b10 : assign phy_din[3*80-1:160] = 80'h0;
    2'b11 : assign phy_din[4*80-1:240] = 80'h0;
  endcase
if (HIGHEST_LANE > 1)
  assign _phy_ctl_wd = {phy_ctl_wd[31:23], data_offset, phy_ctl_wd[16:0]};
if (HIGHEST_LANE == 1)
  assign _phy_ctl_wd = phy_ctl_wd;
//BUFR #(.BUFR_DIVIDE ("1")) rclk_buf(.I(rclk_), .O(rclk), .CE (1'b1), .CLR (pi_iserdes_rst));
BUFIO rclk_buf(.I(rclk_), .O(rclk) );
if ( BYTE_LANES[0] ) begin : ddr_byte_lane_A
  assign phy_dout_remap[79:0] = part_select_80(phy_dout, (LANE_REMAP[1:0]));
  mig_7series_v1_9_ddr_byte_lane # (
    .ABCD ("A"),
    .PO_DATA_CTL (PC_DATA_CTL_N[0] ?
"TRUE" : "FALSE"), .BITLANES (BITLANES[11:0]), .BITLANES_OUTONLY (BITLANES_OUTONLY[11:0]), .OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE), .OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE), .OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE), //.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE), //.OF_ARRAY_MODE (A_OF_ARRAY_MODE), //.IF_ARRAY_MODE (IF_ARRAY_MODE), .IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE), .IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE), .IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE), .IODELAY_GRP (IODELAY_GRP), .BANK_TYPE (BANK_TYPE), .BYTELANES_DDR_CK (BYTELANES_DDR_CK), .RCLK_SELECT_LANE (RCLK_SELECT_LANE), .USE_PRE_POST_FIFO (USE_PRE_POST_FIFO), .SYNTHESIS (SYNTHESIS), .TCK (TCK), .PC_CLK_RATIO (PC_CLK_RATIO), .PI_BURST_MODE (A_PI_BURST_MODE), .PI_CLKOUT_DIV (A_PI_CLKOUT_DIV), .PI_FREQ_REF_DIV (A_PI_FREQ_REF_DIV), .PI_FINE_DELAY (A_PI_FINE_DELAY), .PI_OUTPUT_CLK_SRC (A_PI_OUTPUT_CLK_SRC), .PI_SYNC_IN_DIV_RST (A_PI_SYNC_IN_DIV_RST), .PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET), .PO_CLKOUT_DIV (A_PO_CLKOUT_DIV), .PO_FINE_DELAY (A_PO_FINE_DELAY), .PO_COARSE_BYPASS (A_PO_COARSE_BYPASS), .PO_COARSE_DELAY (A_PO_COARSE_DELAY), .PO_OCLK_DELAY (A_PO_OCLK_DELAY), .PO_OCLKDELAY_INV (A_PO_OCLKDELAY_INV), .PO_OUTPUT_CLK_SRC (A_PO_OUTPUT_CLK_SRC), .PO_SYNC_IN_DIV_RST (A_PO_SYNC_IN_DIV_RST), .OSERDES_DATA_RATE (A_OS_DATA_RATE), .OSERDES_DATA_WIDTH (A_OS_DATA_WIDTH), .IDELAYE2_IDELAY_TYPE (A_IDELAYE2_IDELAY_TYPE), .IDELAYE2_IDELAY_VALUE (A_IDELAYE2_IDELAY_VALUE) ,.CKE_ODT_AUX (CKE_ODT_AUX) ) ddr_byte_lane_A( .mem_dq_out (mem_dq_out[11:0]), .mem_dq_ts (mem_dq_ts[11:0]), .mem_dq_in (mem_dq_in[9:0]), .mem_dqs_out (mem_dqs_out[0]), .mem_dqs_ts (mem_dqs_ts[0]), .mem_dqs_in (mem_dqs_in[0]), .rst (A_rst_primitives), .phy_clk (phy_clk), .freq_refclk (freq_refclk), .mem_refclk (mem_refclk), .idelayctrl_refclk (idelayctrl_refclk), .sync_pulse (sync_pulse), .ddr_ck_out (A_ddr_clk), .rclk (A_rclk), .pi_dqs_found (A_pi_dqs_found), .dqs_out_of_range (A_pi_dqs_out_of_range), .if_empty_def (if_empty_def), .if_a_empty (A_if_a_empty), .if_empty (A_if_empty), .if_a_full (if_a_full), .if_full (A_if_full), .of_a_empty (of_a_empty), .of_empty (A_of_empty), .of_a_full (A_of_a_full), .of_full (A_of_full), .pre_fifo_a_full (A_pre_fifo_a_full), .phy_din (phy_din_remap[79:0]), .phy_dout (phy_dout_remap[79:0]), .phy_cmd_wr_en (phy_cmd_wr_en), .phy_data_wr_en (phy_data_wr_en), .phy_rd_en (phy_rd_en), .phaser_ctl_bus (phaser_ctl_bus), .if_rst (if_rst), .byte_rd_en_oth_lanes ({B_byte_rd_en,C_byte_rd_en,D_byte_rd_en}), .byte_rd_en_oth_banks (byte_rd_en_oth_banks), .byte_rd_en (A_byte_rd_en), // calibration signals .idelay_inc (idelay_inc), .idelay_ce (A_idelay_ce), .idelay_ld (A_idelay_ld), .pi_rst_dqs_find (A_pi_rst_dqs_find), .po_en_calib (phy_encalib), .po_fine_enable (A_po_fine_enable), .po_coarse_enable (A_po_coarse_enable), .po_fine_inc (A_po_fine_inc), .po_coarse_inc (A_po_coarse_inc), .po_counter_load_en (A_po_counter_load_en), .po_counter_read_en (A_po_counter_read_en), .po_counter_load_val (A_po_counter_load_val), .po_coarse_overflow (A_po_coarse_overflow), .po_fine_overflow (A_po_fine_overflow), .po_counter_read_val (A_po_counter_read_val), .po_sel_fine_oclk_delay(A_po_sel_fine_oclk_delay), .pi_en_calib (phy_encalib), .pi_fine_enable (A_pi_fine_enable), .pi_fine_inc (A_pi_fine_inc), .pi_counter_load_en (A_pi_counter_load_en), .pi_counter_read_en (A_pi_counter_read_en), .pi_counter_load_val (A_pi_counter_load_val), .pi_fine_overflow (A_pi_fine_overflow), .pi_counter_read_val (A_pi_counter_read_val), .pi_iserdes_rst (A_pi_iserdes_rst), 
.pi_phase_locked (A_pi_phase_locked) ); end else begin : no_ddr_byte_lane_A assign A_of_a_full = 1'b0; assign A_of_full = 1'b0; assign A_pre_fifo_a_full = 1'b0; assign A_if_empty = 1'b0; assign A_byte_rd_en = 1'b1; assign A_if_a_empty = 1'b0; assign A_pi_phase_locked = 1; assign A_pi_dqs_found = 1; assign A_rclk = 0; assign A_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}}; assign A_pi_counter_read_val = 0; assign A_po_counter_read_val = 0; assign A_pi_fine_overflow = 0; assign A_po_coarse_overflow = 0; assign A_po_fine_overflow = 0; end if ( BYTE_LANES[1] ) begin : ddr_byte_lane_B assign phy_dout_remap[159:80] = part_select_80(phy_dout, (LANE_REMAP[5:4])); mig_7series_v1_9_ddr_byte_lane # ( .ABCD ("B"), .PO_DATA_CTL (PC_DATA_CTL_N[1] ? "TRUE" : "FALSE"), .BITLANES (BITLANES[23:12]), .BITLANES_OUTONLY (BITLANES_OUTONLY[23:12]), .OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE), .OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE), .OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE), //.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE), //.OF_ARRAY_MODE (B_OF_ARRAY_MODE), //.IF_ARRAY_MODE (IF_ARRAY_MODE), .IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE), .IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE), .IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE), .IODELAY_GRP (IODELAY_GRP), .BANK_TYPE (BANK_TYPE), .BYTELANES_DDR_CK (BYTELANES_DDR_CK), .RCLK_SELECT_LANE (RCLK_SELECT_LANE), .USE_PRE_POST_FIFO (USE_PRE_POST_FIFO), .SYNTHESIS (SYNTHESIS), .TCK (TCK), .PC_CLK_RATIO (PC_CLK_RATIO), .PI_BURST_MODE (B_PI_BURST_MODE), .PI_CLKOUT_DIV (B_PI_CLKOUT_DIV), .PI_FREQ_REF_DIV (B_PI_FREQ_REF_DIV), .PI_FINE_DELAY (B_PI_FINE_DELAY), .PI_OUTPUT_CLK_SRC (B_PI_OUTPUT_CLK_SRC), .PI_SYNC_IN_DIV_RST (B_PI_SYNC_IN_DIV_RST), .PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET), .PO_CLKOUT_DIV (B_PO_CLKOUT_DIV), .PO_FINE_DELAY (B_PO_FINE_DELAY), .PO_COARSE_BYPASS (B_PO_COARSE_BYPASS), .PO_COARSE_DELAY (B_PO_COARSE_DELAY), .PO_OCLK_DELAY (B_PO_OCLK_DELAY), .PO_OCLKDELAY_INV (B_PO_OCLKDELAY_INV), .PO_OUTPUT_CLK_SRC (B_PO_OUTPUT_CLK_SRC), .PO_SYNC_IN_DIV_RST (B_PO_SYNC_IN_DIV_RST), .OSERDES_DATA_RATE (B_OS_DATA_RATE), .OSERDES_DATA_WIDTH (B_OS_DATA_WIDTH), .IDELAYE2_IDELAY_TYPE (B_IDELAYE2_IDELAY_TYPE), .IDELAYE2_IDELAY_VALUE (B_IDELAYE2_IDELAY_VALUE) ,.CKE_ODT_AUX (CKE_ODT_AUX) ) ddr_byte_lane_B( .mem_dq_out (mem_dq_out[23:12]), .mem_dq_ts (mem_dq_ts[23:12]), .mem_dq_in (mem_dq_in[19:10]), .mem_dqs_out (mem_dqs_out[1]), .mem_dqs_ts (mem_dqs_ts[1]), .mem_dqs_in (mem_dqs_in[1]), .rst (B_rst_primitives), .phy_clk (phy_clk), .freq_refclk (freq_refclk), .mem_refclk (mem_refclk), .idelayctrl_refclk (idelayctrl_refclk), .sync_pulse (sync_pulse), .ddr_ck_out (B_ddr_clk), .rclk (B_rclk), .pi_dqs_found (B_pi_dqs_found), .dqs_out_of_range (B_pi_dqs_out_of_range), .if_empty_def (if_empty_def), .if_a_empty (B_if_a_empty), .if_empty (B_if_empty), .if_a_full (/*if_a_full*/), .if_full (B_if_full), .of_a_empty (/*of_a_empty*/), .of_empty (B_of_empty), .of_a_full (B_of_a_full), .of_full (B_of_full), .pre_fifo_a_full (B_pre_fifo_a_full), .phy_din (phy_din_remap[159:80]), .phy_dout (phy_dout_remap[159:80]), .phy_cmd_wr_en (phy_cmd_wr_en), .phy_data_wr_en (phy_data_wr_en), .phy_rd_en (phy_rd_en), .phaser_ctl_bus (phaser_ctl_bus), .if_rst (if_rst), .byte_rd_en_oth_lanes ({A_byte_rd_en,C_byte_rd_en,D_byte_rd_en}), .byte_rd_en_oth_banks (byte_rd_en_oth_banks), .byte_rd_en (B_byte_rd_en), // calibration signals .idelay_inc (idelay_inc), .idelay_ce (B_idelay_ce), .idelay_ld (B_idelay_ld), .pi_rst_dqs_find (B_pi_rst_dqs_find), .po_en_calib (phy_encalib), .po_fine_enable (B_po_fine_enable), .po_coarse_enable 
(B_po_coarse_enable), .po_fine_inc (B_po_fine_inc), .po_coarse_inc (B_po_coarse_inc), .po_counter_load_en (B_po_counter_load_en), .po_counter_read_en (B_po_counter_read_en), .po_counter_load_val (B_po_counter_load_val), .po_coarse_overflow (B_po_coarse_overflow), .po_fine_overflow (B_po_fine_overflow), .po_counter_read_val (B_po_counter_read_val), .po_sel_fine_oclk_delay(B_po_sel_fine_oclk_delay), .pi_en_calib (phy_encalib), .pi_fine_enable (B_pi_fine_enable), .pi_fine_inc (B_pi_fine_inc), .pi_counter_load_en (B_pi_counter_load_en), .pi_counter_read_en (B_pi_counter_read_en), .pi_counter_load_val (B_pi_counter_load_val), .pi_fine_overflow (B_pi_fine_overflow), .pi_counter_read_val (B_pi_counter_read_val), .pi_iserdes_rst (B_pi_iserdes_rst), .pi_phase_locked (B_pi_phase_locked) ); end else begin : no_ddr_byte_lane_B assign B_of_a_full = 1'b0; assign B_of_full = 1'b0; assign B_pre_fifo_a_full = 1'b0; assign B_if_empty = 1'b0; assign B_if_a_empty = 1'b0; assign B_byte_rd_en = 1'b1; assign B_pi_phase_locked = 1; assign B_pi_dqs_found = 1; assign B_rclk = 0; assign B_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}}; assign B_pi_counter_read_val = 0; assign B_po_counter_read_val = 0; assign B_pi_fine_overflow = 0; assign B_po_coarse_overflow = 0; assign B_po_fine_overflow = 0; end if ( BYTE_LANES[2] ) begin : ddr_byte_lane_C assign phy_dout_remap[239:160] = part_select_80(phy_dout, (LANE_REMAP[9:8])); mig_7series_v1_9_ddr_byte_lane # ( .ABCD ("C"), .PO_DATA_CTL (PC_DATA_CTL_N[2] ? "TRUE" : "FALSE"), .BITLANES (BITLANES[35:24]), .BITLANES_OUTONLY (BITLANES_OUTONLY[35:24]), .OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE), .OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE), .OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE), //.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE), //.OF_ARRAY_MODE (C_OF_ARRAY_MODE), //.IF_ARRAY_MODE (IF_ARRAY_MODE), .IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE), .IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE), .IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE), .IODELAY_GRP (IODELAY_GRP), .BANK_TYPE (BANK_TYPE), .BYTELANES_DDR_CK (BYTELANES_DDR_CK), .RCLK_SELECT_LANE (RCLK_SELECT_LANE), .USE_PRE_POST_FIFO (USE_PRE_POST_FIFO), .SYNTHESIS (SYNTHESIS), .TCK (TCK), .PC_CLK_RATIO (PC_CLK_RATIO), .PI_BURST_MODE (C_PI_BURST_MODE), .PI_CLKOUT_DIV (C_PI_CLKOUT_DIV), .PI_FREQ_REF_DIV (C_PI_FREQ_REF_DIV), .PI_FINE_DELAY (C_PI_FINE_DELAY), .PI_OUTPUT_CLK_SRC (C_PI_OUTPUT_CLK_SRC), .PI_SYNC_IN_DIV_RST (C_PI_SYNC_IN_DIV_RST), .PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET), .PO_CLKOUT_DIV (C_PO_CLKOUT_DIV), .PO_FINE_DELAY (C_PO_FINE_DELAY), .PO_COARSE_BYPASS (C_PO_COARSE_BYPASS), .PO_COARSE_DELAY (C_PO_COARSE_DELAY), .PO_OCLK_DELAY (C_PO_OCLK_DELAY), .PO_OCLKDELAY_INV (C_PO_OCLKDELAY_INV), .PO_OUTPUT_CLK_SRC (C_PO_OUTPUT_CLK_SRC), .PO_SYNC_IN_DIV_RST (C_PO_SYNC_IN_DIV_RST), .OSERDES_DATA_RATE (C_OS_DATA_RATE), .OSERDES_DATA_WIDTH (C_OS_DATA_WIDTH), .IDELAYE2_IDELAY_TYPE (C_IDELAYE2_IDELAY_TYPE), .IDELAYE2_IDELAY_VALUE (C_IDELAYE2_IDELAY_VALUE) ,.CKE_ODT_AUX (CKE_ODT_AUX) ) ddr_byte_lane_C( .mem_dq_out (mem_dq_out[35:24]), .mem_dq_ts (mem_dq_ts[35:24]), .mem_dq_in (mem_dq_in[29:20]), .mem_dqs_out (mem_dqs_out[2]), .mem_dqs_ts (mem_dqs_ts[2]), .mem_dqs_in (mem_dqs_in[2]), .rst (C_rst_primitives), .phy_clk (phy_clk), .freq_refclk (freq_refclk), .mem_refclk (mem_refclk), .idelayctrl_refclk (idelayctrl_refclk), .sync_pulse (sync_pulse), .ddr_ck_out (C_ddr_clk), .rclk (C_rclk), .pi_dqs_found (C_pi_dqs_found), .dqs_out_of_range (C_pi_dqs_out_of_range), .if_empty_def (if_empty_def), .if_a_empty (C_if_a_empty), .if_empty (C_if_empty), .if_a_full 
(/*if_a_full*/), .if_full (C_if_full), .of_a_empty (/*of_a_empty*/), .of_empty (C_of_empty), .of_a_full (C_of_a_full), .of_full (C_of_full), .pre_fifo_a_full (C_pre_fifo_a_full), .phy_din (phy_din_remap[239:160]), .phy_dout (phy_dout_remap[239:160]), .phy_cmd_wr_en (phy_cmd_wr_en), .phy_data_wr_en (phy_data_wr_en), .phy_rd_en (phy_rd_en), .phaser_ctl_bus (phaser_ctl_bus), .if_rst (if_rst), .byte_rd_en_oth_lanes ({A_byte_rd_en,B_byte_rd_en,D_byte_rd_en}), .byte_rd_en_oth_banks (byte_rd_en_oth_banks), .byte_rd_en (C_byte_rd_en), // calibration signals .idelay_inc (idelay_inc), .idelay_ce (C_idelay_ce), .idelay_ld (C_idelay_ld), .pi_rst_dqs_find (C_pi_rst_dqs_find), .po_en_calib (phy_encalib), .po_fine_enable (C_po_fine_enable), .po_coarse_enable (C_po_coarse_enable), .po_fine_inc (C_po_fine_inc), .po_coarse_inc (C_po_coarse_inc), .po_counter_load_en (C_po_counter_load_en), .po_counter_read_en (C_po_counter_read_en), .po_counter_load_val (C_po_counter_load_val), .po_coarse_overflow (C_po_coarse_overflow), .po_fine_overflow (C_po_fine_overflow), .po_counter_read_val (C_po_counter_read_val), .po_sel_fine_oclk_delay(C_po_sel_fine_oclk_delay), .pi_en_calib (phy_encalib), .pi_fine_enable (C_pi_fine_enable), .pi_fine_inc (C_pi_fine_inc), .pi_counter_load_en (C_pi_counter_load_en), .pi_counter_read_en (C_pi_counter_read_en), .pi_counter_load_val (C_pi_counter_load_val), .pi_fine_overflow (C_pi_fine_overflow), .pi_counter_read_val (C_pi_counter_read_val), .pi_iserdes_rst (C_pi_iserdes_rst), .pi_phase_locked (C_pi_phase_locked) ); end else begin : no_ddr_byte_lane_C assign C_of_a_full = 1'b0; assign C_of_full = 1'b0; assign C_pre_fifo_a_full = 1'b0; assign C_if_empty = 1'b0; assign C_byte_rd_en = 1'b1; assign C_if_a_empty = 1'b0; assign C_pi_phase_locked = 1; assign C_pi_dqs_found = 1; assign C_rclk = 0; assign C_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}}; assign C_pi_counter_read_val = 0; assign C_po_counter_read_val = 0; assign C_pi_fine_overflow = 0; assign C_po_coarse_overflow = 0; assign C_po_fine_overflow = 0; end if ( BYTE_LANES[3] ) begin : ddr_byte_lane_D assign phy_dout_remap[319:240] = part_select_80(phy_dout, (LANE_REMAP[13:12])); mig_7series_v1_9_ddr_byte_lane # ( .ABCD ("D"), .PO_DATA_CTL (PC_DATA_CTL_N[3] ? 
"TRUE" : "FALSE"), .BITLANES (BITLANES[47:36]), .BITLANES_OUTONLY (BITLANES_OUTONLY[47:36]), .OF_ALMOST_EMPTY_VALUE (OF_ALMOST_EMPTY_VALUE), .OF_ALMOST_FULL_VALUE (OF_ALMOST_FULL_VALUE), .OF_SYNCHRONOUS_MODE (OF_SYNCHRONOUS_MODE), //.OF_OUTPUT_DISABLE (OF_OUTPUT_DISABLE), //.OF_ARRAY_MODE (D_OF_ARRAY_MODE), //.IF_ARRAY_MODE (IF_ARRAY_MODE), .IF_ALMOST_EMPTY_VALUE (IF_ALMOST_EMPTY_VALUE), .IF_ALMOST_FULL_VALUE (IF_ALMOST_FULL_VALUE), .IF_SYNCHRONOUS_MODE (IF_SYNCHRONOUS_MODE), .IODELAY_GRP (IODELAY_GRP), .BANK_TYPE (BANK_TYPE), .BYTELANES_DDR_CK (BYTELANES_DDR_CK), .RCLK_SELECT_LANE (RCLK_SELECT_LANE), .USE_PRE_POST_FIFO (USE_PRE_POST_FIFO), .SYNTHESIS (SYNTHESIS), .TCK (TCK), .PC_CLK_RATIO (PC_CLK_RATIO), .PI_BURST_MODE (D_PI_BURST_MODE), .PI_CLKOUT_DIV (D_PI_CLKOUT_DIV), .PI_FREQ_REF_DIV (D_PI_FREQ_REF_DIV), .PI_FINE_DELAY (D_PI_FINE_DELAY), .PI_OUTPUT_CLK_SRC (D_PI_OUTPUT_CLK_SRC), .PI_SYNC_IN_DIV_RST (D_PI_SYNC_IN_DIV_RST), .PI_SEL_CLK_OFFSET (PI_SEL_CLK_OFFSET), .PO_CLKOUT_DIV (D_PO_CLKOUT_DIV), .PO_FINE_DELAY (D_PO_FINE_DELAY), .PO_COARSE_BYPASS (D_PO_COARSE_BYPASS), .PO_COARSE_DELAY (D_PO_COARSE_DELAY), .PO_OCLK_DELAY (D_PO_OCLK_DELAY), .PO_OCLKDELAY_INV (D_PO_OCLKDELAY_INV), .PO_OUTPUT_CLK_SRC (D_PO_OUTPUT_CLK_SRC), .PO_SYNC_IN_DIV_RST (D_PO_SYNC_IN_DIV_RST), .OSERDES_DATA_RATE (D_OS_DATA_RATE), .OSERDES_DATA_WIDTH (D_OS_DATA_WIDTH), .IDELAYE2_IDELAY_TYPE (D_IDELAYE2_IDELAY_TYPE), .IDELAYE2_IDELAY_VALUE (D_IDELAYE2_IDELAY_VALUE) ,.CKE_ODT_AUX (CKE_ODT_AUX) ) ddr_byte_lane_D( .mem_dq_out (mem_dq_out[47:36]), .mem_dq_ts (mem_dq_ts[47:36]), .mem_dq_in (mem_dq_in[39:30]), .mem_dqs_out (mem_dqs_out[3]), .mem_dqs_ts (mem_dqs_ts[3]), .mem_dqs_in (mem_dqs_in[3]), .rst (D_rst_primitives), .phy_clk (phy_clk), .freq_refclk (freq_refclk), .mem_refclk (mem_refclk), .idelayctrl_refclk (idelayctrl_refclk), .sync_pulse (sync_pulse), .ddr_ck_out (D_ddr_clk), .rclk (D_rclk), .pi_dqs_found (D_pi_dqs_found), .dqs_out_of_range (D_pi_dqs_out_of_range), .if_empty_def (if_empty_def), .if_a_empty (D_if_a_empty), .if_empty (D_if_empty), .if_a_full (/*if_a_full*/), .if_full (D_if_full), .of_a_empty (/*of_a_empty*/), .of_empty (D_of_empty), .of_a_full (D_of_a_full), .of_full (D_of_full), .pre_fifo_a_full (D_pre_fifo_a_full), .phy_din (phy_din_remap[319:240]), .phy_dout (phy_dout_remap[319:240]), .phy_cmd_wr_en (phy_cmd_wr_en), .phy_data_wr_en (phy_data_wr_en), .phy_rd_en (phy_rd_en), .phaser_ctl_bus (phaser_ctl_bus), .idelay_inc (idelay_inc), .idelay_ce (D_idelay_ce), .idelay_ld (D_idelay_ld), .if_rst (if_rst), .byte_rd_en_oth_lanes ({A_byte_rd_en,B_byte_rd_en,C_byte_rd_en}), .byte_rd_en_oth_banks (byte_rd_en_oth_banks), .byte_rd_en (D_byte_rd_en), // calibration signals .pi_rst_dqs_find (D_pi_rst_dqs_find), .po_en_calib (phy_encalib), .po_fine_enable (D_po_fine_enable), .po_coarse_enable (D_po_coarse_enable), .po_fine_inc (D_po_fine_inc), .po_coarse_inc (D_po_coarse_inc), .po_counter_load_en (D_po_counter_load_en), .po_counter_read_en (D_po_counter_read_en), .po_counter_load_val (D_po_counter_load_val), .po_coarse_overflow (D_po_coarse_overflow), .po_fine_overflow (D_po_fine_overflow), .po_counter_read_val (D_po_counter_read_val), .po_sel_fine_oclk_delay(D_po_sel_fine_oclk_delay), .pi_en_calib (phy_encalib), .pi_fine_enable (D_pi_fine_enable), .pi_fine_inc (D_pi_fine_inc), .pi_counter_load_en (D_pi_counter_load_en), .pi_counter_read_en (D_pi_counter_read_en), .pi_counter_load_val (D_pi_counter_load_val), .pi_fine_overflow (D_pi_fine_overflow), .pi_counter_read_val (D_pi_counter_read_val), .pi_iserdes_rst 
(D_pi_iserdes_rst), .pi_phase_locked (D_pi_phase_locked) ); end else begin : no_ddr_byte_lane_D assign D_of_a_full = 1'b0; assign D_of_full = 1'b0; assign D_pre_fifo_a_full = 1'b0; assign D_if_empty = 1'b0; assign D_byte_rd_en = 1'b1; assign D_if_a_empty = 1'b0; assign D_rclk = 0; assign D_ddr_clk = {LP_DDR_CK_WIDTH*6{1'b0}}; assign D_pi_dqs_found = 1; assign D_pi_phase_locked = 1; assign D_pi_counter_read_val = 0; assign D_po_counter_read_val = 0; assign D_pi_fine_overflow = 0; assign D_po_coarse_overflow = 0; assign D_po_fine_overflow = 0; end endgenerate assign phaser_ctl_bus[MSB_RANK_SEL_I : MSB_RANK_SEL_I - 7] = in_rank; PHY_CONTROL #( .AO_WRLVL_EN ( PC_AO_WRLVL_EN), .AO_TOGGLE ( PC_AO_TOGGLE), .BURST_MODE ( PC_BURST_MODE), .CO_DURATION ( PC_CO_DURATION ), .CLK_RATIO ( PC_CLK_RATIO), .DATA_CTL_A_N ( PC_DATA_CTL_A), .DATA_CTL_B_N ( PC_DATA_CTL_B), .DATA_CTL_C_N ( PC_DATA_CTL_C), .DATA_CTL_D_N ( PC_DATA_CTL_D), .DI_DURATION ( PC_DI_DURATION ), .DO_DURATION ( PC_DO_DURATION ), .EVENTS_DELAY ( PC_EVENTS_DELAY), .FOUR_WINDOW_CLOCKS ( PC_FOUR_WINDOW_CLOCKS), .MULTI_REGION ( PC_MULTI_REGION ), .PHY_COUNT_ENABLE ( PC_PHY_COUNT_EN), .DISABLE_SEQ_MATCH ( PC_DISABLE_SEQ_MATCH), .SYNC_MODE ( PC_SYNC_MODE), .CMD_OFFSET ( PC_CMD_OFFSET), .RD_CMD_OFFSET_0 ( PC_RD_CMD_OFFSET_0), .RD_CMD_OFFSET_1 ( PC_RD_CMD_OFFSET_1), .RD_CMD_OFFSET_2 ( PC_RD_CMD_OFFSET_2), .RD_CMD_OFFSET_3 ( PC_RD_CMD_OFFSET_3), .RD_DURATION_0 ( PC_RD_DURATION_0), .RD_DURATION_1 ( PC_RD_DURATION_1), .RD_DURATION_2 ( PC_RD_DURATION_2), .RD_DURATION_3 ( PC_RD_DURATION_3), .WR_CMD_OFFSET_0 ( PC_WR_CMD_OFFSET_0), .WR_CMD_OFFSET_1 ( PC_WR_CMD_OFFSET_1), .WR_CMD_OFFSET_2 ( PC_WR_CMD_OFFSET_2), .WR_CMD_OFFSET_3 ( PC_WR_CMD_OFFSET_3), .WR_DURATION_0 ( PC_WR_DURATION_0), .WR_DURATION_1 ( PC_WR_DURATION_1), .WR_DURATION_2 ( PC_WR_DURATION_2), .WR_DURATION_3 ( PC_WR_DURATION_3) ) phy_control_i ( .AUXOUTPUT (aux_out), .INBURSTPENDING (phaser_ctl_bus[MSB_BURST_PEND_PI:MSB_BURST_PEND_PI-3]), .INRANKA (in_rank[1:0]), .INRANKB (in_rank[3:2]), .INRANKC (in_rank[5:4]), .INRANKD (in_rank[7:6]), .OUTBURSTPENDING (phaser_ctl_bus[MSB_BURST_PEND_PO:MSB_BURST_PEND_PO-3]), .PCENABLECALIB (phy_encalib), .PHYCTLALMOSTFULL (phy_ctl_a_full), .PHYCTLEMPTY (phy_ctl_empty), .PHYCTLFULL (phy_ctl_full), .PHYCTLREADY (phy_ctl_ready), .MEMREFCLK (mem_refclk), .PHYCLK (phy_ctl_clk), .PHYCTLMSTREMPTY (phy_ctl_mstr_empty), .PHYCTLWD (_phy_ctl_wd), .PHYCTLWRENABLE (phy_ctl_wr), .PLLLOCK (pll_lock), .REFDLLLOCK (ref_dll_lock), // is reset while !locked .RESET (rst), .SYNCIN (sync_pulse), .READCALIBENABLE (phy_read_calib), .WRITECALIBENABLE (phy_write_calib) `ifdef USE_PHY_CONTROL_TEST , .TESTINPUT (16'b0), .TESTOUTPUT (test_output), .TESTSELECT (test_select), .SCANENABLEN (scan_enable) `endif ); // register outputs to give extra slack in timing always @(posedge phy_clk ) begin case (calib_sel[1:0]) 2'h0: begin po_coarse_overflow <= #1 A_po_coarse_overflow; po_fine_overflow <= #1 A_po_fine_overflow; po_counter_read_val <= #1 A_po_counter_read_val; pi_fine_overflow <= #1 A_pi_fine_overflow; pi_counter_read_val<= #1 A_pi_counter_read_val; pi_phase_locked <= #1 A_pi_phase_locked; if ( calib_in_common) pi_dqs_found <= #1 pi_dqs_found_any; else pi_dqs_found <= #1 A_pi_dqs_found; pi_dqs_out_of_range <= #1 A_pi_dqs_out_of_range; end 2'h1: begin po_coarse_overflow <= #1 B_po_coarse_overflow; po_fine_overflow <= #1 B_po_fine_overflow; po_counter_read_val <= #1 B_po_counter_read_val; pi_fine_overflow <= #1 B_pi_fine_overflow; pi_counter_read_val <= #1 B_pi_counter_read_val; 
pi_phase_locked <= #1 B_pi_phase_locked; if ( calib_in_common) pi_dqs_found <= #1 pi_dqs_found_any; else pi_dqs_found <= #1 B_pi_dqs_found; pi_dqs_out_of_range <= #1 B_pi_dqs_out_of_range; end 2'h2: begin po_coarse_overflow <= #1 C_po_coarse_overflow; po_fine_overflow <= #1 C_po_fine_overflow; po_counter_read_val <= #1 C_po_counter_read_val; pi_fine_overflow <= #1 C_pi_fine_overflow; pi_counter_read_val <= #1 C_pi_counter_read_val; pi_phase_locked <= #1 C_pi_phase_locked; if ( calib_in_common) pi_dqs_found <= #1 pi_dqs_found_any; else pi_dqs_found <= #1 C_pi_dqs_found; pi_dqs_out_of_range <= #1 C_pi_dqs_out_of_range; end 2'h3: begin po_coarse_overflow <= #1 D_po_coarse_overflow; po_fine_overflow <= #1 D_po_fine_overflow; po_counter_read_val <= #1 D_po_counter_read_val; pi_fine_overflow <= #1 D_pi_fine_overflow; pi_counter_read_val <= #1 D_pi_counter_read_val; pi_phase_locked <= #1 D_pi_phase_locked; if ( calib_in_common) pi_dqs_found <= #1 pi_dqs_found_any; else pi_dqs_found <= #1 D_pi_dqs_found; pi_dqs_out_of_range <= #1 D_pi_dqs_out_of_range; end default: begin po_coarse_overflow <= po_coarse_overflow; end endcase end wire B_mux_ctrl; wire C_mux_ctrl; wire D_mux_ctrl; generate if (HIGHEST_LANE > 1) assign B_mux_ctrl = ( !calib_zero_lanes[1] && ( ! calib_zero_ctrl || DATA_CTL_N[1])); else assign B_mux_ctrl = 0; if (HIGHEST_LANE > 2) assign C_mux_ctrl = ( !calib_zero_lanes[2] && (! calib_zero_ctrl || DATA_CTL_N[2])); else assign C_mux_ctrl = 0; if (HIGHEST_LANE > 3) assign D_mux_ctrl = ( !calib_zero_lanes[3] && ( ! calib_zero_ctrl || DATA_CTL_N[3])); else assign D_mux_ctrl = 0; endgenerate always @(*) begin A_pi_fine_enable = 0; A_pi_fine_inc = 0; A_pi_counter_load_en = 0; A_pi_counter_read_en = 0; A_pi_counter_load_val = 0; A_pi_rst_dqs_find = 0; A_po_fine_enable = 0; A_po_coarse_enable = 0; A_po_fine_inc = 0; A_po_coarse_inc = 0; A_po_counter_load_en = 0; A_po_counter_read_en = 0; A_po_counter_load_val = 0; A_po_sel_fine_oclk_delay = 0; A_idelay_ce = 0; A_idelay_ld = 0; B_pi_fine_enable = 0; B_pi_fine_inc = 0; B_pi_counter_load_en = 0; B_pi_counter_read_en = 0; B_pi_counter_load_val = 0; B_pi_rst_dqs_find = 0; B_po_fine_enable = 0; B_po_coarse_enable = 0; B_po_fine_inc = 0; B_po_coarse_inc = 0; B_po_counter_load_en = 0; B_po_counter_read_en = 0; B_po_counter_load_val = 0; B_po_sel_fine_oclk_delay = 0; B_idelay_ce = 0; B_idelay_ld = 0; C_pi_fine_enable = 0; C_pi_fine_inc = 0; C_pi_counter_load_en = 0; C_pi_counter_read_en = 0; C_pi_counter_load_val = 0; C_pi_rst_dqs_find = 0; C_po_fine_enable = 0; C_po_coarse_enable = 0; C_po_fine_inc = 0; C_po_coarse_inc = 0; C_po_counter_load_en = 0; C_po_counter_read_en = 0; C_po_counter_load_val = 0; C_po_sel_fine_oclk_delay = 0; C_idelay_ce = 0; C_idelay_ld = 0; D_pi_fine_enable = 0; D_pi_fine_inc = 0; D_pi_counter_load_en = 0; D_pi_counter_read_en = 0; D_pi_counter_load_val = 0; D_pi_rst_dqs_find = 0; D_po_fine_enable = 0; D_po_coarse_enable = 0; D_po_fine_inc = 0; D_po_coarse_inc = 0; D_po_counter_load_en = 0; D_po_counter_read_en = 0; D_po_counter_load_val = 0; D_po_sel_fine_oclk_delay = 0; D_idelay_ce = 0; D_idelay_ld = 0; if ( calib_sel[2]) begin // if this is asserted, all calib signals are deasserted A_pi_fine_enable = 0; A_pi_fine_inc = 0; A_pi_counter_load_en = 0; A_pi_counter_read_en = 0; A_pi_counter_load_val = 0; A_pi_rst_dqs_find = 0; A_po_fine_enable = 0; A_po_coarse_enable = 0; A_po_fine_inc = 0; A_po_coarse_inc = 0; A_po_counter_load_en = 0; A_po_counter_read_en = 0; A_po_counter_load_val = 0; A_po_sel_fine_oclk_delay = 0; A_idelay_ce 
= 0; A_idelay_ld = 0; B_pi_fine_enable = 0; B_pi_fine_inc = 0; B_pi_counter_load_en = 0; B_pi_counter_read_en = 0; B_pi_counter_load_val = 0; B_pi_rst_dqs_find = 0; B_po_fine_enable = 0; B_po_coarse_enable = 0; B_po_fine_inc = 0; B_po_coarse_inc = 0; B_po_counter_load_en = 0; B_po_counter_read_en = 0; B_po_counter_load_val = 0; B_po_sel_fine_oclk_delay = 0; B_idelay_ce = 0; B_idelay_ld = 0; C_pi_fine_enable = 0; C_pi_fine_inc = 0; C_pi_counter_load_en = 0; C_pi_counter_read_en = 0; C_pi_counter_load_val = 0; C_pi_rst_dqs_find = 0; C_po_fine_enable = 0; C_po_coarse_enable = 0; C_po_fine_inc = 0; C_po_coarse_inc = 0; C_po_counter_load_en = 0; C_po_counter_read_en = 0; C_po_counter_load_val = 0; C_po_sel_fine_oclk_delay = 0; C_idelay_ce = 0; C_idelay_ld = 0; D_pi_fine_enable = 0; D_pi_fine_inc = 0; D_pi_counter_load_en = 0; D_pi_counter_read_en = 0; D_pi_counter_load_val = 0; D_pi_rst_dqs_find = 0; D_po_fine_enable = 0; D_po_coarse_enable = 0; D_po_fine_inc = 0; D_po_coarse_inc = 0; D_po_counter_load_en = 0; D_po_counter_read_en = 0; D_po_counter_load_val = 0; D_po_sel_fine_oclk_delay = 0; D_idelay_ce = 0; D_idelay_ld = 0; end else if (calib_in_common) begin // if this is asserted, each signal is broadcast to all phasers // in common if ( !calib_zero_lanes[0] && (! calib_zero_ctrl || DATA_CTL_N[0])) begin A_pi_fine_enable = pi_fine_enable; A_pi_fine_inc = pi_fine_inc; A_pi_counter_load_en = pi_counter_load_en; A_pi_counter_read_en = pi_counter_read_en; A_pi_counter_load_val = pi_counter_load_val; A_pi_rst_dqs_find = pi_rst_dqs_find; A_po_fine_enable = po_fine_enable; A_po_coarse_enable = po_coarse_enable; A_po_fine_inc = po_fine_inc; A_po_coarse_inc = po_coarse_inc; A_po_counter_load_en = po_counter_load_en; A_po_counter_read_en = po_counter_read_en; A_po_counter_load_val = po_counter_load_val; A_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay; A_idelay_ce = idelay_ce; A_idelay_ld = idelay_ld; end if ( B_mux_ctrl) begin B_pi_fine_enable = pi_fine_enable; B_pi_fine_inc = pi_fine_inc; B_pi_counter_load_en = pi_counter_load_en; B_pi_counter_read_en = pi_counter_read_en; B_pi_counter_load_val = pi_counter_load_val; B_pi_rst_dqs_find = pi_rst_dqs_find; B_po_fine_enable = po_fine_enable; B_po_coarse_enable = po_coarse_enable; B_po_fine_inc = po_fine_inc; B_po_coarse_inc = po_coarse_inc; B_po_counter_load_en = po_counter_load_en; B_po_counter_read_en = po_counter_read_en; B_po_counter_load_val = po_counter_load_val; B_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay; B_idelay_ce = idelay_ce; B_idelay_ld = idelay_ld; end if ( C_mux_ctrl) begin C_pi_fine_enable = pi_fine_enable; C_pi_fine_inc = pi_fine_inc; C_pi_counter_load_en = pi_counter_load_en; C_pi_counter_read_en = pi_counter_read_en; C_pi_counter_load_val = pi_counter_load_val; C_pi_rst_dqs_find = pi_rst_dqs_find; C_po_fine_enable = po_fine_enable; C_po_coarse_enable = po_coarse_enable; C_po_fine_inc = po_fine_inc; C_po_coarse_inc = po_coarse_inc; C_po_counter_load_en = po_counter_load_en; C_po_counter_read_en = po_counter_read_en; C_po_counter_load_val = po_counter_load_val; C_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay; C_idelay_ce = idelay_ce; C_idelay_ld = idelay_ld; end if ( D_mux_ctrl) begin D_pi_fine_enable = pi_fine_enable; D_pi_fine_inc = pi_fine_inc; D_pi_counter_load_en = pi_counter_load_en; D_pi_counter_read_en = pi_counter_read_en; D_pi_counter_load_val = pi_counter_load_val; D_pi_rst_dqs_find = pi_rst_dqs_find; D_po_fine_enable = po_fine_enable; D_po_coarse_enable = po_coarse_enable; D_po_fine_inc = po_fine_inc; 
D_po_coarse_inc = po_coarse_inc; D_po_counter_load_en = po_counter_load_en; D_po_counter_read_en = po_counter_read_en; D_po_counter_load_val = po_counter_load_val; D_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay; D_idelay_ce = idelay_ce; D_idelay_ld = idelay_ld; end end else begin // otherwise, only a single phaser is selected case (calib_sel[1:0]) 0: begin A_pi_fine_enable = pi_fine_enable; A_pi_fine_inc = pi_fine_inc; A_pi_counter_load_en = pi_counter_load_en; A_pi_counter_read_en = pi_counter_read_en; A_pi_counter_load_val = pi_counter_load_val; A_pi_rst_dqs_find = pi_rst_dqs_find; A_po_fine_enable = po_fine_enable; A_po_coarse_enable = po_coarse_enable; A_po_fine_inc = po_fine_inc; A_po_coarse_inc = po_coarse_inc; A_po_counter_load_en = po_counter_load_en; A_po_counter_read_en = po_counter_read_en; A_po_counter_load_val = po_counter_load_val; A_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay; A_idelay_ce = idelay_ce; A_idelay_ld = idelay_ld; end 1: begin B_pi_fine_enable = pi_fine_enable; B_pi_fine_inc = pi_fine_inc; B_pi_counter_load_en = pi_counter_load_en; B_pi_counter_read_en = pi_counter_read_en; B_pi_counter_load_val = pi_counter_load_val; B_pi_rst_dqs_find = pi_rst_dqs_find; B_po_fine_enable = po_fine_enable; B_po_coarse_enable = po_coarse_enable; B_po_fine_inc = po_fine_inc; B_po_coarse_inc = po_coarse_inc; B_po_counter_load_en = po_counter_load_en; B_po_counter_read_en = po_counter_read_en; B_po_counter_load_val = po_counter_load_val; B_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay; B_idelay_ce = idelay_ce; B_idelay_ld = idelay_ld; end 2: begin C_pi_fine_enable = pi_fine_enable; C_pi_fine_inc = pi_fine_inc; C_pi_counter_load_en = pi_counter_load_en; C_pi_counter_read_en = pi_counter_read_en; C_pi_counter_load_val = pi_counter_load_val; C_pi_rst_dqs_find = pi_rst_dqs_find; C_po_fine_enable = po_fine_enable; C_po_coarse_enable = po_coarse_enable; C_po_fine_inc = po_fine_inc; C_po_coarse_inc = po_coarse_inc; C_po_counter_load_en = po_counter_load_en; C_po_counter_read_en = po_counter_read_en; C_po_counter_load_val = po_counter_load_val; C_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay; C_idelay_ce = idelay_ce; C_idelay_ld = idelay_ld; end 3: begin D_pi_fine_enable = pi_fine_enable; D_pi_fine_inc = pi_fine_inc; D_pi_counter_load_en = pi_counter_load_en; D_pi_counter_read_en = pi_counter_read_en; D_pi_counter_load_val = pi_counter_load_val; D_pi_rst_dqs_find = pi_rst_dqs_find; D_po_fine_enable = po_fine_enable; D_po_coarse_enable = po_coarse_enable; D_po_fine_inc = po_fine_inc; D_po_coarse_inc = po_coarse_inc; D_po_counter_load_en = po_counter_load_en; D_po_counter_load_val = po_counter_load_val; D_po_counter_read_en = po_counter_read_en; D_po_sel_fine_oclk_delay = po_sel_fine_oclk_delay; D_idelay_ce = idelay_ce; D_idelay_ld = idelay_ld; end endcase end end //obligatory phaser-ref PHASER_REF phaser_ref_i( .LOCKED (ref_dll_lock), .CLKIN (freq_refclk), .PWRDWN (1'b0), .RST ( ! pll_lock) ); // optional idelay_ctrl generate if ( GENERATE_IDELAYCTRL == "TRUE") IDELAYCTRL idelayctrl ( .RDY (/*idelayctrl_rdy*/), .REFCLK (idelayctrl_refclk), .RST (rst) ); endgenerate endmodule
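// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the MIG source): a reduced model of the
// calibration-steering scheme implemented in the always @(*) block above.
// calib_sel[2] forces every phaser calibration control inactive,
// calib_in_common broadcasts the common controls to every byte lane whose
// calib_zero_lanes bit is clear (the full RTL additionally masks control-only
// lanes via calib_zero_ctrl/DATA_CTL_N, which this sketch omits), and
// otherwise calib_sel[1:0] steers the controls to a single lane (A..D).
// The module name and the single representative control (pi_fine_enable)
// are local to this sketch; the real logic steers many controls per lane.
// ---------------------------------------------------------------------------
module calib_steer_sketch
  (
   input            calib_in_common,
   input      [2:0] calib_sel,
   input      [3:0] calib_zero_lanes,
   input            pi_fine_enable,
   output reg [3:0] lane_pi_fine_enable   // one bit per byte lane A..D
   );

  integer ln;

  always @(*) begin
    lane_pi_fine_enable = 4'b0000;                  // default: no lane driven
    if (calib_sel[2])
      lane_pi_fine_enable = 4'b0000;                // all calibration disabled
    else if (calib_in_common) begin
      // broadcast mode: drive every unmasked lane
      for (ln = 0; ln < 4; ln = ln + 1)
        if (!calib_zero_lanes[ln])
          lane_pi_fine_enable[ln] = pi_fine_enable;
    end
    else
      // single-lane mode: calib_sel[1:0] selects lane A..D
      lane_pi_fine_enable[calib_sel[1:0]] = pi_fine_enable;
  end

endmodule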
//***************************************************************************** // (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor : Xilinx // \ \ \/ Version : %version // \ \ Application : MIG // / / Filename : mig_7series_v1_x_ddr_if_post_fifo.v // /___/ /\ Date Last Modified : $date$ // \ \ / \ Date Created : Feb 08 2011 // \___\/\___\ // //Device : 7 Series //Design Name : DDR3 SDRAM //Purpose : Extends the depth of a PHASER IN_FIFO up to 4 entries //Reference : //Revision History : //***************************************************************************** `timescale 1 ps / 1 ps module mig_7series_v1_9_ddr_if_post_fifo # ( parameter TCQ = 100, // clk->out delay (sim only) parameter DEPTH = 4, // # of entries parameter WIDTH = 32 // data bus width ) ( input clk, // clock input rst, // synchronous reset input [3:0] empty_in, input rd_en_in, input [WIDTH-1:0] d_in, // write data from controller output empty_out, output byte_rd_en, output [WIDTH-1:0] d_out // write data to OUT_FIFO ); // # of bits used to represent read/write pointers localparam PTR_BITS = (DEPTH == 2) ? 1 : (((DEPTH == 3) || (DEPTH == 4)) ? 
  2 : 'bx);

  integer i;

  reg [WIDTH-1:0] mem[0:DEPTH-1];
  (* keep = "true", max_fanout = 3 *)  reg [4:0]          my_empty /* synthesis syn_maxfan = 3 */;
  (* keep = "true", max_fanout = 3 *)  reg [1:0]          my_full  /* synthesis syn_maxfan = 3 */;
  (* keep = "true", max_fanout = 10 *) reg [PTR_BITS-1:0] rd_ptr   /* synthesis syn_maxfan = 10 */;
  (* keep = "true", max_fanout = 10 *) reg [PTR_BITS-1:0] wr_ptr   /* synthesis syn_maxfan = 10 */;
  wire [WIDTH-1:0] mem_out;
  (* keep = "true", max_fanout = 10 *) wire wr_en /* synthesis syn_maxfan = 10 */;

  task updt_ptrs;
    input rd;
    input wr;
    reg [1:0] next_rd_ptr;
    reg [1:0] next_wr_ptr;
    begin
      next_rd_ptr = (rd_ptr + 1'b1)%DEPTH;
      next_wr_ptr = (wr_ptr + 1'b1)%DEPTH;
      casez ({rd, wr, my_empty[1], my_full[1]})
        4'b00zz: ;   // No access, do nothing
        4'b0100: begin
          // Write when neither empty, nor full; check for full
          wr_ptr <= #TCQ next_wr_ptr;
          my_full[0] <= #TCQ (next_wr_ptr == rd_ptr);
          my_full[1] <= #TCQ (next_wr_ptr == rd_ptr);
          //mem[wr_ptr] <= #TCQ d_in;
        end
        4'b0110: begin
          // Write when empty; no need to check for full
          wr_ptr <= #TCQ next_wr_ptr;
          my_empty <= #TCQ 5'b00000;
          //mem[wr_ptr] <= #TCQ d_in;
        end
        4'b1000: begin
          // Read when neither empty, nor full; check for empty
          rd_ptr <= #TCQ next_rd_ptr;
          my_empty[0] <= #TCQ (next_rd_ptr == wr_ptr);
          my_empty[1] <= #TCQ (next_rd_ptr == wr_ptr);
          my_empty[2] <= #TCQ (next_rd_ptr == wr_ptr);
          my_empty[3] <= #TCQ (next_rd_ptr == wr_ptr);
          my_empty[4] <= #TCQ (next_rd_ptr == wr_ptr);
        end
        4'b1001: begin
          // Read when full; no need to check for empty
          rd_ptr <= #TCQ next_rd_ptr;
          my_full[0] <= #TCQ 1'b0;
          my_full[1] <= #TCQ 1'b0;
        end
        4'b1100, 4'b1101, 4'b1110: begin
          // Read and write when empty, full, or neither empty/full; no need
          // to check for empty or full conditions
          rd_ptr <= #TCQ next_rd_ptr;
          wr_ptr <= #TCQ next_wr_ptr;
          //mem[wr_ptr] <= #TCQ d_in;
        end
        4'b0101, 4'b1010: ;
          // Read when empty, Write when full; Keep all pointers the same
          // and don't change any of the flags (i.e. ignore the read/write).
          // This might happen because a faulty DQS_FOUND calibration could
          // result in excessive skew between when the various IN_FIFO's
          // first become not empty. In this case, the data going to each
          // post-FIFO/IN_FIFO should be read out and discarded
        // synthesis translate_off
        default: begin
          // Covers any other cases, in particular for simulation if
          // any signals are X's
          $display("ERR %m @%t: Bad access: rd:%b,wr:%b,empty:%b,full:%b",
                   $time, rd, wr, my_empty[1], my_full[1]);
          rd_ptr <= #TCQ 2'bxx;
          wr_ptr <= #TCQ 2'bxx;
        end
        // synthesis translate_on
      endcase
    end
  endtask

  assign d_out = my_empty[4] ? d_in : mem_out;//mem[rd_ptr];
  // The combined IN_FIFO + post FIFO is only "empty" when both are empty
  assign empty_out = empty_in[0] & my_empty[0];
  assign byte_rd_en = !empty_in[3] || !my_empty[3];

  always @(posedge clk)
    if (rst) begin
      my_empty <= #TCQ 5'b11111;
      my_full  <= #TCQ 2'b00;
      rd_ptr   <= #TCQ 'b0;
      wr_ptr   <= #TCQ 'b0;
    end else begin
      // Special mode: If IN_FIFO has data, and controller is reading at
      // the same time, then operate post-FIFO in "passthrough" mode (i.e.
      // don't update any of the read/write pointers, and route IN_FIFO
      // data to post-FIFO data)
      if (my_empty[1] && !my_full[1] && rd_en_in && !empty_in[1])
        ;
      else
        // Otherwise, we're writing to FIFO when IN_FIFO is not empty,
        // and reading from the FIFO based on the rd_en_in signal (read
        // enable from controller). The function updt_ptrs should catch
        // any illegal conditions.
updt_ptrs(rd_en_in, !empty_in[1]); end assign wr_en = (!empty_in[2] & ((!rd_en_in & !my_full[0]) | (rd_en_in & !my_empty[2]))); always @ (posedge clk) begin if (wr_en) mem[wr_ptr] <= #TCQ d_in; end assign mem_out = mem[rd_ptr]; endmodule
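// ---------------------------------------------------------------------------
// Usage sketch (not part of the MIG source): one way the post-FIFO above can
// be exercised in a small, simulation-only testbench. The module name and
// the stimulus values are arbitrary; in the real PHY the empty_in, rd_en_in
// and d_in ports are driven by the PHASER IN_FIFO and the controller read
// logic rather than by an initial block.
// ---------------------------------------------------------------------------
// synthesis translate_off
module ddr_if_post_fifo_usage_sketch;

  localparam WIDTH = 32;

  reg              clk      = 1'b0;
  reg              rst      = 1'b1;
  reg  [3:0]       empty_in = 4'b1111;   // IN_FIFO starts empty
  reg              rd_en_in = 1'b0;
  reg  [WIDTH-1:0] d_in     = {WIDTH{1'b0}};
  wire             empty_out;
  wire             byte_rd_en;
  wire [WIDTH-1:0] d_out;

  // Device under test: depth-4 extension of the PHASER IN_FIFO
  mig_7series_v1_9_ddr_if_post_fifo #
    (
     .TCQ   (100),
     .DEPTH (4),
     .WIDTH (WIDTH)
     )
    u_post_fifo
      (
       .clk        (clk),
       .rst        (rst),
       .empty_in   (empty_in),
       .rd_en_in   (rd_en_in),
       .d_in       (d_in),
       .empty_out  (empty_out),
       .byte_rd_en (byte_rd_en),
       .d_out      (d_out)
       );

  // arbitrary 200 MHz fabric clock (timescale for this file is 1 ps)
  always #2500 clk = ~clk;

  initial begin
    repeat (4) @(posedge clk);
    rst <= 1'b0;
    // Present a few beats of "IN_FIFO not empty" with changing data,
    // then read back through the post-FIFO.
    @(posedge clk);
    empty_in <= 4'b0000;
    d_in     <= 32'h0000_0001;
    @(posedge clk) d_in <= 32'h0000_0002;
    @(posedge clk) d_in <= 32'h0000_0003;
    @(posedge clk) empty_in <= 4'b1111;
    rd_en_in <= 1'b1;
    repeat (4) @(posedge clk);
    rd_en_in <= 1'b0;
    $finish;
  end

endmodule
// synthesis translate_on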
//***************************************************************************** // (c) Copyright 2009 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor: Xilinx // \ \ \/ Version: // \ \ Application: MIG // / / Filename: ddr_phy_rdlvl.v // /___/ /\ Date Last Modified: $Date: 2011/06/24 14:49:00 $ // \ \ / \ Date Created: // \___\/\___\ // //Device: 7 Series //Design Name: DDR3 SDRAM //Purpose: // Read leveling Stage1 calibration logic // NOTES: // 1. Window detection with PRBS pattern. 
//Reference: //Revision History: //***************************************************************************** /****************************************************************************** **$Id: ddr_phy_rdlvl.v,v 1.2 2011/06/24 14:49:00 mgeorge Exp $ **$Date: 2011/06/24 14:49:00 $ **$Author: mgeorge $ **$Revision: 1.2 $ **$Source: /devl/xcs/repo/env/Databases/ip/src2/O/mig_7series_v1_3/data/dlib/7series/ddr3_sdram/verilog/rtl/phy/ddr_phy_rdlvl.v,v $ ******************************************************************************/ `timescale 1ps/1ps (* use_dsp48 = "no" *) module mig_7series_v1_9_ddr_phy_rdlvl # ( parameter TCQ = 100, // clk->out delay (sim only) parameter nCK_PER_CLK = 2, // # of memory clocks per CLK parameter CLK_PERIOD = 3333, // Internal clock period (in ps) parameter DQ_WIDTH = 64, // # of DQ (data) parameter DQS_CNT_WIDTH = 3, // = ceil(log2(DQS_WIDTH)) parameter DQS_WIDTH = 8, // # of DQS (strobe) parameter DRAM_WIDTH = 8, // # of DQ per DQS parameter RANKS = 1, // # of DRAM ranks parameter PER_BIT_DESKEW = "ON", // Enable per-bit DQ deskew parameter SIM_CAL_OPTION = "NONE", // Skip various calibration steps parameter DEBUG_PORT = "OFF", // Enable debug port parameter DRAM_TYPE = "DDR3", // Memory I/F type: "DDR3", "DDR2" parameter OCAL_EN = "ON" ) ( input clk, input rst, // Calibration status, control signals input mpr_rdlvl_start, output mpr_rdlvl_done, output reg mpr_last_byte_done, output mpr_rnk_done, input rdlvl_stg1_start, (* keep = "true", max_fanout = 30 *) output reg rdlvl_stg1_done /* synthesis syn_maxfan = 30 */, output rdlvl_stg1_rnk_done, output reg rdlvl_stg1_err, output mpr_rdlvl_err, output rdlvl_err, output reg rdlvl_prech_req, output reg rdlvl_last_byte_done, output reg rdlvl_assrt_common, input prech_done, input phy_if_empty, input [4:0] idelaye2_init_val, // Captured data in fabric clock domain input [2*nCK_PER_CLK*DQ_WIDTH-1:0] rd_data, // Decrement initial Phaser_IN Fine tap delay input dqs_po_dec_done, input [5:0] pi_counter_read_val, // Stage 1 calibration outputs output reg pi_fine_dly_dec_done, output reg pi_en_stg2_f, output reg pi_stg2_f_incdec, output reg pi_stg2_load, output reg [5:0] pi_stg2_reg_l, output [DQS_CNT_WIDTH:0] pi_stg2_rdlvl_cnt, // To DQ IDELAY required to find left edge of // valid window output idelay_ce, output idelay_inc, input idelay_ld, input [DQS_CNT_WIDTH:0] wrcal_cnt, // Only output if Per-bit de-skew enabled output reg [5*RANKS*DQ_WIDTH-1:0] dlyval_dq, // Debug Port output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_first_edge_cnt, output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_second_edge_cnt, output [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_tap_cnt, output [5*DQS_WIDTH*RANKS-1:0] dbg_dq_idelay_tap_cnt, input dbg_idel_up_all, input dbg_idel_down_all, input dbg_idel_up_cpt, input dbg_idel_down_cpt, input [DQS_CNT_WIDTH-1:0] dbg_sel_idel_cpt, input dbg_sel_all_idel_cpt, output [255:0] dbg_phy_rdlvl ); // minimum time (in IDELAY taps) for which capture data must be stable for // algorithm to consider a valid data eye to be found. The read leveling // logic will ignore any window found smaller than this value. Limitations // on how small this number can be is determined by: (1) the algorithmic // limitation of how many taps wide the data eye can be (3 taps), and (2) // how wide regions of "instability" that occur around the edges of the // read valid window can be (i.e. 
  // need to be able to filter out "false"
  // windows that occur for a short # of taps around the edges of the true
  // data window, although with multi-sampling during read leveling, this is
  // not as much a concern) - the larger the value, the more protection
  // against "false" windows
  localparam MIN_EYE_SIZE = 16;

  // Length of calibration sequence (in # of words)
  localparam CAL_PAT_LEN = 8;

  // Read data shift register length
  localparam RD_SHIFT_LEN = CAL_PAT_LEN / (2*nCK_PER_CLK);

  // # of cycles required to perform read data shift register compare
  // This is defined as from the cycle the new data is loaded until
  // signal found_edge_r is valid
  localparam RD_SHIFT_COMP_DELAY = 5;

  // worst-case # of cycles to wait to ensure that both the SR and
  // PREV_SR shift registers have valid data, and that the comparison
  // of the two shift register values is valid. The "+1" at the end of
  // this equation is a fudge factor, I freely admit that
  localparam SR_VALID_DELAY = (2 * RD_SHIFT_LEN) + RD_SHIFT_COMP_DELAY + 1;

  // # of clock cycles to wait after changing tap value or read data MUX
  // to allow: (1) tap chain to settle, (2) for delayed input to propagate
  // thru ISERDES, (3) for the read data comparison logic to have time to
  // output the comparison of two consecutive samples of the settled read data
  // The minimum delay is 16 cycles, which should be good enough to handle all
  // three of the above conditions for the simulation-only case with a short
  // training pattern. For H/W (or for simulation with longer training
  // pattern), it will take longer to store and compare two consecutive
  // samples, and the value of this parameter will reflect that
  localparam PIPE_WAIT_CNT = (SR_VALID_DELAY < 8) ? 16 : (SR_VALID_DELAY + 8);

  // # of read data samples to examine when detecting whether an edge has
  // occurred during stage 1 calibration. Width of local param must be
  // changed as appropriate.
Note that there are two counters used, each // counter can be changed independently of the other - they are used in // cascade to create a larger counter localparam [11:0] DETECT_EDGE_SAMPLE_CNT0 = 12'h001; //12'hFFF; localparam [11:0] DETECT_EDGE_SAMPLE_CNT1 = 12'h001; // 12'h1FF Must be > 0 localparam [5:0] CAL1_IDLE = 6'h00; localparam [5:0] CAL1_NEW_DQS_WAIT = 6'h01; localparam [5:0] CAL1_STORE_FIRST_WAIT = 6'h02; localparam [5:0] CAL1_PAT_DETECT = 6'h03; localparam [5:0] CAL1_DQ_IDEL_TAP_INC = 6'h04; localparam [5:0] CAL1_DQ_IDEL_TAP_INC_WAIT = 6'h05; localparam [5:0] CAL1_DQ_IDEL_TAP_DEC = 6'h06; localparam [5:0] CAL1_DQ_IDEL_TAP_DEC_WAIT = 6'h07; localparam [5:0] CAL1_DETECT_EDGE = 6'h08; localparam [5:0] CAL1_IDEL_INC_CPT = 6'h09; localparam [5:0] CAL1_IDEL_INC_CPT_WAIT = 6'h0A; localparam [5:0] CAL1_CALC_IDEL = 6'h0B; localparam [5:0] CAL1_IDEL_DEC_CPT = 6'h0C; localparam [5:0] CAL1_IDEL_DEC_CPT_WAIT = 6'h0D; localparam [5:0] CAL1_NEXT_DQS = 6'h0E; localparam [5:0] CAL1_DONE = 6'h0F; localparam [5:0] CAL1_PB_STORE_FIRST_WAIT = 6'h10; localparam [5:0] CAL1_PB_DETECT_EDGE = 6'h11; localparam [5:0] CAL1_PB_INC_CPT = 6'h12; localparam [5:0] CAL1_PB_INC_CPT_WAIT = 6'h13; localparam [5:0] CAL1_PB_DEC_CPT_LEFT = 6'h14; localparam [5:0] CAL1_PB_DEC_CPT_LEFT_WAIT = 6'h15; localparam [5:0] CAL1_PB_DETECT_EDGE_DQ = 6'h16; localparam [5:0] CAL1_PB_INC_DQ = 6'h17; localparam [5:0] CAL1_PB_INC_DQ_WAIT = 6'h18; localparam [5:0] CAL1_PB_DEC_CPT = 6'h19; localparam [5:0] CAL1_PB_DEC_CPT_WAIT = 6'h1A; localparam [5:0] CAL1_REGL_LOAD = 6'h1B; localparam [5:0] CAL1_RDLVL_ERR = 6'h1C; localparam [5:0] CAL1_MPR_NEW_DQS_WAIT = 6'h1D; localparam [5:0] CAL1_VALID_WAIT = 6'h1E; localparam [5:0] CAL1_MPR_PAT_DETECT = 6'h1F; localparam [5:0] CAL1_NEW_DQS_PREWAIT = 6'h20; integer a; integer b; integer d; integer e; integer f; integer h; integer g; integer i; integer j; integer k; integer l; integer m; integer n; integer r; integer p; integer q; integer s; integer t; integer u; integer w; integer ce_i; integer ce_rnk_i; integer aa; integer bb; integer cc; integer dd; genvar x; genvar z; reg [DQS_CNT_WIDTH:0] cal1_cnt_cpt_r; wire [DQS_CNT_WIDTH+2:0]cal1_cnt_cpt_timing; reg [DQS_CNT_WIDTH:0] cal1_cnt_cpt_timing_r; reg cal1_dq_idel_ce; reg cal1_dq_idel_inc; reg cal1_dlyce_cpt_r; reg cal1_dlyinc_cpt_r; reg cal1_dlyce_dq_r; reg cal1_dlyinc_dq_r; reg cal1_wait_cnt_en_r; reg [4:0] cal1_wait_cnt_r; reg cal1_wait_r; reg [DQ_WIDTH-1:0] dlyce_dq_r; reg dlyinc_dq_r; reg [4:0] dlyval_dq_reg_r [0:RANKS-1][0:DQ_WIDTH-1]; reg cal1_prech_req_r; reg [5:0] cal1_state_r; reg [5:0] cal1_state_r1; reg [5:0] cnt_idel_dec_cpt_r; reg [3:0] cnt_shift_r; reg detect_edge_done_r; reg [5:0] right_edge_taps_r; reg [5:0] first_edge_taps_r; reg found_edge_r; reg found_first_edge_r; reg found_second_edge_r; reg found_stable_eye_r; reg found_stable_eye_last_r; reg found_edge_all_r; reg [5:0] tap_cnt_cpt_r; reg tap_limit_cpt_r; reg [4:0] idel_tap_cnt_dq_pb_r; reg idel_tap_limit_dq_pb_r; reg [DRAM_WIDTH-1:0] mux_rd_fall0_r; reg [DRAM_WIDTH-1:0] mux_rd_fall1_r; reg [DRAM_WIDTH-1:0] mux_rd_rise0_r; reg [DRAM_WIDTH-1:0] mux_rd_rise1_r; reg [DRAM_WIDTH-1:0] mux_rd_fall2_r; reg [DRAM_WIDTH-1:0] mux_rd_fall3_r; reg [DRAM_WIDTH-1:0] mux_rd_rise2_r; reg [DRAM_WIDTH-1:0] mux_rd_rise3_r; reg mux_rd_valid_r; reg new_cnt_cpt_r; reg [RD_SHIFT_LEN-1:0] old_sr_fall0_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] old_sr_fall1_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] old_sr_rise0_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] old_sr_rise1_r [DRAM_WIDTH-1:0]; reg 
[RD_SHIFT_LEN-1:0] old_sr_fall2_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] old_sr_fall3_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] old_sr_rise2_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] old_sr_rise3_r [DRAM_WIDTH-1:0]; reg [DRAM_WIDTH-1:0] old_sr_match_fall0_r; reg [DRAM_WIDTH-1:0] old_sr_match_fall1_r; reg [DRAM_WIDTH-1:0] old_sr_match_rise0_r; reg [DRAM_WIDTH-1:0] old_sr_match_rise1_r; reg [DRAM_WIDTH-1:0] old_sr_match_fall2_r; reg [DRAM_WIDTH-1:0] old_sr_match_fall3_r; reg [DRAM_WIDTH-1:0] old_sr_match_rise2_r; reg [DRAM_WIDTH-1:0] old_sr_match_rise3_r; reg [4:0] pb_cnt_eye_size_r [DRAM_WIDTH-1:0]; reg [DRAM_WIDTH-1:0] pb_detect_edge_done_r; reg [DRAM_WIDTH-1:0] pb_found_edge_last_r; reg [DRAM_WIDTH-1:0] pb_found_edge_r; reg [DRAM_WIDTH-1:0] pb_found_first_edge_r; reg [DRAM_WIDTH-1:0] pb_found_stable_eye_r; reg [DRAM_WIDTH-1:0] pb_last_tap_jitter_r; reg pi_en_stg2_f_timing; reg pi_stg2_f_incdec_timing; reg pi_stg2_load_timing; reg [5:0] pi_stg2_reg_l_timing; reg [DRAM_WIDTH-1:0] prev_sr_diff_r; reg [RD_SHIFT_LEN-1:0] prev_sr_fall0_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] prev_sr_fall1_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] prev_sr_rise0_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] prev_sr_rise1_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] prev_sr_fall2_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] prev_sr_fall3_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] prev_sr_rise2_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] prev_sr_rise3_r [DRAM_WIDTH-1:0]; reg [DRAM_WIDTH-1:0] prev_sr_match_cyc2_r; reg [DRAM_WIDTH-1:0] prev_sr_match_fall0_r; reg [DRAM_WIDTH-1:0] prev_sr_match_fall1_r; reg [DRAM_WIDTH-1:0] prev_sr_match_rise0_r; reg [DRAM_WIDTH-1:0] prev_sr_match_rise1_r; reg [DRAM_WIDTH-1:0] prev_sr_match_fall2_r; reg [DRAM_WIDTH-1:0] prev_sr_match_fall3_r; reg [DRAM_WIDTH-1:0] prev_sr_match_rise2_r; reg [DRAM_WIDTH-1:0] prev_sr_match_rise3_r; wire [DQ_WIDTH-1:0] rd_data_rise0; wire [DQ_WIDTH-1:0] rd_data_fall0; wire [DQ_WIDTH-1:0] rd_data_rise1; wire [DQ_WIDTH-1:0] rd_data_fall1; wire [DQ_WIDTH-1:0] rd_data_rise2; wire [DQ_WIDTH-1:0] rd_data_fall2; wire [DQ_WIDTH-1:0] rd_data_rise3; wire [DQ_WIDTH-1:0] rd_data_fall3; reg samp_cnt_done_r; reg samp_edge_cnt0_en_r; reg [11:0] samp_edge_cnt0_r; reg samp_edge_cnt1_en_r; reg [11:0] samp_edge_cnt1_r; reg [DQS_CNT_WIDTH:0] rd_mux_sel_r; reg [5:0] second_edge_taps_r; reg [RD_SHIFT_LEN-1:0] sr_fall0_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] sr_fall1_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] sr_rise0_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] sr_rise1_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] sr_fall2_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] sr_fall3_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] sr_rise2_r [DRAM_WIDTH-1:0]; reg [RD_SHIFT_LEN-1:0] sr_rise3_r [DRAM_WIDTH-1:0]; reg store_sr_r; reg store_sr_req_pulsed_r; reg store_sr_req_r; reg sr_valid_r; reg sr_valid_r1; reg sr_valid_r2; reg [DRAM_WIDTH-1:0] old_sr_diff_r; reg [DRAM_WIDTH-1:0] old_sr_match_cyc2_r; reg pat0_data_match_r; reg pat1_data_match_r; wire pat_data_match_r; wire [RD_SHIFT_LEN-1:0] pat0_fall0 [3:0]; wire [RD_SHIFT_LEN-1:0] pat0_fall1 [3:0]; wire [RD_SHIFT_LEN-1:0] pat0_fall2 [3:0]; wire [RD_SHIFT_LEN-1:0] pat0_fall3 [3:0]; wire [RD_SHIFT_LEN-1:0] pat1_fall0 [3:0]; wire [RD_SHIFT_LEN-1:0] pat1_fall1 [3:0]; wire [RD_SHIFT_LEN-1:0] pat1_fall2 [3:0]; wire [RD_SHIFT_LEN-1:0] pat1_fall3 [3:0]; reg [DRAM_WIDTH-1:0] pat0_match_fall0_r; reg pat0_match_fall0_and_r; reg [DRAM_WIDTH-1:0] pat0_match_fall1_r; reg pat0_match_fall1_and_r; reg [DRAM_WIDTH-1:0] pat0_match_fall2_r; reg 
pat0_match_fall2_and_r; reg [DRAM_WIDTH-1:0] pat0_match_fall3_r; reg pat0_match_fall3_and_r; reg [DRAM_WIDTH-1:0] pat0_match_rise0_r; reg pat0_match_rise0_and_r; reg [DRAM_WIDTH-1:0] pat0_match_rise1_r; reg pat0_match_rise1_and_r; reg [DRAM_WIDTH-1:0] pat0_match_rise2_r; reg pat0_match_rise2_and_r; reg [DRAM_WIDTH-1:0] pat0_match_rise3_r; reg pat0_match_rise3_and_r; reg [DRAM_WIDTH-1:0] pat1_match_fall0_r; reg pat1_match_fall0_and_r; reg [DRAM_WIDTH-1:0] pat1_match_fall1_r; reg pat1_match_fall1_and_r; reg [DRAM_WIDTH-1:0] pat1_match_fall2_r; reg pat1_match_fall2_and_r; reg [DRAM_WIDTH-1:0] pat1_match_fall3_r; reg pat1_match_fall3_and_r; reg [DRAM_WIDTH-1:0] pat1_match_rise0_r; reg pat1_match_rise0_and_r; reg [DRAM_WIDTH-1:0] pat1_match_rise1_r; reg pat1_match_rise1_and_r; reg [DRAM_WIDTH-1:0] pat1_match_rise2_r; reg pat1_match_rise2_and_r; reg [DRAM_WIDTH-1:0] pat1_match_rise3_r; reg pat1_match_rise3_and_r; reg [4:0] idelay_tap_cnt_r [0:RANKS-1][0:DQS_WIDTH-1]; reg [5*DQS_WIDTH*RANKS-1:0] idelay_tap_cnt_w; reg [4:0] idelay_tap_cnt_slice_r; reg idelay_tap_limit_r; wire [RD_SHIFT_LEN-1:0] pat0_rise0 [3:0]; wire [RD_SHIFT_LEN-1:0] pat0_rise1 [3:0]; wire [RD_SHIFT_LEN-1:0] pat0_rise2 [3:0]; wire [RD_SHIFT_LEN-1:0] pat0_rise3 [3:0]; wire [RD_SHIFT_LEN-1:0] pat1_rise0 [3:0]; wire [RD_SHIFT_LEN-1:0] pat1_rise1 [3:0]; wire [RD_SHIFT_LEN-1:0] pat1_rise2 [3:0]; wire [RD_SHIFT_LEN-1:0] pat1_rise3 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat0_rise0 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat0_fall0 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat0_rise1 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat0_fall1 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat0_rise2 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat0_fall2 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat0_rise3 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat0_fall3 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat1_rise0 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat1_fall0 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat1_rise1 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat1_fall1 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat1_rise2 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat1_fall2 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat1_rise3 [3:0]; wire [RD_SHIFT_LEN-1:0] idel_pat1_fall3 [3:0]; reg [DRAM_WIDTH-1:0] idel_pat0_match_rise0_r; reg [DRAM_WIDTH-1:0] idel_pat0_match_fall0_r; reg [DRAM_WIDTH-1:0] idel_pat0_match_rise1_r; reg [DRAM_WIDTH-1:0] idel_pat0_match_fall1_r; reg [DRAM_WIDTH-1:0] idel_pat0_match_rise2_r; reg [DRAM_WIDTH-1:0] idel_pat0_match_fall2_r; reg [DRAM_WIDTH-1:0] idel_pat0_match_rise3_r; reg [DRAM_WIDTH-1:0] idel_pat0_match_fall3_r; reg [DRAM_WIDTH-1:0] idel_pat1_match_rise0_r; reg [DRAM_WIDTH-1:0] idel_pat1_match_fall0_r; reg [DRAM_WIDTH-1:0] idel_pat1_match_rise1_r; reg [DRAM_WIDTH-1:0] idel_pat1_match_fall1_r; reg [DRAM_WIDTH-1:0] idel_pat1_match_rise2_r; reg [DRAM_WIDTH-1:0] idel_pat1_match_fall2_r; reg [DRAM_WIDTH-1:0] idel_pat1_match_rise3_r; reg [DRAM_WIDTH-1:0] idel_pat1_match_fall3_r; reg idel_pat0_match_rise0_and_r; reg idel_pat0_match_fall0_and_r; reg idel_pat0_match_rise1_and_r; reg idel_pat0_match_fall1_and_r; reg idel_pat0_match_rise2_and_r; reg idel_pat0_match_fall2_and_r; reg idel_pat0_match_rise3_and_r; reg idel_pat0_match_fall3_and_r; reg idel_pat1_match_rise0_and_r; reg idel_pat1_match_fall0_and_r; reg idel_pat1_match_rise1_and_r; reg idel_pat1_match_fall1_and_r; reg idel_pat1_match_rise2_and_r; reg idel_pat1_match_fall2_and_r; reg idel_pat1_match_rise3_and_r; reg idel_pat1_match_fall3_and_r; reg idel_pat0_data_match_r; reg idel_pat1_data_match_r; reg idel_pat_data_match; reg idel_pat_data_match_r; reg [4:0] 
idel_dec_cnt; reg [5:0] rdlvl_dqs_tap_cnt_r [0:RANKS-1][0:DQS_WIDTH-1]; reg [1:0] rnk_cnt_r; reg rdlvl_rank_done_r; reg [3:0] done_cnt; reg [1:0] regl_rank_cnt; reg [DQS_CNT_WIDTH:0] regl_dqs_cnt; reg [DQS_CNT_WIDTH:0] regl_dqs_cnt_r; wire [DQS_CNT_WIDTH+2:0]regl_dqs_cnt_timing; reg regl_rank_done_r; reg rdlvl_stg1_start_r; reg dqs_po_dec_done_r1; reg dqs_po_dec_done_r2; reg fine_dly_dec_done_r1; reg fine_dly_dec_done_r2; reg [3:0] wait_cnt_r; reg [5:0] pi_rdval_cnt; reg pi_cnt_dec; reg mpr_valid_r; reg mpr_valid_r1; reg mpr_valid_r2; reg mpr_rd_rise0_prev_r; reg mpr_rd_fall0_prev_r; reg mpr_rd_rise1_prev_r; reg mpr_rd_fall1_prev_r; reg mpr_rd_rise2_prev_r; reg mpr_rd_fall2_prev_r; reg mpr_rd_rise3_prev_r; reg mpr_rd_fall3_prev_r; reg mpr_rdlvl_done_r; reg mpr_rdlvl_done_r1; reg mpr_rdlvl_done_r2; reg mpr_rdlvl_start_r; reg mpr_rank_done_r; reg [2:0] stable_idel_cnt; reg inhibit_edge_detect_r; reg idel_pat_detect_valid_r; reg idel_mpr_pat_detect_r; reg mpr_pat_detect_r; reg mpr_dec_cpt_r; // Debug reg [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_first_edge_taps; reg [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_second_edge_taps; reg [6*DQS_WIDTH*RANKS-1:0] dbg_cpt_tap_cnt_w; //*************************************************************************** // Debug //*************************************************************************** always @(*) begin for (d = 0; d < RANKS; d = d + 1) begin for (e = 0; e < DQS_WIDTH; e = e + 1) begin idelay_tap_cnt_w[(5*e+5*DQS_WIDTH*d)+:5] <= #TCQ idelay_tap_cnt_r[d][e]; dbg_cpt_tap_cnt_w[(6*e+6*DQS_WIDTH*d)+:6] <= #TCQ rdlvl_dqs_tap_cnt_r[d][e]; end end end assign mpr_rdlvl_err = rdlvl_stg1_err & (!mpr_rdlvl_done); assign rdlvl_err = rdlvl_stg1_err & (mpr_rdlvl_done); assign dbg_phy_rdlvl[0] = rdlvl_stg1_start; assign dbg_phy_rdlvl[1] = pat_data_match_r; assign dbg_phy_rdlvl[2] = mux_rd_valid_r; assign dbg_phy_rdlvl[3] = idelay_tap_limit_r; assign dbg_phy_rdlvl[8:4] = 'b0; assign dbg_phy_rdlvl[14:9] = cal1_state_r[5:0]; assign dbg_phy_rdlvl[20:15] = cnt_idel_dec_cpt_r; assign dbg_phy_rdlvl[21] = found_first_edge_r; assign dbg_phy_rdlvl[22] = found_second_edge_r; assign dbg_phy_rdlvl[23] = found_edge_r; assign dbg_phy_rdlvl[24] = store_sr_r; // [40:25] previously used for sr, old_sr shift registers. 
  // If connecting
  // these signals again, don't forget to parameterize based on RD_SHIFT_LEN
  assign dbg_phy_rdlvl[40:25] = 'b0;
  assign dbg_phy_rdlvl[41]    = sr_valid_r;
  assign dbg_phy_rdlvl[42]    = found_stable_eye_r;
  assign dbg_phy_rdlvl[48:43] = tap_cnt_cpt_r;
  assign dbg_phy_rdlvl[54:49] = first_edge_taps_r;
  assign dbg_phy_rdlvl[60:55] = second_edge_taps_r;
  assign dbg_phy_rdlvl[64:61] = cal1_cnt_cpt_timing_r;
  assign dbg_phy_rdlvl[65]    = cal1_dlyce_cpt_r;
  assign dbg_phy_rdlvl[66]    = cal1_dlyinc_cpt_r;
  assign dbg_phy_rdlvl[67]    = found_edge_r;
  assign dbg_phy_rdlvl[68]    = found_first_edge_r;
  assign dbg_phy_rdlvl[73:69] = 'b0;
  assign dbg_phy_rdlvl[74]    = idel_pat_data_match;
  assign dbg_phy_rdlvl[75]    = idel_pat0_data_match_r;
  assign dbg_phy_rdlvl[76]    = idel_pat1_data_match_r;
  assign dbg_phy_rdlvl[77]    = pat0_data_match_r;
  assign dbg_phy_rdlvl[78]    = pat1_data_match_r;
  assign dbg_phy_rdlvl[79+:5*DQS_WIDTH*RANKS] = idelay_tap_cnt_w;
  assign dbg_phy_rdlvl[170+:8] = mux_rd_rise0_r;
  assign dbg_phy_rdlvl[178+:8] = mux_rd_fall0_r;
  assign dbg_phy_rdlvl[186+:8] = mux_rd_rise1_r;
  assign dbg_phy_rdlvl[194+:8] = mux_rd_fall1_r;
  assign dbg_phy_rdlvl[202+:8] = mux_rd_rise2_r;
  assign dbg_phy_rdlvl[210+:8] = mux_rd_fall2_r;
  assign dbg_phy_rdlvl[218+:8] = mux_rd_rise3_r;
  assign dbg_phy_rdlvl[226+:8] = mux_rd_fall3_r;

  //***************************************************************************
  // Debug output
  //***************************************************************************

  // CPT taps
  assign dbg_cpt_first_edge_cnt  = dbg_cpt_first_edge_taps;
  assign dbg_cpt_second_edge_cnt = dbg_cpt_second_edge_taps;
  assign dbg_cpt_tap_cnt         = dbg_cpt_tap_cnt_w;
  assign dbg_dq_idelay_tap_cnt   = idelay_tap_cnt_w;

  // Record first and second edges found during CPT calibration
  generate
    always @(posedge clk)
      if (rst) begin
        dbg_cpt_first_edge_taps  <= #TCQ 'b0;
        dbg_cpt_second_edge_taps <= #TCQ 'b0;
      end else if ((SIM_CAL_OPTION == "FAST_CAL") &
                   (cal1_state_r1 == CAL1_CALC_IDEL)) begin
        for (ce_rnk_i = 0; ce_rnk_i < RANKS; ce_rnk_i = ce_rnk_i + 1) begin: gen_dbg_cpt_rnk
          for (ce_i = 0; ce_i < DQS_WIDTH; ce_i = ce_i + 1) begin: gen_dbg_cpt_edge
            if (found_first_edge_r)
              dbg_cpt_first_edge_taps[((6*ce_i)+(ce_rnk_i*DQS_WIDTH*6))+:6]
                <= #TCQ first_edge_taps_r;
            if (found_second_edge_r)
              dbg_cpt_second_edge_taps[((6*ce_i)+(ce_rnk_i*DQS_WIDTH*6))+:6]
                <= #TCQ second_edge_taps_r;
          end
        end
      end else if (cal1_state_r == CAL1_CALC_IDEL) begin
        // Record tap counts of first and second edges during
        // CPT calibration for each DQS group. If neither edge has
        // been found, then those taps will remain 0
        if (found_first_edge_r)
          dbg_cpt_first_edge_taps[(((cal1_cnt_cpt_timing <<2) + (cal1_cnt_cpt_timing <<1))
                                   +(rnk_cnt_r*DQS_WIDTH*6))+:6]
            <= #TCQ first_edge_taps_r;
        if (found_second_edge_r)
          dbg_cpt_second_edge_taps[(((cal1_cnt_cpt_timing <<2) + (cal1_cnt_cpt_timing <<1))
                                    +(rnk_cnt_r*DQS_WIDTH*6))+:6]
            <= #TCQ second_edge_taps_r;
      end
  endgenerate

  assign rdlvl_stg1_rnk_done = rdlvl_rank_done_r;// || regl_rank_done_r;
  assign mpr_rnk_done = mpr_rank_done_r;
  assign mpr_rdlvl_done = ((DRAM_TYPE == "DDR3") && (OCAL_EN == "ON")) ? //&& (SIM_CAL_OPTION == "NONE")
                          mpr_rdlvl_done_r : 1'b1;

  //**************************************************************************
  // DQS count to hard PHY during write calibration using Phaser_OUT Stage2
  // coarse delay
  //**************************************************************************
  assign pi_stg2_rdlvl_cnt = (cal1_state_r == CAL1_REGL_LOAD) ?
                             regl_dqs_cnt_r : cal1_cnt_cpt_r;

  assign idelay_ce  = cal1_dq_idel_ce;
  assign idelay_inc = cal1_dq_idel_inc;

  //***************************************************************************
  // Assert calib_in_common in FAST_CAL mode for IDELAY tap increments to all
  // DQs simultaneously
  //***************************************************************************
  always @(posedge clk) begin
    if (rst)
      rdlvl_assrt_common <= #TCQ 1'b0;
    else if ((SIM_CAL_OPTION == "FAST_CAL") & rdlvl_stg1_start &
             !rdlvl_stg1_start_r)
      rdlvl_assrt_common <= #TCQ 1'b1;
    else if (!idel_pat_data_match_r & idel_pat_data_match)
      rdlvl_assrt_common <= #TCQ 1'b0;
  end

  //***************************************************************************
  // Data mux to route appropriate bit to calibration logic - i.e. calibration
  // is done sequentially, one bit (or DQS group) at a time
  //***************************************************************************
  generate
    if (nCK_PER_CLK == 4) begin: rd_data_div4_logic_clk
      assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
      assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
      assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
      assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
      assign rd_data_rise2 = rd_data[5*DQ_WIDTH-1:4*DQ_WIDTH];
      assign rd_data_fall2 = rd_data[6*DQ_WIDTH-1:5*DQ_WIDTH];
      assign rd_data_rise3 = rd_data[7*DQ_WIDTH-1:6*DQ_WIDTH];
      assign rd_data_fall3 = rd_data[8*DQ_WIDTH-1:7*DQ_WIDTH];
    end else begin: rd_data_div2_logic_clk
      assign rd_data_rise0 = rd_data[DQ_WIDTH-1:0];
      assign rd_data_fall0 = rd_data[2*DQ_WIDTH-1:DQ_WIDTH];
      assign rd_data_rise1 = rd_data[3*DQ_WIDTH-1:2*DQ_WIDTH];
      assign rd_data_fall1 = rd_data[4*DQ_WIDTH-1:3*DQ_WIDTH];
    end
  endgenerate

  always @(posedge clk) begin
    rd_mux_sel_r <= #TCQ cal1_cnt_cpt_r;
  end

  // Register outputs for improved timing.
  // NOTE: Will need to change when per-bit DQ deskew is supported.
  // Currently all bits in the DQS group are checked in aggregate
  generate
    genvar mux_i;
    for (mux_i = 0; mux_i < DRAM_WIDTH; mux_i = mux_i + 1) begin: gen_mux_rd
      always @(posedge clk) begin
        mux_rd_rise0_r[mux_i] <= #TCQ rd_data_rise0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
        mux_rd_fall0_r[mux_i] <= #TCQ rd_data_fall0[DRAM_WIDTH*rd_mux_sel_r + mux_i];
        mux_rd_rise1_r[mux_i] <= #TCQ rd_data_rise1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
        mux_rd_fall1_r[mux_i] <= #TCQ rd_data_fall1[DRAM_WIDTH*rd_mux_sel_r + mux_i];
        mux_rd_rise2_r[mux_i] <= #TCQ rd_data_rise2[DRAM_WIDTH*rd_mux_sel_r + mux_i];
        mux_rd_fall2_r[mux_i] <= #TCQ rd_data_fall2[DRAM_WIDTH*rd_mux_sel_r + mux_i];
        mux_rd_rise3_r[mux_i] <= #TCQ rd_data_rise3[DRAM_WIDTH*rd_mux_sel_r + mux_i];
        mux_rd_fall3_r[mux_i] <= #TCQ rd_data_fall3[DRAM_WIDTH*rd_mux_sel_r + mux_i];
      end
    end
  endgenerate

  //***************************************************************************
  // MPR Read Leveling
  //***************************************************************************

  // storing the previous read data for checking later. Only bit 0 is used
  // since MPR contents (01010101) are available generally on DQ[0] per
  // JEDEC spec.
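  // The always block below latches the previous DQ[0] samples; the generate
  // block that follows compares them against the current samples to flag the
  // IDELAY tap at which the sampled MPR pattern flips from 01010101 to
  // 10101010, i.e. the point where the capture clock has crossed a data edge.
  // Per the comments below, stable_idel_cnt requires the sampled value to
  // have been stable for roughly two IDELAY taps before the flip is trusted,
  // and inhibit_edge_detect_r blocks detection until the expected 01010101
  // pattern has first been observed.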
always @(posedge clk)begin if ((cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) || ((cal1_state_r == CAL1_MPR_PAT_DETECT) && (idel_pat_detect_valid_r)))begin mpr_rd_rise0_prev_r <= #TCQ mux_rd_rise0_r[0]; mpr_rd_fall0_prev_r <= #TCQ mux_rd_fall0_r[0]; mpr_rd_rise1_prev_r <= #TCQ mux_rd_rise1_r[0]; mpr_rd_fall1_prev_r <= #TCQ mux_rd_fall1_r[0]; mpr_rd_rise2_prev_r <= #TCQ mux_rd_rise2_r[0]; mpr_rd_fall2_prev_r <= #TCQ mux_rd_fall2_r[0]; mpr_rd_rise3_prev_r <= #TCQ mux_rd_rise3_r[0]; mpr_rd_fall3_prev_r <= #TCQ mux_rd_fall3_r[0]; end end generate if (nCK_PER_CLK == 4) begin: mpr_4to1 // changed stable count of 2 IDELAY taps at 78 ps resolution always @(posedge clk) begin if (rst | (cal1_state_r == CAL1_NEW_DQS_PREWAIT) | //(cal1_state_r == CAL1_DETECT_EDGE) | (mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) | (mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) | (mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) | (mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]) | (mpr_rd_rise2_prev_r != mux_rd_rise2_r[0]) | (mpr_rd_fall2_prev_r != mux_rd_fall2_r[0]) | (mpr_rd_rise3_prev_r != mux_rd_rise3_r[0]) | (mpr_rd_fall3_prev_r != mux_rd_fall3_r[0])) stable_idel_cnt <= #TCQ 3'd0; else if ((|idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing]) & ((cal1_state_r == CAL1_MPR_PAT_DETECT) & (idel_pat_detect_valid_r))) begin if ((mpr_rd_rise0_prev_r == mux_rd_rise0_r[0]) & (mpr_rd_fall0_prev_r == mux_rd_fall0_r[0]) & (mpr_rd_rise1_prev_r == mux_rd_rise1_r[0]) & (mpr_rd_fall1_prev_r == mux_rd_fall1_r[0]) & (mpr_rd_rise2_prev_r == mux_rd_rise2_r[0]) & (mpr_rd_fall2_prev_r == mux_rd_fall2_r[0]) & (mpr_rd_rise3_prev_r == mux_rd_rise3_r[0]) & (mpr_rd_fall3_prev_r == mux_rd_fall3_r[0]) & (stable_idel_cnt < 3'd2)) stable_idel_cnt <= #TCQ stable_idel_cnt + 1; end end always @(posedge clk) begin if (rst | (mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r & mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r & mpr_rd_rise2_prev_r & ~mpr_rd_fall2_prev_r & mpr_rd_rise3_prev_r & ~mpr_rd_fall3_prev_r)) inhibit_edge_detect_r <= 1'b1; // Wait for settling time after idelay tap increment before // de-asserting inhibit_edge_detect_r else if ((cal1_state_r == CAL1_MPR_PAT_DETECT) & (idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd1) & (~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r & ~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r & ~mpr_rd_rise2_prev_r & mpr_rd_fall2_prev_r & ~mpr_rd_rise3_prev_r & mpr_rd_fall3_prev_r)) inhibit_edge_detect_r <= 1'b0; end //checking for transition from 01010101 to 10101010 always @(posedge clk)begin if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) | inhibit_edge_detect_r) idel_mpr_pat_detect_r <= #TCQ 1'b0; // 10101010 is not the correct pattern else if ((mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r & mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r & mpr_rd_rise2_prev_r & ~mpr_rd_fall2_prev_r & mpr_rd_rise3_prev_r & ~mpr_rd_fall3_prev_r) || ((stable_idel_cnt < 3'd2) & (cal1_state_r == CAL1_MPR_PAT_DETECT) && (idel_pat_detect_valid_r))) //|| (idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] < 5'd2)) idel_mpr_pat_detect_r <= #TCQ 1'b0; // 01010101 to 10101010 is the correct transition else if ((~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r & ~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r & ~mpr_rd_rise2_prev_r & mpr_rd_fall2_prev_r & ~mpr_rd_rise3_prev_r & mpr_rd_fall3_prev_r) & (stable_idel_cnt == 3'd2) & ((mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) || (mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) || (mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) || (mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]) || (mpr_rd_rise2_prev_r != mux_rd_rise2_r[0]) || (mpr_rd_fall2_prev_r != 
mux_rd_fall2_r[0]) || (mpr_rd_rise3_prev_r != mux_rd_rise3_r[0]) || (mpr_rd_fall3_prev_r != mux_rd_fall3_r[0]))) idel_mpr_pat_detect_r <= #TCQ 1'b1; end end else if (nCK_PER_CLK == 2) begin: mpr_2to1 // changed stable count of 2 IDELAY taps at 78 ps resolution always @(posedge clk) begin if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) | (mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) | (mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) | (mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) | (mpr_rd_fall1_prev_r != mux_rd_fall1_r[0])) stable_idel_cnt <= #TCQ 3'd0; else if ((idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd0) & ((cal1_state_r == CAL1_MPR_PAT_DETECT) & (idel_pat_detect_valid_r))) begin if ((mpr_rd_rise0_prev_r == mux_rd_rise0_r[0]) & (mpr_rd_fall0_prev_r == mux_rd_fall0_r[0]) & (mpr_rd_rise1_prev_r == mux_rd_rise1_r[0]) & (mpr_rd_fall1_prev_r == mux_rd_fall1_r[0]) & (stable_idel_cnt < 3'd2)) stable_idel_cnt <= #TCQ stable_idel_cnt + 1; end end always @(posedge clk) begin if (rst | (mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r & mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r)) inhibit_edge_detect_r <= 1'b1; else if ((cal1_state_r == CAL1_MPR_PAT_DETECT) & (idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] > 5'd1) & (~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r & ~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r)) inhibit_edge_detect_r <= 1'b0; end //checking for transition from 01010101 to 10101010 always @(posedge clk)begin if (rst | (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) | inhibit_edge_detect_r) idel_mpr_pat_detect_r <= #TCQ 1'b0; // 1010 is not the correct pattern else if ((mpr_rd_rise0_prev_r & ~mpr_rd_fall0_prev_r & mpr_rd_rise1_prev_r & ~mpr_rd_fall1_prev_r) || ((stable_idel_cnt < 3'd2) & (cal1_state_r == CAL1_MPR_PAT_DETECT) & (idel_pat_detect_valid_r))) // ||(idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] < 5'd2)) idel_mpr_pat_detect_r <= #TCQ 1'b0; // 0101 to 1010 is the correct transition else if ((~mpr_rd_rise0_prev_r & mpr_rd_fall0_prev_r & ~mpr_rd_rise1_prev_r & mpr_rd_fall1_prev_r) & (stable_idel_cnt == 3'd2) & ((mpr_rd_rise0_prev_r != mux_rd_rise0_r[0]) || (mpr_rd_fall0_prev_r != mux_rd_fall0_r[0]) || (mpr_rd_rise1_prev_r != mux_rd_rise1_r[0]) || (mpr_rd_fall1_prev_r != mux_rd_fall1_r[0]))) idel_mpr_pat_detect_r <= #TCQ 1'b1; end end endgenerate // Registered signal indicates when mux_rd_rise/fall_r is valid always @(posedge clk) mux_rd_valid_r <= #TCQ ~phy_if_empty; //*************************************************************************** // Decrement initial Phaser_IN fine delay value before proceeding with // read calibration //*************************************************************************** always @(posedge clk) begin dqs_po_dec_done_r1 <= #TCQ dqs_po_dec_done; dqs_po_dec_done_r2 <= #TCQ dqs_po_dec_done_r1; fine_dly_dec_done_r2 <= #TCQ fine_dly_dec_done_r1; pi_fine_dly_dec_done <= #TCQ fine_dly_dec_done_r2; end always @(posedge clk) begin if (rst || pi_cnt_dec) wait_cnt_r <= #TCQ 'd8; else if (dqs_po_dec_done_r2 && (wait_cnt_r > 'd0)) wait_cnt_r <= #TCQ wait_cnt_r - 1; end always @(posedge clk) begin if (rst) begin pi_rdval_cnt <= #TCQ 'd0; end else if (dqs_po_dec_done_r1 && ~dqs_po_dec_done_r2) begin pi_rdval_cnt <= #TCQ pi_counter_read_val; end else if (pi_rdval_cnt > 'd0) begin if (pi_cnt_dec) pi_rdval_cnt <= #TCQ pi_rdval_cnt - 1; else pi_rdval_cnt <= #TCQ pi_rdval_cnt; end else if (pi_rdval_cnt == 'd0) begin pi_rdval_cnt <= #TCQ pi_rdval_cnt; end end always @(posedge clk) begin if (rst || (pi_rdval_cnt == 'd0)) pi_cnt_dec <= #TCQ 1'b0; else if (dqs_po_dec_done_r2 && 
(pi_rdval_cnt > 'd0) && (wait_cnt_r == 'd1)) pi_cnt_dec <= #TCQ 1'b1; else pi_cnt_dec <= #TCQ 1'b0; end always @(posedge clk) begin if (rst) begin fine_dly_dec_done_r1 <= #TCQ 1'b0; end else if (((pi_cnt_dec == 'd1) && (pi_rdval_cnt == 'd1)) || (dqs_po_dec_done_r2 && (pi_rdval_cnt == 'd0))) begin fine_dly_dec_done_r1 <= #TCQ 1'b1; end end //*************************************************************************** // Demultiplexor to control Phaser_IN delay values //*************************************************************************** // Read DQS always @(posedge clk) begin if (rst) begin pi_en_stg2_f_timing <= #TCQ 'b0; pi_stg2_f_incdec_timing <= #TCQ 'b0; end else if (pi_cnt_dec) begin pi_en_stg2_f_timing <= #TCQ 'b1; pi_stg2_f_incdec_timing <= #TCQ 'b0; end else if (cal1_dlyce_cpt_r) begin if ((SIM_CAL_OPTION == "NONE") || (SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin // Change only specified DQS pi_en_stg2_f_timing <= #TCQ 1'b1; pi_stg2_f_incdec_timing <= #TCQ cal1_dlyinc_cpt_r; end else if (SIM_CAL_OPTION == "FAST_CAL") begin // if simulating, and "shortcuts" for calibration enabled, apply // results to all DQSs (i.e. assume same delay on all // DQSs). pi_en_stg2_f_timing <= #TCQ 1'b1; pi_stg2_f_incdec_timing <= #TCQ cal1_dlyinc_cpt_r; end end else begin pi_en_stg2_f_timing <= #TCQ 'b0; pi_stg2_f_incdec_timing <= #TCQ 'b0; end end // registered for timing always @(posedge clk) begin pi_en_stg2_f <= #TCQ pi_en_stg2_f_timing; pi_stg2_f_incdec <= #TCQ pi_stg2_f_incdec_timing; end // This counter used to implement settling time between // Phaser_IN rank register loads to different DQSs always @(posedge clk) begin if (rst) done_cnt <= #TCQ 'b0; else if (((cal1_state_r == CAL1_REGL_LOAD) && (cal1_state_r1 == CAL1_NEXT_DQS)) || ((done_cnt == 4'd1) && (cal1_state_r != CAL1_DONE))) done_cnt <= #TCQ 4'b1010; else if (done_cnt > 'b0) done_cnt <= #TCQ done_cnt - 1; end // During rank register loading the rank count must be sent to // Phaser_IN via the phy_ctl_wd?? If so phy_init will have to // issue NOPs during rank register loading with the appropriate // rank count always @(posedge clk) begin if (rst || (regl_rank_done_r == 1'b1)) regl_rank_done_r <= #TCQ 1'b0; else if ((regl_dqs_cnt == DQS_WIDTH-1) && (regl_rank_cnt != RANKS-1) && (done_cnt == 4'd1)) regl_rank_done_r <= #TCQ 1'b1; end // Temp wire for timing. // The following in the always block below causes timing issues // due to DSP block inference // 6*regl_dqs_cnt. // replacing this with two left shifts + 1 left shift to avoid // DSP multiplier. assign regl_dqs_cnt_timing = {2'd0, regl_dqs_cnt}; // Load Phaser_OUT rank register with rdlvl delay value // for each DQS per rank. 
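  // Note on the shift/add decomposition referred to above: a constant
  // multiply by 6 can be rewritten as
  //   6*regl_dqs_cnt = 4*regl_dqs_cnt + 2*regl_dqs_cnt
  //                  = (regl_dqs_cnt << 2) + (regl_dqs_cnt << 1)
  // i.e. two shifts feeding one adder, which keeps the index computation in
  // fabric logic instead of inferring a DSP block. The same identity is used
  // earlier in this module when indexing dbg_cpt_first/second_edge_taps with
  // cal1_cnt_cpt_timing.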
always @(posedge clk) begin if (rst || (done_cnt == 4'd0)) begin pi_stg2_load_timing <= #TCQ 'b0; pi_stg2_reg_l_timing <= #TCQ 'b0; end else if ((cal1_state_r == CAL1_REGL_LOAD) && (regl_dqs_cnt <= DQS_WIDTH-1) && (done_cnt == 4'd1)) begin pi_stg2_load_timing <= #TCQ 'b1; pi_stg2_reg_l_timing <= #TCQ rdlvl_dqs_tap_cnt_r[rnk_cnt_r][regl_dqs_cnt]; end else begin pi_stg2_load_timing <= #TCQ 'b0; pi_stg2_reg_l_timing <= #TCQ 'b0; end end // registered for timing always @(posedge clk) begin pi_stg2_load <= #TCQ pi_stg2_load_timing; pi_stg2_reg_l <= #TCQ pi_stg2_reg_l_timing; end always @(posedge clk) begin if (rst || (done_cnt == 4'd0) || (mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2)) regl_rank_cnt <= #TCQ 2'b00; else if ((cal1_state_r == CAL1_REGL_LOAD) && (regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1)) begin if (regl_rank_cnt == RANKS-1) regl_rank_cnt <= #TCQ regl_rank_cnt; else regl_rank_cnt <= #TCQ regl_rank_cnt + 1; end end always @(posedge clk) begin if (rst || (done_cnt == 4'd0) || (mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2)) regl_dqs_cnt <= #TCQ {DQS_CNT_WIDTH+1{1'b0}}; else if ((cal1_state_r == CAL1_REGL_LOAD) && (regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1)) begin if (regl_rank_cnt == RANKS-1) regl_dqs_cnt <= #TCQ regl_dqs_cnt; else regl_dqs_cnt <= #TCQ 'b0; end else if ((cal1_state_r == CAL1_REGL_LOAD) && (regl_dqs_cnt != DQS_WIDTH-1) && (done_cnt == 4'd1)) regl_dqs_cnt <= #TCQ regl_dqs_cnt + 1; else regl_dqs_cnt <= #TCQ regl_dqs_cnt; end always @(posedge clk) regl_dqs_cnt_r <= #TCQ regl_dqs_cnt; //***************************************************************** // DQ Stage 1 CALIBRATION INCREMENT/DECREMENT LOGIC: // The actual IDELAY elements for each of the DQ bits is set via the // DLYVAL parallel load port. However, the stage 1 calibration // algorithm (well most of it) only needs to increment or decrement the DQ // IDELAY value by 1 at any one time. 
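  // In other words, the per-DQ IDELAY values are maintained as soft counters
  // (dlyval_dq_reg_r) that shadow the delay lines: the FSM emits a single
  // increment/decrement pulse (cal1_dlyce_dq_r / cal1_dlyinc_dq_r), the
  // chip-select logic below steers that pulse to the DQ bits of the byte
  // currently selected by cal1_cnt_cpt_r (or to all bytes in FAST_CAL mode),
  // and the updated counter values are then driven out in parallel on
  // dlyval_dq toward the IDELAY DLYVAL load ports.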
//***************************************************************** // Chip-select generation for each of the individual counters tracking // IDELAY tap values for each DQ generate for (z = 0; z < DQS_WIDTH; z = z + 1) begin: gen_dlyce_dq always @(posedge clk) if (rst) dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0; else if (SIM_CAL_OPTION == "SKIP_CAL") // If skipping calibration altogether (only for simulation), no // need to set DQ IODELAY values - they are hardcoded dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0; else if (SIM_CAL_OPTION == "FAST_CAL") begin // If fast calibration option (simulation only) selected, DQ // IODELAYs across all bytes are updated simultaneously // (although per-bit deskew within DQS[0] is still supported) for (h = 0; h < DRAM_WIDTH; h = h + 1) begin dlyce_dq_r[DRAM_WIDTH*z + h] <= #TCQ cal1_dlyce_dq_r; end end else if ((SIM_CAL_OPTION == "NONE") || (SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin if (cal1_cnt_cpt_r == z) begin for (g = 0; g < DRAM_WIDTH; g = g + 1) begin dlyce_dq_r[DRAM_WIDTH*z + g] <= #TCQ cal1_dlyce_dq_r; end end else dlyce_dq_r[DRAM_WIDTH*z+:DRAM_WIDTH] <= #TCQ 'b0; end end endgenerate // Also delay increment/decrement control to match delay on DLYCE always @(posedge clk) if (rst) dlyinc_dq_r <= #TCQ 1'b0; else dlyinc_dq_r <= #TCQ cal1_dlyinc_dq_r; // Each DQ has a counter associated with it to record current read-leveling // delay value always @(posedge clk) // Reset or skipping calibration all together if (rst | (SIM_CAL_OPTION == "SKIP_CAL")) begin for (aa = 0; aa < RANKS; aa = aa + 1) begin: rst_dlyval_dq_reg_r for (bb = 0; bb < DQ_WIDTH; bb = bb + 1) dlyval_dq_reg_r[aa][bb] <= #TCQ 'b0; end end else if (SIM_CAL_OPTION == "FAST_CAL") begin for (n = 0; n < RANKS; n = n + 1) begin: gen_dlyval_dq_reg_rnk for (r = 0; r < DQ_WIDTH; r = r + 1) begin: gen_dlyval_dq_reg if (dlyce_dq_r[r]) begin if (dlyinc_dq_r) dlyval_dq_reg_r[n][r] <= #TCQ dlyval_dq_reg_r[n][r] + 5'h01; else dlyval_dq_reg_r[n][r] <= #TCQ dlyval_dq_reg_r[n][r] - 5'h01; end end end end else begin if (dlyce_dq_r[cal1_cnt_cpt_r]) begin if (dlyinc_dq_r) dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] <= #TCQ dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] + 5'h01; else dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] <= #TCQ dlyval_dq_reg_r[rnk_cnt_r][cal1_cnt_cpt_r] - 5'h01; end end // Register for timing (help with logic placement) always @(posedge clk) begin for (cc = 0; cc < RANKS; cc = cc + 1) begin: dlyval_dq_assgn for (dd = 0; dd < DQ_WIDTH; dd = dd + 1) dlyval_dq[((5*dd)+(cc*DQ_WIDTH*5))+:5] <= #TCQ dlyval_dq_reg_r[cc][dd]; end end //*************************************************************************** // Generate signal used to delay calibration state machine - used when: // (1) IDELAY value changed // (2) RD_MUX_SEL value changed // Use when a delay is necessary to give the change time to propagate // through the data pipeline (through IDELAY and ISERDES, and fabric // pipeline stages) //*************************************************************************** // List all the stage 1 calibration wait states here. 
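  // Each of these wait states holds the FSM for PIPE_WAIT_CNT cycles: a
  // change to an IDELAY value or to rd_mux_sel_r takes several cycles to
  // propagate through the IDELAY/ISERDES capture path and the fabric pipeline
  // stages, so cal1_wait_cnt_en_r starts the counter below and cal1_wait_r
  // stays asserted until the count reaches PIPE_WAIT_CNT-1, at which point
  // the captured data is safe to compare again.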
  always @(posedge clk)
    if ((cal1_state_r == CAL1_NEW_DQS_WAIT) ||
        (cal1_state_r == CAL1_MPR_NEW_DQS_WAIT) ||
        (cal1_state_r == CAL1_NEW_DQS_PREWAIT) ||
        (cal1_state_r == CAL1_VALID_WAIT) ||
        (cal1_state_r == CAL1_PB_STORE_FIRST_WAIT) ||
        (cal1_state_r == CAL1_PB_INC_CPT_WAIT) ||
        (cal1_state_r == CAL1_PB_DEC_CPT_LEFT_WAIT) ||
        (cal1_state_r == CAL1_PB_INC_DQ_WAIT) ||
        (cal1_state_r == CAL1_PB_DEC_CPT_WAIT) ||
        (cal1_state_r == CAL1_IDEL_INC_CPT_WAIT) ||
        (cal1_state_r == CAL1_IDEL_DEC_CPT_WAIT) ||
        (cal1_state_r == CAL1_STORE_FIRST_WAIT) ||
        (cal1_state_r == CAL1_DQ_IDEL_TAP_INC_WAIT) ||
        (cal1_state_r == CAL1_DQ_IDEL_TAP_DEC_WAIT))
      cal1_wait_cnt_en_r <= #TCQ 1'b1;
    else
      cal1_wait_cnt_en_r <= #TCQ 1'b0;

  always @(posedge clk)
    if (!cal1_wait_cnt_en_r) begin
      cal1_wait_cnt_r <= #TCQ 5'b00000;
      cal1_wait_r     <= #TCQ 1'b1;
    end else begin
      if (cal1_wait_cnt_r != PIPE_WAIT_CNT - 1) begin
        cal1_wait_cnt_r <= #TCQ cal1_wait_cnt_r + 1;
        cal1_wait_r     <= #TCQ 1'b1;
      end else begin
        // Need to reset to 0 to handle the case when there are two
        // different WAIT states back-to-back
        cal1_wait_cnt_r <= #TCQ 5'b00000;
        cal1_wait_r     <= #TCQ 1'b0;
      end
    end

  //***************************************************************************
  // Generate request to PHY_INIT logic to issue precharges. Required when
  // calibration can take a long time (during which only continuous reads are
  // present on this bus). In this case periodic precharges must be issued to
  // avoid tRAS violation. This signal must meet the following requirements:
  // (1) only transition from 0->1 when a precharge is first needed,
  // (2) stay at 1 and only transition 1->0 when RDLVL_PRECH_DONE is asserted
  //***************************************************************************
  always @(posedge clk)
    if (rst)
      rdlvl_prech_req <= #TCQ 1'b0;
    else
      rdlvl_prech_req <= #TCQ cal1_prech_req_r;

  //***************************************************************************
  // Serial-to-parallel register to store last RDDATA_SHIFT_LEN cycles of
  // data from ISERDES.
The value of this register is also stored, so that // previous and current values of the ISERDES data can be compared while // varying the IODELAY taps to see if an "edge" of the data valid window // has been encountered since the last IODELAY tap adjustment //*************************************************************************** //*************************************************************************** // Shift register to store last RDDATA_SHIFT_LEN cycles of data from ISERDES // NOTE: Written using discrete flops, but SRL can be used if the matching // logic does the comparison sequentially, rather than parallel //*************************************************************************** generate genvar rd_i; if (nCK_PER_CLK == 4) begin: gen_sr_div4 if (RD_SHIFT_LEN == 1) begin: gen_sr_len_eq1 for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr always @(posedge clk) begin if (mux_rd_valid_r) begin sr_rise0_r[rd_i] <= #TCQ mux_rd_rise0_r[rd_i]; sr_fall0_r[rd_i] <= #TCQ mux_rd_fall0_r[rd_i]; sr_rise1_r[rd_i] <= #TCQ mux_rd_rise1_r[rd_i]; sr_fall1_r[rd_i] <= #TCQ mux_rd_fall1_r[rd_i]; sr_rise2_r[rd_i] <= #TCQ mux_rd_rise2_r[rd_i]; sr_fall2_r[rd_i] <= #TCQ mux_rd_fall2_r[rd_i]; sr_rise3_r[rd_i] <= #TCQ mux_rd_rise3_r[rd_i]; sr_fall3_r[rd_i] <= #TCQ mux_rd_fall3_r[rd_i]; end end end end else if (RD_SHIFT_LEN > 1) begin: gen_sr_len_gt1 for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr always @(posedge clk) begin if (mux_rd_valid_r) begin sr_rise0_r[rd_i] <= #TCQ {sr_rise0_r[rd_i][RD_SHIFT_LEN-2:0], mux_rd_rise0_r[rd_i]}; sr_fall0_r[rd_i] <= #TCQ {sr_fall0_r[rd_i][RD_SHIFT_LEN-2:0], mux_rd_fall0_r[rd_i]}; sr_rise1_r[rd_i] <= #TCQ {sr_rise1_r[rd_i][RD_SHIFT_LEN-2:0], mux_rd_rise1_r[rd_i]}; sr_fall1_r[rd_i] <= #TCQ {sr_fall1_r[rd_i][RD_SHIFT_LEN-2:0], mux_rd_fall1_r[rd_i]}; sr_rise2_r[rd_i] <= #TCQ {sr_rise2_r[rd_i][RD_SHIFT_LEN-2:0], mux_rd_rise2_r[rd_i]}; sr_fall2_r[rd_i] <= #TCQ {sr_fall2_r[rd_i][RD_SHIFT_LEN-2:0], mux_rd_fall2_r[rd_i]}; sr_rise3_r[rd_i] <= #TCQ {sr_rise3_r[rd_i][RD_SHIFT_LEN-2:0], mux_rd_rise3_r[rd_i]}; sr_fall3_r[rd_i] <= #TCQ {sr_fall3_r[rd_i][RD_SHIFT_LEN-2:0], mux_rd_fall3_r[rd_i]}; end end end end end else if (nCK_PER_CLK == 2) begin: gen_sr_div2 if (RD_SHIFT_LEN == 1) begin: gen_sr_len_eq1 for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr always @(posedge clk) begin if (mux_rd_valid_r) begin sr_rise0_r[rd_i] <= #TCQ {mux_rd_rise0_r[rd_i]}; sr_fall0_r[rd_i] <= #TCQ {mux_rd_fall0_r[rd_i]}; sr_rise1_r[rd_i] <= #TCQ {mux_rd_rise1_r[rd_i]}; sr_fall1_r[rd_i] <= #TCQ {mux_rd_fall1_r[rd_i]}; end end end end else if (RD_SHIFT_LEN > 1) begin: gen_sr_len_gt1 for (rd_i = 0; rd_i < DRAM_WIDTH; rd_i = rd_i + 1) begin: gen_sr always @(posedge clk) begin if (mux_rd_valid_r) begin sr_rise0_r[rd_i] <= #TCQ {sr_rise0_r[rd_i][RD_SHIFT_LEN-2:0], mux_rd_rise0_r[rd_i]}; sr_fall0_r[rd_i] <= #TCQ {sr_fall0_r[rd_i][RD_SHIFT_LEN-2:0], mux_rd_fall0_r[rd_i]}; sr_rise1_r[rd_i] <= #TCQ {sr_rise1_r[rd_i][RD_SHIFT_LEN-2:0], mux_rd_rise1_r[rd_i]}; sr_fall1_r[rd_i] <= #TCQ {sr_fall1_r[rd_i][RD_SHIFT_LEN-2:0], mux_rd_fall1_r[rd_i]}; end end end end end endgenerate //*************************************************************************** // Conversion to pattern calibration //*************************************************************************** // Pattern for DQ IDELAY calibration //***************************************************************** // Expected data pattern when DQ shifted to the right such that // DQS before the left edge of the 
DVW: // Based on pattern of ({rise,fall}) = // 0x1, 0xB, 0x4, 0x4, 0xB, 0x9 // Each nibble will look like: // bit3: 0, 1, 0, 0, 1, 1 // bit2: 0, 0, 1, 1, 0, 0 // bit1: 0, 1, 0, 0, 1, 0 // bit0: 1, 1, 0, 0, 1, 1 // Or if the write is early it could look like: // 0x4, 0x4, 0xB, 0x9, 0x6, 0xE // bit3: 0, 0, 1, 1, 0, 1 // bit2: 1, 1, 0, 0, 1, 1 // bit1: 0, 0, 1, 0, 1, 1 // bit0: 0, 0, 1, 1, 0, 0 // Change the hard-coded pattern below accordingly as RD_SHIFT_LEN // and the actual training pattern contents change //***************************************************************** generate if (nCK_PER_CLK == 4) begin: gen_pat_div4 // Pattern for DQ IDELAY increment // Target pattern for "early write" assign {idel_pat0_rise0[3], idel_pat0_rise0[2], idel_pat0_rise0[1], idel_pat0_rise0[0]} = 4'h1; assign {idel_pat0_fall0[3], idel_pat0_fall0[2], idel_pat0_fall0[1], idel_pat0_fall0[0]} = 4'h7; assign {idel_pat0_rise1[3], idel_pat0_rise1[2], idel_pat0_rise1[1], idel_pat0_rise1[0]} = 4'hE; assign {idel_pat0_fall1[3], idel_pat0_fall1[2], idel_pat0_fall1[1], idel_pat0_fall1[0]} = 4'hC; assign {idel_pat0_rise2[3], idel_pat0_rise2[2], idel_pat0_rise2[1], idel_pat0_rise2[0]} = 4'h9; assign {idel_pat0_fall2[3], idel_pat0_fall2[2], idel_pat0_fall2[1], idel_pat0_fall2[0]} = 4'h2; assign {idel_pat0_rise3[3], idel_pat0_rise3[2], idel_pat0_rise3[1], idel_pat0_rise3[0]} = 4'h4; assign {idel_pat0_fall3[3], idel_pat0_fall3[2], idel_pat0_fall3[1], idel_pat0_fall3[0]} = 4'hB; // Target pattern for "on-time write" assign {idel_pat1_rise0[3], idel_pat1_rise0[2], idel_pat1_rise0[1], idel_pat1_rise0[0]} = 4'h4; assign {idel_pat1_fall0[3], idel_pat1_fall0[2], idel_pat1_fall0[1], idel_pat1_fall0[0]} = 4'h9; assign {idel_pat1_rise1[3], idel_pat1_rise1[2], idel_pat1_rise1[1], idel_pat1_rise1[0]} = 4'h3; assign {idel_pat1_fall1[3], idel_pat1_fall1[2], idel_pat1_fall1[1], idel_pat1_fall1[0]} = 4'h7; assign {idel_pat1_rise2[3], idel_pat1_rise2[2], idel_pat1_rise2[1], idel_pat1_rise2[0]} = 4'hE; assign {idel_pat1_fall2[3], idel_pat1_fall2[2], idel_pat1_fall2[1], idel_pat1_fall2[0]} = 4'hC; assign {idel_pat1_rise3[3], idel_pat1_rise3[2], idel_pat1_rise3[1], idel_pat1_rise3[0]} = 4'h9; assign {idel_pat1_fall3[3], idel_pat1_fall3[2], idel_pat1_fall3[1], idel_pat1_fall3[0]} = 4'h2; // Correct data valid window for "early write" assign {pat0_rise0[3], pat0_rise0[2], pat0_rise0[1], pat0_rise0[0]} = 4'h7; assign {pat0_fall0[3], pat0_fall0[2], pat0_fall0[1], pat0_fall0[0]} = 4'hE; assign {pat0_rise1[3], pat0_rise1[2], pat0_rise1[1], pat0_rise1[0]} = 4'hC; assign {pat0_fall1[3], pat0_fall1[2], pat0_fall1[1], pat0_fall1[0]} = 4'h9; assign {pat0_rise2[3], pat0_rise2[2], pat0_rise2[1], pat0_rise2[0]} = 4'h2; assign {pat0_fall2[3], pat0_fall2[2], pat0_fall2[1], pat0_fall2[0]} = 4'h4; assign {pat0_rise3[3], pat0_rise3[2], pat0_rise3[1], pat0_rise3[0]} = 4'hB; assign {pat0_fall3[3], pat0_fall3[2], pat0_fall3[1], pat0_fall3[0]} = 4'h1; // Correct data valid window for "on-time write" assign {pat1_rise0[3], pat1_rise0[2], pat1_rise0[1], pat1_rise0[0]} = 4'h9; assign {pat1_fall0[3], pat1_fall0[2], pat1_fall0[1], pat1_fall0[0]} = 4'h3; assign {pat1_rise1[3], pat1_rise1[2], pat1_rise1[1], pat1_rise1[0]} = 4'h7; assign {pat1_fall1[3], pat1_fall1[2], pat1_fall1[1], pat1_fall1[0]} = 4'hE; assign {pat1_rise2[3], pat1_rise2[2], pat1_rise2[1], pat1_rise2[0]} = 4'hC; assign {pat1_fall2[3], pat1_fall2[2], pat1_fall2[1], pat1_fall2[0]} = 4'h9; assign {pat1_rise3[3], pat1_rise3[2], pat1_rise3[1], pat1_rise3[0]} = 4'h2; assign {pat1_fall3[3], pat1_fall3[2], 
pat1_fall3[1], pat1_fall3[0]} = 4'h4; end else if (nCK_PER_CLK == 2) begin: gen_pat_div2 // Pattern for DQ IDELAY increment // Target pattern for "early write" assign idel_pat0_rise0[3] = 2'b01; assign idel_pat0_fall0[3] = 2'b00; assign idel_pat0_rise1[3] = 2'b10; assign idel_pat0_fall1[3] = 2'b11; assign idel_pat0_rise0[2] = 2'b00; assign idel_pat0_fall0[2] = 2'b10; assign idel_pat0_rise1[2] = 2'b11; assign idel_pat0_fall1[2] = 2'b10; assign idel_pat0_rise0[1] = 2'b00; assign idel_pat0_fall0[1] = 2'b11; assign idel_pat0_rise1[1] = 2'b10; assign idel_pat0_fall1[1] = 2'b01; assign idel_pat0_rise0[0] = 2'b11; assign idel_pat0_fall0[0] = 2'b10; assign idel_pat0_rise1[0] = 2'b00; assign idel_pat0_fall1[0] = 2'b01; // Target pattern for "on-time write" assign idel_pat1_rise0[3] = 2'b01; assign idel_pat1_fall0[3] = 2'b11; assign idel_pat1_rise1[3] = 2'b01; assign idel_pat1_fall1[3] = 2'b00; assign idel_pat1_rise0[2] = 2'b11; assign idel_pat1_fall0[2] = 2'b01; assign idel_pat1_rise1[2] = 2'b00; assign idel_pat1_fall1[2] = 2'b10; assign idel_pat1_rise0[1] = 2'b01; assign idel_pat1_fall0[1] = 2'b00; assign idel_pat1_rise1[1] = 2'b10; assign idel_pat1_fall1[1] = 2'b11; assign idel_pat1_rise0[0] = 2'b00; assign idel_pat1_fall0[0] = 2'b10; assign idel_pat1_rise1[0] = 2'b11; assign idel_pat1_fall1[0] = 2'b10; // Correct data valid window for "early write" assign pat0_rise0[3] = 2'b00; assign pat0_fall0[3] = 2'b10; assign pat0_rise1[3] = 2'b11; assign pat0_fall1[3] = 2'b10; assign pat0_rise0[2] = 2'b10; assign pat0_fall0[2] = 2'b11; assign pat0_rise1[2] = 2'b10; assign pat0_fall1[2] = 2'b00; assign pat0_rise0[1] = 2'b11; assign pat0_fall0[1] = 2'b10; assign pat0_rise1[1] = 2'b01; assign pat0_fall1[1] = 2'b00; assign pat0_rise0[0] = 2'b10; assign pat0_fall0[0] = 2'b00; assign pat0_rise1[0] = 2'b01; assign pat0_fall1[0] = 2'b11; // Correct data valid window for "on-time write" assign pat1_rise0[3] = 2'b11; assign pat1_fall0[3] = 2'b01; assign pat1_rise1[3] = 2'b00; assign pat1_fall1[3] = 2'b10; assign pat1_rise0[2] = 2'b01; assign pat1_fall0[2] = 2'b00; assign pat1_rise1[2] = 2'b10; assign pat1_fall1[2] = 2'b11; assign pat1_rise0[1] = 2'b00; assign pat1_fall0[1] = 2'b10; assign pat1_rise1[1] = 2'b11; assign pat1_fall1[1] = 2'b10; assign pat1_rise0[0] = 2'b10; assign pat1_fall0[0] = 2'b11; assign pat1_rise1[0] = 2'b10; assign pat1_fall1[0] = 2'b00; end endgenerate // Each bit of each byte is compared to expected pattern. // This was done to prevent (and "drastically decrease") the chance that // invalid data clocked in when the DQ bus is tri-state (along with a // combination of the correct data) will resemble the expected data // pattern. A better fix for this is to change the training pattern and/or // make the pattern longer. 
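  // The generate block below implements the comparison as a short register
  // pipeline: first, each DQ bit's shift register is compared against the
  // expected training-pattern value for its position; next, those per-bit
  // flags are AND-reduced across the DQS group; finally, the rise/fall
  // subterms are combined into the idel_pat*_data_match / pat*_data_match
  // flags. The pt_i%4 index selects which bit of the 4-bit-wide pattern
  // applies to a given DQ position; e.g. for an x8 group, DQ bit 5 is
  // compared against pattern bit 5%4 = 1, since the expected pattern is
  // defined per nibble and repeats every 4 bits. Two pattern sets
  // (pat0/idel_pat0 vs pat1/idel_pat1) are checked so that both the
  // "early write" and "on-time write" alignments are recognized.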
generate genvar pt_i; if (nCK_PER_CLK == 4) begin: gen_pat_match_div4 for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match // DQ IDELAY pattern detection always @(posedge clk) begin if (sr_rise0_r[pt_i] == idel_pat0_rise0[pt_i%4]) idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b1; else idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b0; if (sr_fall0_r[pt_i] == idel_pat0_fall0[pt_i%4]) idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b1; else idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b0; if (sr_rise1_r[pt_i] == idel_pat0_rise1[pt_i%4]) idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b1; else idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b0; if (sr_fall1_r[pt_i] == idel_pat0_fall1[pt_i%4]) idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b1; else idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b0; if (sr_rise2_r[pt_i] == idel_pat0_rise2[pt_i%4]) idel_pat0_match_rise2_r[pt_i] <= #TCQ 1'b1; else idel_pat0_match_rise2_r[pt_i] <= #TCQ 1'b0; if (sr_fall2_r[pt_i] == idel_pat0_fall2[pt_i%4]) idel_pat0_match_fall2_r[pt_i] <= #TCQ 1'b1; else idel_pat0_match_fall2_r[pt_i] <= #TCQ 1'b0; if (sr_rise3_r[pt_i] == idel_pat0_rise3[pt_i%4]) idel_pat0_match_rise3_r[pt_i] <= #TCQ 1'b1; else idel_pat0_match_rise3_r[pt_i] <= #TCQ 1'b0; if (sr_fall3_r[pt_i] == idel_pat0_fall3[pt_i%4]) idel_pat0_match_fall3_r[pt_i] <= #TCQ 1'b1; else idel_pat0_match_fall3_r[pt_i] <= #TCQ 1'b0; end always @(posedge clk) begin if (sr_rise0_r[pt_i] == idel_pat1_rise0[pt_i%4]) idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b1; else idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b0; if (sr_fall0_r[pt_i] == idel_pat1_fall0[pt_i%4]) idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b1; else idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b0; if (sr_rise1_r[pt_i] == idel_pat1_rise1[pt_i%4]) idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b1; else idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b0; if (sr_fall1_r[pt_i] == idel_pat1_fall1[pt_i%4]) idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b1; else idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b0; if (sr_rise2_r[pt_i] == idel_pat1_rise2[pt_i%4]) idel_pat1_match_rise2_r[pt_i] <= #TCQ 1'b1; else idel_pat1_match_rise2_r[pt_i] <= #TCQ 1'b0; if (sr_fall2_r[pt_i] == idel_pat1_fall2[pt_i%4]) idel_pat1_match_fall2_r[pt_i] <= #TCQ 1'b1; else idel_pat1_match_fall2_r[pt_i] <= #TCQ 1'b0; if (sr_rise3_r[pt_i] == idel_pat1_rise3[pt_i%4]) idel_pat1_match_rise3_r[pt_i] <= #TCQ 1'b1; else idel_pat1_match_rise3_r[pt_i] <= #TCQ 1'b0; if (sr_fall3_r[pt_i] == idel_pat1_fall3[pt_i%4]) idel_pat1_match_fall3_r[pt_i] <= #TCQ 1'b1; else idel_pat1_match_fall3_r[pt_i] <= #TCQ 1'b0; end // DQS DVW pattern detection always @(posedge clk) begin if (sr_rise0_r[pt_i] == pat0_rise0[pt_i%4]) pat0_match_rise0_r[pt_i] <= #TCQ 1'b1; else pat0_match_rise0_r[pt_i] <= #TCQ 1'b0; if (sr_fall0_r[pt_i] == pat0_fall0[pt_i%4]) pat0_match_fall0_r[pt_i] <= #TCQ 1'b1; else pat0_match_fall0_r[pt_i] <= #TCQ 1'b0; if (sr_rise1_r[pt_i] == pat0_rise1[pt_i%4]) pat0_match_rise1_r[pt_i] <= #TCQ 1'b1; else pat0_match_rise1_r[pt_i] <= #TCQ 1'b0; if (sr_fall1_r[pt_i] == pat0_fall1[pt_i%4]) pat0_match_fall1_r[pt_i] <= #TCQ 1'b1; else pat0_match_fall1_r[pt_i] <= #TCQ 1'b0; if (sr_rise2_r[pt_i] == pat0_rise2[pt_i%4]) pat0_match_rise2_r[pt_i] <= #TCQ 1'b1; else pat0_match_rise2_r[pt_i] <= #TCQ 1'b0; if (sr_fall2_r[pt_i] == pat0_fall2[pt_i%4]) pat0_match_fall2_r[pt_i] <= #TCQ 1'b1; else pat0_match_fall2_r[pt_i] <= #TCQ 1'b0; if (sr_rise3_r[pt_i] == pat0_rise3[pt_i%4]) pat0_match_rise3_r[pt_i] <= #TCQ 1'b1; else pat0_match_rise3_r[pt_i] <= #TCQ 1'b0; if (sr_fall3_r[pt_i] == pat0_fall3[pt_i%4]) pat0_match_fall3_r[pt_i] <= #TCQ 1'b1; else 
pat0_match_fall3_r[pt_i] <= #TCQ 1'b0; end always @(posedge clk) begin if (sr_rise0_r[pt_i] == pat1_rise0[pt_i%4]) pat1_match_rise0_r[pt_i] <= #TCQ 1'b1; else pat1_match_rise0_r[pt_i] <= #TCQ 1'b0; if (sr_fall0_r[pt_i] == pat1_fall0[pt_i%4]) pat1_match_fall0_r[pt_i] <= #TCQ 1'b1; else pat1_match_fall0_r[pt_i] <= #TCQ 1'b0; if (sr_rise1_r[pt_i] == pat1_rise1[pt_i%4]) pat1_match_rise1_r[pt_i] <= #TCQ 1'b1; else pat1_match_rise1_r[pt_i] <= #TCQ 1'b0; if (sr_fall1_r[pt_i] == pat1_fall1[pt_i%4]) pat1_match_fall1_r[pt_i] <= #TCQ 1'b1; else pat1_match_fall1_r[pt_i] <= #TCQ 1'b0; if (sr_rise2_r[pt_i] == pat1_rise2[pt_i%4]) pat1_match_rise2_r[pt_i] <= #TCQ 1'b1; else pat1_match_rise2_r[pt_i] <= #TCQ 1'b0; if (sr_fall2_r[pt_i] == pat1_fall2[pt_i%4]) pat1_match_fall2_r[pt_i] <= #TCQ 1'b1; else pat1_match_fall2_r[pt_i] <= #TCQ 1'b0; if (sr_rise3_r[pt_i] == pat1_rise3[pt_i%4]) pat1_match_rise3_r[pt_i] <= #TCQ 1'b1; else pat1_match_rise3_r[pt_i] <= #TCQ 1'b0; if (sr_fall3_r[pt_i] == pat1_fall3[pt_i%4]) pat1_match_fall3_r[pt_i] <= #TCQ 1'b1; else pat1_match_fall3_r[pt_i] <= #TCQ 1'b0; end end // Combine pattern match "subterms" for DQ-IDELAY stage always @(posedge clk) begin idel_pat0_match_rise0_and_r <= #TCQ &idel_pat0_match_rise0_r; idel_pat0_match_fall0_and_r <= #TCQ &idel_pat0_match_fall0_r; idel_pat0_match_rise1_and_r <= #TCQ &idel_pat0_match_rise1_r; idel_pat0_match_fall1_and_r <= #TCQ &idel_pat0_match_fall1_r; idel_pat0_match_rise2_and_r <= #TCQ &idel_pat0_match_rise2_r; idel_pat0_match_fall2_and_r <= #TCQ &idel_pat0_match_fall2_r; idel_pat0_match_rise3_and_r <= #TCQ &idel_pat0_match_rise3_r; idel_pat0_match_fall3_and_r <= #TCQ &idel_pat0_match_fall3_r; idel_pat0_data_match_r <= #TCQ (idel_pat0_match_rise0_and_r && idel_pat0_match_fall0_and_r && idel_pat0_match_rise1_and_r && idel_pat0_match_fall1_and_r && idel_pat0_match_rise2_and_r && idel_pat0_match_fall2_and_r && idel_pat0_match_rise3_and_r && idel_pat0_match_fall3_and_r); end always @(posedge clk) begin idel_pat1_match_rise0_and_r <= #TCQ &idel_pat1_match_rise0_r; idel_pat1_match_fall0_and_r <= #TCQ &idel_pat1_match_fall0_r; idel_pat1_match_rise1_and_r <= #TCQ &idel_pat1_match_rise1_r; idel_pat1_match_fall1_and_r <= #TCQ &idel_pat1_match_fall1_r; idel_pat1_match_rise2_and_r <= #TCQ &idel_pat1_match_rise2_r; idel_pat1_match_fall2_and_r <= #TCQ &idel_pat1_match_fall2_r; idel_pat1_match_rise3_and_r <= #TCQ &idel_pat1_match_rise3_r; idel_pat1_match_fall3_and_r <= #TCQ &idel_pat1_match_fall3_r; idel_pat1_data_match_r <= #TCQ (idel_pat1_match_rise0_and_r && idel_pat1_match_fall0_and_r && idel_pat1_match_rise1_and_r && idel_pat1_match_fall1_and_r && idel_pat1_match_rise2_and_r && idel_pat1_match_fall2_and_r && idel_pat1_match_rise3_and_r && idel_pat1_match_fall3_and_r); end always @(idel_pat0_data_match_r or idel_pat1_data_match_r) idel_pat_data_match <= #TCQ idel_pat0_data_match_r | idel_pat1_data_match_r; always @(posedge clk) idel_pat_data_match_r <= #TCQ idel_pat_data_match; // Combine pattern match "subterms" for DQS-PHASER_IN stage always @(posedge clk) begin pat0_match_rise0_and_r <= #TCQ &pat0_match_rise0_r; pat0_match_fall0_and_r <= #TCQ &pat0_match_fall0_r; pat0_match_rise1_and_r <= #TCQ &pat0_match_rise1_r; pat0_match_fall1_and_r <= #TCQ &pat0_match_fall1_r; pat0_match_rise2_and_r <= #TCQ &pat0_match_rise2_r; pat0_match_fall2_and_r <= #TCQ &pat0_match_fall2_r; pat0_match_rise3_and_r <= #TCQ &pat0_match_rise3_r; pat0_match_fall3_and_r <= #TCQ &pat0_match_fall3_r; pat0_data_match_r <= #TCQ (pat0_match_rise0_and_r && pat0_match_fall0_and_r 
&& pat0_match_rise1_and_r && pat0_match_fall1_and_r && pat0_match_rise2_and_r && pat0_match_fall2_and_r && pat0_match_rise3_and_r && pat0_match_fall3_and_r); end always @(posedge clk) begin pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r; pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r; pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r; pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r; pat1_match_rise2_and_r <= #TCQ &pat1_match_rise2_r; pat1_match_fall2_and_r <= #TCQ &pat1_match_fall2_r; pat1_match_rise3_and_r <= #TCQ &pat1_match_rise3_r; pat1_match_fall3_and_r <= #TCQ &pat1_match_fall3_r; pat1_data_match_r <= #TCQ (pat1_match_rise0_and_r && pat1_match_fall0_and_r && pat1_match_rise1_and_r && pat1_match_fall1_and_r && pat1_match_rise2_and_r && pat1_match_fall2_and_r && pat1_match_rise3_and_r && pat1_match_fall3_and_r); end assign pat_data_match_r = pat0_data_match_r | pat1_data_match_r; end else if (nCK_PER_CLK == 2) begin: gen_pat_match_div2 for (pt_i = 0; pt_i < DRAM_WIDTH; pt_i = pt_i + 1) begin: gen_pat_match // DQ IDELAY pattern detection always @(posedge clk) begin if (sr_rise0_r[pt_i] == idel_pat0_rise0[pt_i%4]) idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b1; else idel_pat0_match_rise0_r[pt_i] <= #TCQ 1'b0; if (sr_fall0_r[pt_i] == idel_pat0_fall0[pt_i%4]) idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b1; else idel_pat0_match_fall0_r[pt_i] <= #TCQ 1'b0; if (sr_rise1_r[pt_i] == idel_pat0_rise1[pt_i%4]) idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b1; else idel_pat0_match_rise1_r[pt_i] <= #TCQ 1'b0; if (sr_fall1_r[pt_i] == idel_pat0_fall1[pt_i%4]) idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b1; else idel_pat0_match_fall1_r[pt_i] <= #TCQ 1'b0; end always @(posedge clk) begin if (sr_rise0_r[pt_i] == idel_pat1_rise0[pt_i%4]) idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b1; else idel_pat1_match_rise0_r[pt_i] <= #TCQ 1'b0; if (sr_fall0_r[pt_i] == idel_pat1_fall0[pt_i%4]) idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b1; else idel_pat1_match_fall0_r[pt_i] <= #TCQ 1'b0; if (sr_rise1_r[pt_i] == idel_pat1_rise1[pt_i%4]) idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b1; else idel_pat1_match_rise1_r[pt_i] <= #TCQ 1'b0; if (sr_fall1_r[pt_i] == idel_pat1_fall1[pt_i%4]) idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b1; else idel_pat1_match_fall1_r[pt_i] <= #TCQ 1'b0; end // DQS DVW pattern detection always @(posedge clk) begin if (sr_rise0_r[pt_i] == pat0_rise0[pt_i%4]) pat0_match_rise0_r[pt_i] <= #TCQ 1'b1; else pat0_match_rise0_r[pt_i] <= #TCQ 1'b0; if (sr_fall0_r[pt_i] == pat0_fall0[pt_i%4]) pat0_match_fall0_r[pt_i] <= #TCQ 1'b1; else pat0_match_fall0_r[pt_i] <= #TCQ 1'b0; if (sr_rise1_r[pt_i] == pat0_rise1[pt_i%4]) pat0_match_rise1_r[pt_i] <= #TCQ 1'b1; else pat0_match_rise1_r[pt_i] <= #TCQ 1'b0; if (sr_fall1_r[pt_i] == pat0_fall1[pt_i%4]) pat0_match_fall1_r[pt_i] <= #TCQ 1'b1; else pat0_match_fall1_r[pt_i] <= #TCQ 1'b0; end always @(posedge clk) begin if (sr_rise0_r[pt_i] == pat1_rise0[pt_i%4]) pat1_match_rise0_r[pt_i] <= #TCQ 1'b1; else pat1_match_rise0_r[pt_i] <= #TCQ 1'b0; if (sr_fall0_r[pt_i] == pat1_fall0[pt_i%4]) pat1_match_fall0_r[pt_i] <= #TCQ 1'b1; else pat1_match_fall0_r[pt_i] <= #TCQ 1'b0; if (sr_rise1_r[pt_i] == pat1_rise1[pt_i%4]) pat1_match_rise1_r[pt_i] <= #TCQ 1'b1; else pat1_match_rise1_r[pt_i] <= #TCQ 1'b0; if (sr_fall1_r[pt_i] == pat1_fall1[pt_i%4]) pat1_match_fall1_r[pt_i] <= #TCQ 1'b1; else pat1_match_fall1_r[pt_i] <= #TCQ 1'b0; end end // Combine pattern match "subterms" for DQ-IDELAY stage always @(posedge clk) begin idel_pat0_match_rise0_and_r <= #TCQ &idel_pat0_match_rise0_r; 
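      // The reduction-AND (&vector) collapses the per-bit match flags for the
      // byte into a single bit that is 1 only when every DQ bit in the DQS
      // group matched the expected pattern value.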
idel_pat0_match_fall0_and_r <= #TCQ &idel_pat0_match_fall0_r; idel_pat0_match_rise1_and_r <= #TCQ &idel_pat0_match_rise1_r; idel_pat0_match_fall1_and_r <= #TCQ &idel_pat0_match_fall1_r; idel_pat0_data_match_r <= #TCQ (idel_pat0_match_rise0_and_r && idel_pat0_match_fall0_and_r && idel_pat0_match_rise1_and_r && idel_pat0_match_fall1_and_r); end always @(posedge clk) begin idel_pat1_match_rise0_and_r <= #TCQ &idel_pat1_match_rise0_r; idel_pat1_match_fall0_and_r <= #TCQ &idel_pat1_match_fall0_r; idel_pat1_match_rise1_and_r <= #TCQ &idel_pat1_match_rise1_r; idel_pat1_match_fall1_and_r <= #TCQ &idel_pat1_match_fall1_r; idel_pat1_data_match_r <= #TCQ (idel_pat1_match_rise0_and_r && idel_pat1_match_fall0_and_r && idel_pat1_match_rise1_and_r && idel_pat1_match_fall1_and_r); end always @(posedge clk) begin if (sr_valid_r2) idel_pat_data_match <= #TCQ idel_pat0_data_match_r | idel_pat1_data_match_r; end //assign idel_pat_data_match = idel_pat0_data_match_r | // idel_pat1_data_match_r; always @(posedge clk) idel_pat_data_match_r <= #TCQ idel_pat_data_match; // Combine pattern match "subterms" for DQS-PHASER_IN stage always @(posedge clk) begin pat0_match_rise0_and_r <= #TCQ &pat0_match_rise0_r; pat0_match_fall0_and_r <= #TCQ &pat0_match_fall0_r; pat0_match_rise1_and_r <= #TCQ &pat0_match_rise1_r; pat0_match_fall1_and_r <= #TCQ &pat0_match_fall1_r; pat0_data_match_r <= #TCQ (pat0_match_rise0_and_r && pat0_match_fall0_and_r && pat0_match_rise1_and_r && pat0_match_fall1_and_r); end always @(posedge clk) begin pat1_match_rise0_and_r <= #TCQ &pat1_match_rise0_r; pat1_match_fall0_and_r <= #TCQ &pat1_match_fall0_r; pat1_match_rise1_and_r <= #TCQ &pat1_match_rise1_r; pat1_match_fall1_and_r <= #TCQ &pat1_match_fall1_r; pat1_data_match_r <= #TCQ (pat1_match_rise0_and_r && pat1_match_fall0_and_r && pat1_match_rise1_and_r && pat1_match_fall1_and_r); end assign pat_data_match_r = pat0_data_match_r | pat1_data_match_r; end endgenerate always @(posedge clk) begin rdlvl_stg1_start_r <= #TCQ rdlvl_stg1_start; mpr_rdlvl_done_r1 <= #TCQ mpr_rdlvl_done_r; mpr_rdlvl_done_r2 <= #TCQ mpr_rdlvl_done_r1; mpr_rdlvl_start_r <= #TCQ mpr_rdlvl_start; end //*************************************************************************** // First stage calibration: Capture clock //*************************************************************************** //***************************************************************** // Keep track of how many samples have been written to shift registers // Every time RD_SHIFT_LEN samples have been written, then we have a // full read training pattern loaded into the sr_* registers. 
// Then assert sr_valid_r to indicate that: (1) comparison between the sr_*
// and old_sr_* and prev_sr_* registers can take place, and (2) transfer of
// the contents of sr_* to the old_sr_* and prev_sr_* registers can also
// take place
//*****************************************************************
always @(posedge clk)
  if (rst || (mpr_rdlvl_done_r && ~rdlvl_stg1_start)) begin
    cnt_shift_r <= #TCQ 'b1;
    sr_valid_r  <= #TCQ 1'b0;
    mpr_valid_r <= #TCQ 1'b0;
  end else begin
    if (mux_rd_valid_r && mpr_rdlvl_start && ~mpr_rdlvl_done_r) begin
      if (cnt_shift_r == 'b0)
        mpr_valid_r <= #TCQ 1'b1;
      else begin
        mpr_valid_r <= #TCQ 1'b0;
        cnt_shift_r <= #TCQ cnt_shift_r + 1;
      end
    end else
      mpr_valid_r <= #TCQ 1'b0;

    if (mux_rd_valid_r && rdlvl_stg1_start) begin
      if (cnt_shift_r == RD_SHIFT_LEN-1) begin
        sr_valid_r  <= #TCQ 1'b1;
        cnt_shift_r <= #TCQ 'b0;
      end else begin
        sr_valid_r  <= #TCQ 1'b0;
        cnt_shift_r <= #TCQ cnt_shift_r + 1;
      end
    end else
      // When the current mux_rd_* contents are not valid, retain the
      // current value of cnt_shift_r, and make sure that sr_valid_r = 0
      // to prevent any downstream loads or comparisons
      sr_valid_r <= #TCQ 1'b0;
  end

//*****************************************************************
// Logic to determine when either edge of the data eye is encountered.
// The pre- and post-IDELAY-update data patterns are compared; if they
// differ, then an edge has been encountered. Currently no attempt is
// made to determine if the data pattern itself is "correct", only
// whether it changes after incrementing the IDELAY (possible
// future enhancement)
//*****************************************************************

// One-way control for ensuring that the state machine request to store
// current read data into the OLD SR shift register only occurs on a
// valid clock cycle. The FSM provides a one-cycle request pulse.
// It is the responsibility of the FSM to wait the worst-case time
// before relying on any downstream results of this load.
always @(posedge clk)
  if (rst)
    store_sr_r <= #TCQ 1'b0;
  else begin
    if (store_sr_req_r)
      store_sr_r <= #TCQ 1'b1;
    else if ((sr_valid_r || mpr_valid_r) && store_sr_r)
      store_sr_r <= #TCQ 1'b0;
  end

// Transfer current data to old data, prior to incrementing delay.
// Also store data from the current sampling window - so that we can detect
// if the current delay tap yields data that is "jittery"
generate
  if (nCK_PER_CLK == 4) begin: gen_old_sr_div4
    for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_old_sr
      always @(posedge clk) begin
        if (sr_valid_r || mpr_valid_r) begin
          // Load last sample (i.e.
from current sampling interval) prev_sr_rise0_r[z] <= #TCQ sr_rise0_r[z]; prev_sr_fall0_r[z] <= #TCQ sr_fall0_r[z]; prev_sr_rise1_r[z] <= #TCQ sr_rise1_r[z]; prev_sr_fall1_r[z] <= #TCQ sr_fall1_r[z]; prev_sr_rise2_r[z] <= #TCQ sr_rise2_r[z]; prev_sr_fall2_r[z] <= #TCQ sr_fall2_r[z]; prev_sr_rise3_r[z] <= #TCQ sr_rise3_r[z]; prev_sr_fall3_r[z] <= #TCQ sr_fall3_r[z]; end if ((sr_valid_r || mpr_valid_r) && store_sr_r) begin old_sr_rise0_r[z] <= #TCQ sr_rise0_r[z]; old_sr_fall0_r[z] <= #TCQ sr_fall0_r[z]; old_sr_rise1_r[z] <= #TCQ sr_rise1_r[z]; old_sr_fall1_r[z] <= #TCQ sr_fall1_r[z]; old_sr_rise2_r[z] <= #TCQ sr_rise2_r[z]; old_sr_fall2_r[z] <= #TCQ sr_fall2_r[z]; old_sr_rise3_r[z] <= #TCQ sr_rise3_r[z]; old_sr_fall3_r[z] <= #TCQ sr_fall3_r[z]; end end end end else if (nCK_PER_CLK == 2) begin: gen_old_sr_div2 for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_old_sr always @(posedge clk) begin if (sr_valid_r || mpr_valid_r) begin prev_sr_rise0_r[z] <= #TCQ sr_rise0_r[z]; prev_sr_fall0_r[z] <= #TCQ sr_fall0_r[z]; prev_sr_rise1_r[z] <= #TCQ sr_rise1_r[z]; prev_sr_fall1_r[z] <= #TCQ sr_fall1_r[z]; end if ((sr_valid_r || mpr_valid_r) && store_sr_r) begin old_sr_rise0_r[z] <= #TCQ sr_rise0_r[z]; old_sr_fall0_r[z] <= #TCQ sr_fall0_r[z]; old_sr_rise1_r[z] <= #TCQ sr_rise1_r[z]; old_sr_fall1_r[z] <= #TCQ sr_fall1_r[z]; end end end end endgenerate //******************************************************* // Match determination occurs over 3 cycles - pipelined for better timing //******************************************************* // Match valid with # of cycles of pipelining in match determination always @(posedge clk) begin sr_valid_r1 <= #TCQ sr_valid_r; sr_valid_r2 <= #TCQ sr_valid_r1; mpr_valid_r1 <= #TCQ mpr_valid_r; mpr_valid_r2 <= #TCQ mpr_valid_r1; end generate if (nCK_PER_CLK == 4) begin: gen_sr_match_div4 for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_sr_match always @(posedge clk) begin // CYCLE1: Compare all bits in DQS grp, generate separate term for // each bit over four bit times. For example, if there are 8-bits // per DQS group, 32 terms are generated on cycle 1 // NOTE: Structure HDL such that X on data bus will result in a // mismatch. This is required for memory models that can drive the // bus with X's to model uncertainty regions (e.g. 
Denali) if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == old_sr_rise0_r[z])) old_sr_match_rise0_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) old_sr_match_rise0_r[z] <= #TCQ old_sr_match_rise0_r[z]; else old_sr_match_rise0_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == old_sr_fall0_r[z])) old_sr_match_fall0_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) old_sr_match_fall0_r[z] <= #TCQ old_sr_match_fall0_r[z]; else old_sr_match_fall0_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == old_sr_rise1_r[z])) old_sr_match_rise1_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) old_sr_match_rise1_r[z] <= #TCQ old_sr_match_rise1_r[z]; else old_sr_match_rise1_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == old_sr_fall1_r[z])) old_sr_match_fall1_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) old_sr_match_fall1_r[z] <= #TCQ old_sr_match_fall1_r[z]; else old_sr_match_fall1_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_rise2_r[z] == old_sr_rise2_r[z])) old_sr_match_rise2_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) old_sr_match_rise2_r[z] <= #TCQ old_sr_match_rise2_r[z]; else old_sr_match_rise2_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_fall2_r[z] == old_sr_fall2_r[z])) old_sr_match_fall2_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) old_sr_match_fall2_r[z] <= #TCQ old_sr_match_fall2_r[z]; else old_sr_match_fall2_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_rise3_r[z] == old_sr_rise3_r[z])) old_sr_match_rise3_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) old_sr_match_rise3_r[z] <= #TCQ old_sr_match_rise3_r[z]; else old_sr_match_rise3_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_fall3_r[z] == old_sr_fall3_r[z])) old_sr_match_fall3_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) old_sr_match_fall3_r[z] <= #TCQ old_sr_match_fall3_r[z]; else old_sr_match_fall3_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == prev_sr_rise0_r[z])) prev_sr_match_rise0_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) prev_sr_match_rise0_r[z] <= #TCQ prev_sr_match_rise0_r[z]; else prev_sr_match_rise0_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == prev_sr_fall0_r[z])) prev_sr_match_fall0_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) prev_sr_match_fall0_r[z] <= #TCQ prev_sr_match_fall0_r[z]; else prev_sr_match_fall0_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == prev_sr_rise1_r[z])) prev_sr_match_rise1_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) prev_sr_match_rise1_r[z] <= #TCQ prev_sr_match_rise1_r[z]; else prev_sr_match_rise1_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == prev_sr_fall1_r[z])) prev_sr_match_fall1_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) prev_sr_match_fall1_r[z] <= #TCQ prev_sr_match_fall1_r[z]; else prev_sr_match_fall1_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_rise2_r[z] == prev_sr_rise2_r[z])) prev_sr_match_rise2_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 
&& mpr_rdlvl_start && ~mpr_rdlvl_done_r) prev_sr_match_rise2_r[z] <= #TCQ prev_sr_match_rise2_r[z]; else prev_sr_match_rise2_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_fall2_r[z] == prev_sr_fall2_r[z])) prev_sr_match_fall2_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) prev_sr_match_fall2_r[z] <= #TCQ prev_sr_match_fall2_r[z]; else prev_sr_match_fall2_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_rise3_r[z] == prev_sr_rise3_r[z])) prev_sr_match_rise3_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) prev_sr_match_rise3_r[z] <= #TCQ prev_sr_match_rise3_r[z]; else prev_sr_match_rise3_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_fall3_r[z] == prev_sr_fall3_r[z])) prev_sr_match_fall3_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) prev_sr_match_fall3_r[z] <= #TCQ prev_sr_match_fall3_r[z]; else prev_sr_match_fall3_r[z] <= #TCQ 1'b0; // CYCLE2: Combine all the comparisons for every 8 words (rise0, // fall0,rise1, fall1) in the calibration sequence. Now we're down // to DRAM_WIDTH terms old_sr_match_cyc2_r[z] <= #TCQ old_sr_match_rise0_r[z] & old_sr_match_fall0_r[z] & old_sr_match_rise1_r[z] & old_sr_match_fall1_r[z] & old_sr_match_rise2_r[z] & old_sr_match_fall2_r[z] & old_sr_match_rise3_r[z] & old_sr_match_fall3_r[z]; prev_sr_match_cyc2_r[z] <= #TCQ prev_sr_match_rise0_r[z] & prev_sr_match_fall0_r[z] & prev_sr_match_rise1_r[z] & prev_sr_match_fall1_r[z] & prev_sr_match_rise2_r[z] & prev_sr_match_fall2_r[z] & prev_sr_match_rise3_r[z] & prev_sr_match_fall3_r[z]; // CYCLE3: Invert value (i.e. assert when DIFFERENCE in value seen), // and qualify with pipelined valid signal) - probably don't need // a cycle just do do this.... 
if (sr_valid_r2 || mpr_valid_r2) begin old_sr_diff_r[z] <= #TCQ ~old_sr_match_cyc2_r[z]; prev_sr_diff_r[z] <= #TCQ ~prev_sr_match_cyc2_r[z]; end else begin old_sr_diff_r[z] <= #TCQ 'b0; prev_sr_diff_r[z] <= #TCQ 'b0; end end end end if (nCK_PER_CLK == 2) begin: gen_sr_match_div2 for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_sr_match always @(posedge clk) begin if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == old_sr_rise0_r[z])) old_sr_match_rise0_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) old_sr_match_rise0_r[z] <= #TCQ old_sr_match_rise0_r[z]; else old_sr_match_rise0_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == old_sr_fall0_r[z])) old_sr_match_fall0_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) old_sr_match_fall0_r[z] <= #TCQ old_sr_match_fall0_r[z]; else old_sr_match_fall0_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == old_sr_rise1_r[z])) old_sr_match_rise1_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) old_sr_match_rise1_r[z] <= #TCQ old_sr_match_rise1_r[z]; else old_sr_match_rise1_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == old_sr_fall1_r[z])) old_sr_match_fall1_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) old_sr_match_fall1_r[z] <= #TCQ old_sr_match_fall1_r[z]; else old_sr_match_fall1_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_rise0_r[z] == prev_sr_rise0_r[z])) prev_sr_match_rise0_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) prev_sr_match_rise0_r[z] <= #TCQ prev_sr_match_rise0_r[z]; else prev_sr_match_rise0_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_fall0_r[z] == prev_sr_fall0_r[z])) prev_sr_match_fall0_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) prev_sr_match_fall0_r[z] <= #TCQ prev_sr_match_fall0_r[z]; else prev_sr_match_fall0_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_rise1_r[z] == prev_sr_rise1_r[z])) prev_sr_match_rise1_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) prev_sr_match_rise1_r[z] <= #TCQ prev_sr_match_rise1_r[z]; else prev_sr_match_rise1_r[z] <= #TCQ 1'b0; if ((pat_data_match_r || mpr_valid_r1) && (sr_fall1_r[z] == prev_sr_fall1_r[z])) prev_sr_match_fall1_r[z] <= #TCQ 1'b1; else if (~mpr_valid_r1 && mpr_rdlvl_start && ~mpr_rdlvl_done_r) prev_sr_match_fall1_r[z] <= #TCQ prev_sr_match_fall1_r[z]; else prev_sr_match_fall1_r[z] <= #TCQ 1'b0; old_sr_match_cyc2_r[z] <= #TCQ old_sr_match_rise0_r[z] & old_sr_match_fall0_r[z] & old_sr_match_rise1_r[z] & old_sr_match_fall1_r[z]; prev_sr_match_cyc2_r[z] <= #TCQ prev_sr_match_rise0_r[z] & prev_sr_match_fall0_r[z] & prev_sr_match_rise1_r[z] & prev_sr_match_fall1_r[z]; // CYCLE3: Invert value (i.e. assert when DIFFERENCE in value seen), // and qualify with pipelined valid signal) - probably don't need // a cycle just do do this.... 
          if (sr_valid_r2 || mpr_valid_r2) begin
            old_sr_diff_r[z]  <= #TCQ ~old_sr_match_cyc2_r[z];
            prev_sr_diff_r[z] <= #TCQ ~prev_sr_match_cyc2_r[z];
          end else begin
            old_sr_diff_r[z]  <= #TCQ 'b0;
            prev_sr_diff_r[z] <= #TCQ 'b0;
          end
        end
      end
    end
endgenerate

//***************************************************************************
// First stage calibration: DQS Capture
//***************************************************************************

//*******************************************************
// Counters for tracking # of samples compared
// For each comparison point (i.e. to determine if an edge has
// occurred after each IODELAY increment when read leveling),
// multiple samples are compared in order to average out the effects
// of jitter. If any one of these samples differs from the "old"
// sample corresponding to the previous IODELAY value, then an edge
// is declared to be detected.
//*******************************************************
// Two cascaded counters are used to keep track of the # of samples compared,
// in order to make it easier to meet timing on these paths. Once the
// optimal sampling interval is determined, it may be possible to remove
// the second counter
always @(posedge clk)
  samp_edge_cnt0_en_r <= #TCQ
    (cal1_state_r == CAL1_PAT_DETECT) ||
    (cal1_state_r == CAL1_DETECT_EDGE) ||
    (cal1_state_r == CAL1_PB_DETECT_EDGE) ||
    (cal1_state_r == CAL1_PB_DETECT_EDGE_DQ);

// First counter counts # of samples compared
always @(posedge clk)
  if (rst)
    samp_edge_cnt0_r <= #TCQ 'b0;
  else begin
    if (!samp_edge_cnt0_en_r)
      // Reset sample counter when not in any of the "sampling" states
      samp_edge_cnt0_r <= #TCQ 'b0;
    else if (sr_valid_r2 || mpr_valid_r2)
      // Otherwise, count # of samples compared
      samp_edge_cnt0_r <= #TCQ samp_edge_cnt0_r + 1;
  end

// Counter #2 enable generation
always @(posedge clk)
  if (rst)
    samp_edge_cnt1_en_r <= #TCQ 1'b0;
  else begin
    // Assert pulse when the correct number of samples has been compared
    if ((samp_edge_cnt0_r == DETECT_EDGE_SAMPLE_CNT0) &&
        (sr_valid_r2 || mpr_valid_r2))
      samp_edge_cnt1_en_r <= #TCQ 1'b1;
    else
      samp_edge_cnt1_en_r <= #TCQ 1'b0;
  end

// Counter #2
always @(posedge clk)
  if (rst)
    samp_edge_cnt1_r <= #TCQ 'b0;
  else if (!samp_edge_cnt0_en_r)
    samp_edge_cnt1_r <= #TCQ 'b0;
  else if (samp_edge_cnt1_en_r)
    samp_edge_cnt1_r <= #TCQ samp_edge_cnt1_r + 1;

always @(posedge clk)
  if (rst)
    samp_cnt_done_r <= #TCQ 1'b0;
  else begin
    if (!samp_edge_cnt0_en_r)
      samp_cnt_done_r <= #TCQ 'b0;
    else if ((SIM_CAL_OPTION == "FAST_CAL") ||
             (SIM_CAL_OPTION == "FAST_WIN_DETECT")) begin
      if (samp_edge_cnt0_r == SR_VALID_DELAY-1)
        // For simulation only, stay in edge detection mode a minimum
        // amount of time - just enough for two data compares to finish
        samp_cnt_done_r <= #TCQ 1'b1;
    end else begin
      if (samp_edge_cnt1_r == DETECT_EDGE_SAMPLE_CNT1)
        samp_cnt_done_r <= #TCQ 1'b1;
    end
  end

//*****************************************************************
// Logic to keep track of (on a per-bit basis):
//  1. When a region of stability preceded by a known edge occurs
//  2. If, for the current tap, the read data jitters
//  3. If an edge occurred between the current and previous tap
//  4. When the current edge detection/sampling interval can end
// Essentially, these are a series of status bits - the stage 1
// calibration FSM monitors these to determine when an edge is
// found. Additional information is provided to help the FSM
// determine if a left or right edge has been found.
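// How the per-bit flags below are derived: old_sr_diff_r (data changed
// relative to the sample stored before the last tap move) or prev_sr_diff_r
// (sample-to-sample jitter at the current tap) sets pb_found_edge_r for that
// bit; a run of MIN_EYE_SIZE consecutive taps with neither condition sets
// pb_found_stable_eye_r; and pb_detect_edge_done_r marks the end of the
// sampling interval for that bit. The aggregate detect_edge_done_r /
// found_edge_r / found_stable_eye_r terms are simply AND/OR combinations of
// these per-bit flags.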
//**************************************************************** assign pb_detect_edge_setup = (cal1_state_r == CAL1_STORE_FIRST_WAIT) || (cal1_state_r == CAL1_PB_STORE_FIRST_WAIT) || (cal1_state_r == CAL1_PB_DEC_CPT_LEFT_WAIT); assign pb_detect_edge = (cal1_state_r == CAL1_PAT_DETECT) || (cal1_state_r == CAL1_DETECT_EDGE) || (cal1_state_r == CAL1_PB_DETECT_EDGE) || (cal1_state_r == CAL1_PB_DETECT_EDGE_DQ); generate for (z = 0; z < DRAM_WIDTH; z = z + 1) begin: gen_track_left_edge always @(posedge clk) begin if (pb_detect_edge_setup) begin // Reset eye size, stable eye marker, and jitter marker before // starting new edge detection iteration pb_cnt_eye_size_r[z] <= #TCQ 5'd0; pb_detect_edge_done_r[z] <= #TCQ 1'b0; pb_found_stable_eye_r[z] <= #TCQ 1'b0; pb_last_tap_jitter_r[z] <= #TCQ 1'b0; pb_found_edge_last_r[z] <= #TCQ 1'b0; pb_found_edge_r[z] <= #TCQ 1'b0; pb_found_first_edge_r[z] <= #TCQ 1'b0; end else if (pb_detect_edge) begin // Save information on which DQ bits are already out of the // data valid window - those DQ bits will later not have their // IDELAY tap value incremented pb_found_edge_last_r[z] <= #TCQ pb_found_edge_r[z]; if (!pb_detect_edge_done_r[z]) begin if (samp_cnt_done_r) begin // If we've reached end of sampling interval, no jitter on // current tap has been found (although an edge could have // been found between the current and previous taps), and // the sampling interval is complete. Increment the stable // eye counter if no edge found, and always clear the jitter // flag in preparation for the next tap. pb_last_tap_jitter_r[z] <= #TCQ 1'b0; pb_detect_edge_done_r[z] <= #TCQ 1'b1; if (!pb_found_edge_r[z] && !pb_last_tap_jitter_r[z]) begin // If the data was completely stable during this tap and // no edge was found between this and the previous tap // then increment the stable eye counter "as appropriate" if (pb_cnt_eye_size_r[z] != MIN_EYE_SIZE-1) pb_cnt_eye_size_r[z] <= #TCQ pb_cnt_eye_size_r[z] + 1; else //if (pb_found_first_edge_r[z]) // We've reached minimum stable eye width pb_found_stable_eye_r[z] <= #TCQ 1'b1; end else begin // Otherwise, an edge was found, either because of a // difference between this and the previous tap's read // data, and/or because the previous tap's data jittered // (but not the current tap's data), then just set the // edge found flag, and enable the stable eye counter pb_cnt_eye_size_r[z] <= #TCQ 5'd0; pb_found_stable_eye_r[z] <= #TCQ 1'b0; pb_found_edge_r[z] <= #TCQ 1'b1; pb_detect_edge_done_r[z] <= #TCQ 1'b1; end end else if (prev_sr_diff_r[z]) begin // If we find that the current tap read data jitters, then // set edge and jitter found flags, "enable" the eye size // counter, and stop sampling interval for this bit pb_cnt_eye_size_r[z] <= #TCQ 5'd0; pb_found_stable_eye_r[z] <= #TCQ 1'b0; pb_last_tap_jitter_r[z] <= #TCQ 1'b1; pb_found_edge_r[z] <= #TCQ 1'b1; pb_found_first_edge_r[z] <= #TCQ 1'b1; pb_detect_edge_done_r[z] <= #TCQ 1'b1; end else if (old_sr_diff_r[z] || pb_last_tap_jitter_r[z]) begin // If either an edge was found (i.e. difference between // current tap and previous tap read data), or the previous // tap exhibited jitter (which means by definition that the // current tap cannot match the previous tap because the // previous tap gave unstable data), then set the edge found // flag, and "enable" eye size counter. 
But do not stop // sampling interval - we still need to check if the current // tap exhibits jitter pb_cnt_eye_size_r[z] <= #TCQ 5'd0; pb_found_stable_eye_r[z] <= #TCQ 1'b0; pb_found_edge_r[z] <= #TCQ 1'b1; pb_found_first_edge_r[z] <= #TCQ 1'b1; end end end else begin // Before every edge detection interval, reset "intra-tap" flags pb_found_edge_r[z] <= #TCQ 1'b0; pb_detect_edge_done_r[z] <= #TCQ 1'b0; end end end endgenerate // Combine the above per-bit status flags into combined terms when // performing deskew on the aggregate data window always @(posedge clk) begin detect_edge_done_r <= #TCQ &pb_detect_edge_done_r; found_edge_r <= #TCQ |pb_found_edge_r; found_edge_all_r <= #TCQ &pb_found_edge_r; found_stable_eye_r <= #TCQ &pb_found_stable_eye_r; end // last IODELAY "stable eye" indicator is updated only after // detect_edge_done_r is asserted - so that when we do find the "right edge" // of the data valid window, found_edge_r = 1, AND found_stable_eye_r = 1 // when detect_edge_done_r = 1 (otherwise, if found_stable_eye_r updates // immediately, then it never possible to have found_stable_eye_r = 1 // when we detect an edge - and we'll never know whether we've found // a "right edge") always @(posedge clk) if (pb_detect_edge_setup) found_stable_eye_last_r <= #TCQ 1'b0; else if (detect_edge_done_r) found_stable_eye_last_r <= #TCQ found_stable_eye_r; //***************************************************************** // Keep track of DQ IDELAYE2 taps used //***************************************************************** // Added additional register stage to improve timing always @(posedge clk) if (rst) idelay_tap_cnt_slice_r <= 5'h0; else idelay_tap_cnt_slice_r <= idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing]; always @(posedge clk) if (rst || (SIM_CAL_OPTION == "SKIP_CAL")) begin //|| new_cnt_cpt_r for (s = 0; s < RANKS; s = s + 1) begin for (t = 0; t < DQS_WIDTH; t = t + 1) begin idelay_tap_cnt_r[s][t] <= #TCQ idelaye2_init_val; end end end else if (SIM_CAL_OPTION == "FAST_CAL") begin for (u = 0; u < RANKS; u = u + 1) begin for (w = 0; w < DQS_WIDTH; w = w + 1) begin if (cal1_dq_idel_ce) begin if (cal1_dq_idel_inc) idelay_tap_cnt_r[u][w] <= #TCQ idelay_tap_cnt_r[u][w] + 1; else idelay_tap_cnt_r[u][w] <= #TCQ idelay_tap_cnt_r[u][w] - 1; end end end end else if ((rnk_cnt_r == RANKS-1) && (RANKS == 2) && rdlvl_rank_done_r && (cal1_state_r == CAL1_IDLE)) begin for (f = 0; f < DQS_WIDTH; f = f + 1) begin idelay_tap_cnt_r[rnk_cnt_r][f] <= #TCQ idelay_tap_cnt_r[(rnk_cnt_r-1)][f]; end end else if (cal1_dq_idel_ce) begin if (cal1_dq_idel_inc) idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] <= #TCQ idelay_tap_cnt_slice_r + 5'h1; else idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing] <= #TCQ idelay_tap_cnt_slice_r - 5'h1; end else if (idelay_ld) idelay_tap_cnt_r[0][wrcal_cnt] <= #TCQ 5'b00000; always @(posedge clk) if (rst || new_cnt_cpt_r) idelay_tap_limit_r <= #TCQ 1'b0; else if (idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_r] == 'd31) idelay_tap_limit_r <= #TCQ 1'b1; //***************************************************************** // keep track of edge tap counts found, and current capture clock // tap count //***************************************************************** always @(posedge clk) if (rst || new_cnt_cpt_r || (mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2)) tap_cnt_cpt_r <= #TCQ 'b0; else if (cal1_dlyce_cpt_r) begin if (cal1_dlyinc_cpt_r) tap_cnt_cpt_r <= #TCQ tap_cnt_cpt_r + 1; else if (tap_cnt_cpt_r != 'd0) tap_cnt_cpt_r <= #TCQ tap_cnt_cpt_r - 1; end always @(posedge clk) if (rst 
|| new_cnt_cpt_r || (cal1_state_r1 == CAL1_DQ_IDEL_TAP_INC) || (mpr_rdlvl_done_r1 && ~mpr_rdlvl_done_r2)) tap_limit_cpt_r <= #TCQ 1'b0; else if (tap_cnt_cpt_r == 6'd63) tap_limit_cpt_r <= #TCQ 1'b1; always @(posedge clk) cal1_cnt_cpt_timing_r <= #TCQ cal1_cnt_cpt_r; assign cal1_cnt_cpt_timing = {2'b00, cal1_cnt_cpt_r}; // Storing DQS tap values at the end of each DQS read leveling always @(posedge clk) begin if (rst) begin for (a = 0; a < RANKS; a = a + 1) begin: rst_rdlvl_dqs_tap_count_loop for (b = 0; b < DQS_WIDTH; b = b + 1) rdlvl_dqs_tap_cnt_r[a][b] <= #TCQ 'b0; end end else if ((SIM_CAL_OPTION == "FAST_CAL") & (cal1_state_r1 == CAL1_NEXT_DQS)) begin for (p = 0; p < RANKS; p = p +1) begin: rdlvl_dqs_tap_rank_cnt for(q = 0; q < DQS_WIDTH; q = q +1) begin: rdlvl_dqs_tap_cnt rdlvl_dqs_tap_cnt_r[p][q] <= #TCQ tap_cnt_cpt_r; end end end else if (SIM_CAL_OPTION == "SKIP_CAL") begin for (j = 0; j < RANKS; j = j +1) begin: rdlvl_dqs_tap_rnk_cnt for(i = 0; i < DQS_WIDTH; i = i +1) begin: rdlvl_dqs_cnt rdlvl_dqs_tap_cnt_r[j][i] <= #TCQ 6'd31; end end end else if (cal1_state_r1 == CAL1_NEXT_DQS) begin rdlvl_dqs_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing_r] <= #TCQ tap_cnt_cpt_r; end end // Counter to track maximum DQ IODELAY tap usage during the per-bit // deskew portion of stage 1 calibration always @(posedge clk) if (rst) begin idel_tap_cnt_dq_pb_r <= #TCQ 'b0; idel_tap_limit_dq_pb_r <= #TCQ 1'b0; end else if (new_cnt_cpt_r) begin idel_tap_cnt_dq_pb_r <= #TCQ 'b0; idel_tap_limit_dq_pb_r <= #TCQ 1'b0; end else if (|cal1_dlyce_dq_r) begin if (cal1_dlyinc_dq_r) idel_tap_cnt_dq_pb_r <= #TCQ idel_tap_cnt_dq_pb_r + 1; else idel_tap_cnt_dq_pb_r <= #TCQ idel_tap_cnt_dq_pb_r - 1; if (idel_tap_cnt_dq_pb_r == 31) idel_tap_limit_dq_pb_r <= #TCQ 1'b1; else idel_tap_limit_dq_pb_r <= #TCQ 1'b0; end //***************************************************************** always @(posedge clk) cal1_state_r1 <= #TCQ cal1_state_r; always @(posedge clk) if (rst) begin cal1_cnt_cpt_r <= #TCQ 'b0; cal1_dlyce_cpt_r <= #TCQ 1'b0; cal1_dlyinc_cpt_r <= #TCQ 1'b0; cal1_dq_idel_ce <= #TCQ 1'b0; cal1_dq_idel_inc <= #TCQ 1'b0; cal1_prech_req_r <= #TCQ 1'b0; cal1_state_r <= #TCQ CAL1_IDLE; cnt_idel_dec_cpt_r <= #TCQ 6'bxxxxxx; found_first_edge_r <= #TCQ 1'b0; found_second_edge_r <= #TCQ 1'b0; right_edge_taps_r <= #TCQ 6'bxxxxxx; first_edge_taps_r <= #TCQ 6'bxxxxxx; new_cnt_cpt_r <= #TCQ 1'b0; rdlvl_stg1_done <= #TCQ 1'b0; rdlvl_stg1_err <= #TCQ 1'b0; second_edge_taps_r <= #TCQ 6'bxxxxxx; store_sr_req_pulsed_r <= #TCQ 1'b0; store_sr_req_r <= #TCQ 1'b0; rnk_cnt_r <= #TCQ 2'b00; rdlvl_rank_done_r <= #TCQ 1'b0; idel_dec_cnt <= #TCQ 'd0; rdlvl_last_byte_done <= #TCQ 1'b0; idel_pat_detect_valid_r <= #TCQ 1'b0; mpr_rank_done_r <= #TCQ 1'b0; mpr_last_byte_done <= #TCQ 1'b0; if (OCAL_EN == "ON") mpr_rdlvl_done_r <= #TCQ 1'b0; else mpr_rdlvl_done_r <= #TCQ 1'b1; mpr_dec_cpt_r <= #TCQ 1'b0; end else begin // default (inactive) states for all "pulse" outputs cal1_prech_req_r <= #TCQ 1'b0; cal1_dlyce_cpt_r <= #TCQ 1'b0; cal1_dlyinc_cpt_r <= #TCQ 1'b0; cal1_dq_idel_ce <= #TCQ 1'b0; cal1_dq_idel_inc <= #TCQ 1'b0; new_cnt_cpt_r <= #TCQ 1'b0; store_sr_req_pulsed_r <= #TCQ 1'b0; store_sr_req_r <= #TCQ 1'b0; case (cal1_state_r) CAL1_IDLE: begin rdlvl_rank_done_r <= #TCQ 1'b0; rdlvl_last_byte_done <= #TCQ 1'b0; mpr_rank_done_r <= #TCQ 1'b0; mpr_last_byte_done <= #TCQ 1'b0; if (mpr_rdlvl_start && ~mpr_rdlvl_start_r) begin cal1_state_r <= #TCQ CAL1_MPR_NEW_DQS_WAIT; end else if (rdlvl_stg1_start && ~rdlvl_stg1_start_r) begin if (SIM_CAL_OPTION == 
"SKIP_CAL") cal1_state_r <= #TCQ CAL1_REGL_LOAD; else if (SIM_CAL_OPTION == "FAST_CAL") cal1_state_r <= #TCQ CAL1_NEXT_DQS; else begin new_cnt_cpt_r <= #TCQ 1'b1; cal1_state_r <= #TCQ CAL1_NEW_DQS_WAIT; end end end CAL1_MPR_NEW_DQS_WAIT: begin cal1_prech_req_r <= #TCQ 1'b0; if (!cal1_wait_r && mpr_valid_r) cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT; end // Wait for the new DQS group to change // also gives time for the read data IN_FIFO to // output the updated data for the new DQS group CAL1_NEW_DQS_WAIT: begin rdlvl_rank_done_r <= #TCQ 1'b0; rdlvl_last_byte_done <= #TCQ 1'b0; mpr_rank_done_r <= #TCQ 1'b0; mpr_last_byte_done <= #TCQ 1'b0; cal1_prech_req_r <= #TCQ 1'b0; if (|pi_counter_read_val) begin //VK_REVIEW mpr_dec_cpt_r <= #TCQ 1'b1; cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT; cnt_idel_dec_cpt_r <= #TCQ pi_counter_read_val; end else if (!cal1_wait_r) begin //if (!cal1_wait_r) begin // Store "previous tap" read data. Technically there is no // "previous" read data, since we are starting a new DQS // group, so we'll never find an edge at tap 0 unless the // data is fluctuating/jittering store_sr_req_r <= #TCQ 1'b1; // If per-bit deskew is disabled, then skip the first // portion of stage 1 calibration if (PER_BIT_DESKEW == "OFF") cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT; else if (PER_BIT_DESKEW == "ON") cal1_state_r <= #TCQ CAL1_PB_STORE_FIRST_WAIT; end end //***************************************************************** // Per-bit deskew states //***************************************************************** // Wait state following storage of initial read data CAL1_PB_STORE_FIRST_WAIT: if (!cal1_wait_r) cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE; // Look for an edge on all DQ bits in current DQS group CAL1_PB_DETECT_EDGE: if (detect_edge_done_r) begin if (found_stable_eye_r) begin // If we've found the left edge for all bits (or more precisely, // we've found the left edge, and then part of the stable // window thereafter), then proceed to positioning the CPT clock // right before the left margin cnt_idel_dec_cpt_r <= #TCQ MIN_EYE_SIZE + 1; cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_LEFT; end else begin // If we've reached the end of the sampling time, and haven't // yet found the left margin of all the DQ bits, then: if (!tap_limit_cpt_r) begin // If we still have taps left to use, then store current value // of read data, increment the capture clock, and continue to // look for (left) edges store_sr_req_r <= #TCQ 1'b1; cal1_state_r <= #TCQ CAL1_PB_INC_CPT; end else begin // If we ran out of taps moving the capture clock, and we // haven't finished edge detection, then reset the capture // clock taps to 0 (gradually, one tap at a time... // then exit the per-bit portion of the algorithm - // i.e. 
proceed to adjust the capture clock and DQ IODELAYs as cnt_idel_dec_cpt_r <= #TCQ 6'd63; cal1_state_r <= #TCQ CAL1_PB_DEC_CPT; end end end // Increment delay for DQS CAL1_PB_INC_CPT: begin cal1_dlyce_cpt_r <= #TCQ 1'b1; cal1_dlyinc_cpt_r <= #TCQ 1'b1; cal1_state_r <= #TCQ CAL1_PB_INC_CPT_WAIT; end // Wait for IODELAY for both capture and internal nodes within // ISERDES to settle, before checking again for an edge CAL1_PB_INC_CPT_WAIT: begin cal1_dlyce_cpt_r <= #TCQ 1'b0; cal1_dlyinc_cpt_r <= #TCQ 1'b0; if (!cal1_wait_r) cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE; end // We've found the left edges of the windows for all DQ bits // (actually, we found it MIN_EYE_SIZE taps ago) Decrement capture // clock IDELAY to position just outside left edge of data window CAL1_PB_DEC_CPT_LEFT: if (cnt_idel_dec_cpt_r == 6'b000000) cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_LEFT_WAIT; else begin cal1_dlyce_cpt_r <= #TCQ 1'b1; cal1_dlyinc_cpt_r <= #TCQ 1'b0; cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1; end CAL1_PB_DEC_CPT_LEFT_WAIT: if (!cal1_wait_r) cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE_DQ; // If there is skew between individual DQ bits, then after we've // positioned the CPT clock, we will be "in the window" for some // DQ bits ("early" DQ bits), and "out of the window" for others // ("late" DQ bits). Increase DQ taps until we are out of the // window for all DQ bits CAL1_PB_DETECT_EDGE_DQ: if (detect_edge_done_r) if (found_edge_all_r) begin // We're out of the window for all DQ bits in this DQS group // We're done with per-bit deskew for this group - now decr // capture clock IODELAY tap count back to 0, and proceed // with the rest of stage 1 calibration for this DQS group cnt_idel_dec_cpt_r <= #TCQ tap_cnt_cpt_r; cal1_state_r <= #TCQ CAL1_PB_DEC_CPT; end else if (!idel_tap_limit_dq_pb_r) // If we still have DQ taps available for deskew, keep // incrementing IODELAY tap count for the appropriate DQ bits cal1_state_r <= #TCQ CAL1_PB_INC_DQ; else begin // Otherwise, stop immediately (we've done the best we can) // and proceed with rest of stage 1 calibration cnt_idel_dec_cpt_r <= #TCQ tap_cnt_cpt_r; cal1_state_r <= #TCQ CAL1_PB_DEC_CPT; end CAL1_PB_INC_DQ: begin // Increment only those DQ for which an edge hasn't been found yet cal1_dlyce_dq_r <= #TCQ ~pb_found_edge_last_r; cal1_dlyinc_dq_r <= #TCQ 1'b1; cal1_state_r <= #TCQ CAL1_PB_INC_DQ_WAIT; end CAL1_PB_INC_DQ_WAIT: if (!cal1_wait_r) cal1_state_r <= #TCQ CAL1_PB_DETECT_EDGE_DQ; // Decrement capture clock taps back to initial value CAL1_PB_DEC_CPT: if (cnt_idel_dec_cpt_r == 6'b000000) cal1_state_r <= #TCQ CAL1_PB_DEC_CPT_WAIT; else begin cal1_dlyce_cpt_r <= #TCQ 1'b1; cal1_dlyinc_cpt_r <= #TCQ 1'b0; cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1; end // Wait for capture clock to settle, then proceed to rest of // state 1 calibration for this DQS group CAL1_PB_DEC_CPT_WAIT: if (!cal1_wait_r) begin store_sr_req_r <= #TCQ 1'b1; cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT; end // When first starting calibration for a DQS group, save the // current value of the read data shift register, and use this // as a reference. 
Note that for the first iteration of the // edge detection loop, we will in effect be checking for an edge // at IODELAY taps = 0 - normally, we are comparing the read data // for IODELAY taps = N, with the read data for IODELAY taps = N-1 // An edge can only be found at IODELAY taps = 0 if the read data // is changing during this time (possible due to jitter) CAL1_STORE_FIRST_WAIT: begin mpr_dec_cpt_r <= #TCQ 1'b0; if (!cal1_wait_r) cal1_state_r <= #TCQ CAL1_PAT_DETECT; end CAL1_VALID_WAIT: begin if (!cal1_wait_r) cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT; end CAL1_MPR_PAT_DETECT: begin // MPR read leveling for centering DQS in valid window before // OCLKDELAYED calibration begins in order to eliminate read issues if (idel_pat_detect_valid_r == 1'b0) begin cal1_state_r <= #TCQ CAL1_VALID_WAIT; idel_pat_detect_valid_r <= #TCQ 1'b1; end else if (idel_pat_detect_valid_r && idel_mpr_pat_detect_r) begin cal1_state_r <= #TCQ CAL1_DETECT_EDGE; idel_dec_cnt <= #TCQ 'd0; end else if (!idelay_tap_limit_r) cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC; else cal1_state_r <= #TCQ CAL1_RDLVL_ERR; end CAL1_PAT_DETECT: begin // All DQ bits associated with a DQS are pushed to the right one IDELAY // tap at a time until first rising DQS is in the tri-state region // before first rising edge window. // The detect_edge_done_r condition included to support averaging // during IDELAY tap increments if (detect_edge_done_r) begin if (idel_pat_data_match) begin cal1_state_r <= #TCQ CAL1_DETECT_EDGE; idel_dec_cnt <= #TCQ 'd0; end else if (!idelay_tap_limit_r) begin cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC; end else begin cal1_state_r <= #TCQ CAL1_RDLVL_ERR; end end end // Increment IDELAY tap by 1 for DQ bits in the byte being calibrated // until left edge of valid window detected CAL1_DQ_IDEL_TAP_INC: begin cal1_dq_idel_ce <= #TCQ 1'b1; cal1_dq_idel_inc <= #TCQ 1'b1; cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_INC_WAIT; idel_pat_detect_valid_r <= #TCQ 1'b0; end CAL1_DQ_IDEL_TAP_INC_WAIT: begin cal1_dq_idel_ce <= #TCQ 1'b0; cal1_dq_idel_inc <= #TCQ 1'b0; if (!cal1_wait_r) begin if (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3")) cal1_state_r <= #TCQ CAL1_MPR_PAT_DETECT; else cal1_state_r <= #TCQ CAL1_PAT_DETECT; end end // Decrement by 2 IDELAY taps once idel_pat_data_match detected CAL1_DQ_IDEL_TAP_DEC: begin cal1_dq_idel_inc <= #TCQ 1'b0; cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC_WAIT; if (idel_dec_cnt >= 'd0) cal1_dq_idel_ce <= #TCQ 1'b1; else cal1_dq_idel_ce <= #TCQ 1'b0; if (idel_dec_cnt > 'd0) idel_dec_cnt <= #TCQ idel_dec_cnt - 1; else idel_dec_cnt <= #TCQ idel_dec_cnt; end CAL1_DQ_IDEL_TAP_DEC_WAIT: begin cal1_dq_idel_ce <= #TCQ 1'b0; cal1_dq_idel_inc <= #TCQ 1'b0; if (!cal1_wait_r) begin if ((idel_dec_cnt > 'd0) || (pi_rdval_cnt > 'd0)) cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC; else if (mpr_dec_cpt_r) cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT; else cal1_state_r <= #TCQ CAL1_DETECT_EDGE; end end // Check for presence of data eye edge. During this state, we // sample the read data multiple times, and look for changes // in the read data, specifically: // 1. A change in the read data compared with the value of // read data from the previous delay tap. This indicates // that the most recent tap delay increment has moved us // into either a new window, or moved/kept us in the // transition/jitter region between windows. 
Note that this // condition only needs to be checked once, and for // logistical purposes, we check this soon after entering // this state (see comment in CAL1_DETECT_EDGE below for // why this is done) // 2. A change in the read data while we are in this state // (i.e. in the absence of a tap delay increment). This // indicates that we're close enough to a window edge that // jitter will cause the read data to change even in the // absence of a tap delay change CAL1_DETECT_EDGE: begin // Essentially wait for the first comparison to finish, then // store current data into "old" data register. This store // happens now, rather than later (e.g. when we've already // left this state) in order to avoid the situation where the data that // is stored as "old" data has not been used in an "active // comparison" - i.e. data is stored after the last comparison // of this state. In this case, we can miss an edge if the // following sequence occurs: // 1. Comparison completes in this state - no edge found // 2. "Momentary jitter" occurs which "pushes" the data out the // equivalent of one delay tap // 3. We store this jittered data as the "old" data // 4. "Jitter" no longer present // 5. We increment the delay tap by one // 6. Now we compare the current with the "old" data - they're // the same, and no edge is detected // NOTE: Given the large # of comparisons done in this state, it's // highly unlikely the above sequence will occur in actual H/W // Wait for the first load of read data into the comparison // shift register to finish, then load the current read data // into the "old" data register. This allows us to do one // initial comparison between the current read data, and // stored data corresponding to the previous delay tap idel_pat_detect_valid_r <= #TCQ 1'b0; if (!store_sr_req_pulsed_r) begin // Pulse store_sr_req_r only once in this state store_sr_req_r <= #TCQ 1'b1; store_sr_req_pulsed_r <= #TCQ 1'b1; end else begin store_sr_req_r <= #TCQ 1'b0; store_sr_req_pulsed_r <= #TCQ 1'b1; end // Continue to sample read data and look for edges until the // appropriate time interval (shorter for simulation-only, // much, much longer for actual h/w) has elapsed if (detect_edge_done_r) begin if (tap_limit_cpt_r) // Only one edge detected and ran out of taps since only one // bit time worth of taps available for window detection. This // can happen if at tap 0 DQS is in previous window which results // in only left edge being detected. Or at tap 0 DQS is in the // current window resulting in only right edge being detected. // Depending on the frequency this case can also happen if at // tap 0 DQS is in the left noise region resulting in only left // edge being detected. cal1_state_r <= #TCQ CAL1_CALC_IDEL; else if (found_edge_r) begin // Sticky bit - asserted after we encounter an edge, although // the current edge may not be considered the "first edge" this // just means we found at least one edge found_first_edge_r <= #TCQ 1'b1; // Only the right edge of the data valid window is found // Record the inner right edge tap value if (!found_first_edge_r && found_stable_eye_last_r) begin if (tap_cnt_cpt_r == 'd0) right_edge_taps_r <= #TCQ 'd0; else right_edge_taps_r <= #TCQ tap_cnt_cpt_r; end // Both edges of data valid window found: // If we've found a second edge after a region of stability // then we must have just passed the second ("right") edge of // the window. 
Record this second_edge_taps = current tap-1, // because we're one past the actual second edge tap, where // the edge taps represent the extremes of the data valid // window (i.e. smallest & largest taps where data is still valid) if (found_first_edge_r && found_stable_eye_last_r) begin found_second_edge_r <= #TCQ 1'b1; second_edge_taps_r <= #TCQ tap_cnt_cpt_r - 1; cal1_state_r <= #TCQ CAL1_CALC_IDEL; end else begin // Otherwise, an edge was found (just not the "second" edge) // Assuming DQS is in the correct window at tap 0 of Phaser IN // fine tap. The first edge found is the right edge of the valid // window and is the beginning of the jitter region hence done! first_edge_taps_r <= #TCQ tap_cnt_cpt_r; cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT; end end else // Otherwise, if we haven't found an edge.... // If we still have taps left to use, then keep incrementing cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT; end end // Increment Phaser_IN delay for DQS CAL1_IDEL_INC_CPT: begin cal1_state_r <= #TCQ CAL1_IDEL_INC_CPT_WAIT; if (~tap_limit_cpt_r) begin cal1_dlyce_cpt_r <= #TCQ 1'b1; cal1_dlyinc_cpt_r <= #TCQ 1'b1; end else begin cal1_dlyce_cpt_r <= #TCQ 1'b0; cal1_dlyinc_cpt_r <= #TCQ 1'b0; end end // Wait for Phaser_In to settle, before checking again for an edge CAL1_IDEL_INC_CPT_WAIT: begin cal1_dlyce_cpt_r <= #TCQ 1'b0; cal1_dlyinc_cpt_r <= #TCQ 1'b0; if (!cal1_wait_r) cal1_state_r <= #TCQ CAL1_DETECT_EDGE; end // Calculate final value of Phaser_IN taps. At this point, one or both // edges of data eye have been found, and/or all taps have been // exhausted looking for the edges // NOTE: We're calculating the amount to decrement by, not the // absolute setting for DQS. CAL1_CALC_IDEL: begin // CASE1: If 2 edges found. if (found_second_edge_r) cnt_idel_dec_cpt_r <= #TCQ ((second_edge_taps_r - first_edge_taps_r)>>1) + 1; else if (right_edge_taps_r > 6'd0) // Only right edge detected // right_edge_taps_r is the inner right edge tap value // hence used for calculation cnt_idel_dec_cpt_r <= #TCQ (tap_cnt_cpt_r - (right_edge_taps_r>>1)); else if (found_first_edge_r) // Only left edge detected cnt_idel_dec_cpt_r <= #TCQ ((tap_cnt_cpt_r - first_edge_taps_r)>>1); else cnt_idel_dec_cpt_r <= #TCQ (tap_cnt_cpt_r>>1); // Now use the value we just calculated to decrement CPT taps // to the desired calibration point cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT; end // decrement capture clock for final adjustment - center // capture clock in middle of data eye. This adjustment will occur // only when both edges are found using CPT taps. 
Must do this // incrementally to avoid clock glitching (since CPT drives clock // divider within each ISERDES) CAL1_IDEL_DEC_CPT: begin cal1_dlyce_cpt_r <= #TCQ 1'b1; cal1_dlyinc_cpt_r <= #TCQ 1'b0; // once adjustment is complete, we're done with calibration for // this DQS, repeat for next DQS cnt_idel_dec_cpt_r <= #TCQ cnt_idel_dec_cpt_r - 1; if (cnt_idel_dec_cpt_r == 6'b000001) begin if (mpr_dec_cpt_r) begin if (|idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing]) begin idel_dec_cnt <= #TCQ idelay_tap_cnt_r[rnk_cnt_r][cal1_cnt_cpt_timing]; cal1_state_r <= #TCQ CAL1_DQ_IDEL_TAP_DEC; end else cal1_state_r <= #TCQ CAL1_STORE_FIRST_WAIT; end else cal1_state_r <= #TCQ CAL1_NEXT_DQS; end else cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT_WAIT; end CAL1_IDEL_DEC_CPT_WAIT: begin cal1_dlyce_cpt_r <= #TCQ 1'b0; cal1_dlyinc_cpt_r <= #TCQ 1'b0; if (!cal1_wait_r) cal1_state_r <= #TCQ CAL1_IDEL_DEC_CPT; end // Determine whether we're done, or have more DQS's to calibrate // Also request precharge after every byte, as appropriate CAL1_NEXT_DQS: begin //if (mpr_rdlvl_done_r || (DRAM_TYPE == "DDR2")) cal1_prech_req_r <= #TCQ 1'b1; //else // cal1_prech_req_r <= #TCQ 1'b0; cal1_dlyce_cpt_r <= #TCQ 1'b0; cal1_dlyinc_cpt_r <= #TCQ 1'b0; // Prepare for another iteration with next DQS group found_first_edge_r <= #TCQ 1'b0; found_second_edge_r <= #TCQ 1'b0; first_edge_taps_r <= #TCQ 'd0; second_edge_taps_r <= #TCQ 'd0; if ((SIM_CAL_OPTION == "FAST_CAL") || (cal1_cnt_cpt_r >= DQS_WIDTH-1)) begin if (mpr_rdlvl_done_r) begin rdlvl_last_byte_done <= #TCQ 1'b1; mpr_last_byte_done <= #TCQ 1'b0; end else begin rdlvl_last_byte_done <= #TCQ 1'b0; mpr_last_byte_done <= #TCQ 1'b1; end end // Wait until precharge that occurs in between calibration of // DQS groups is finished if (prech_done) begin // || (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3"))) begin if (SIM_CAL_OPTION == "FAST_CAL") begin //rdlvl_rank_done_r <= #TCQ 1'b1; rdlvl_last_byte_done <= #TCQ 1'b0; mpr_last_byte_done <= #TCQ 1'b0; cal1_state_r <= #TCQ CAL1_DONE; //CAL1_REGL_LOAD; end else if (cal1_cnt_cpt_r >= DQS_WIDTH-1) begin if (~mpr_rdlvl_done_r) begin mpr_rank_done_r <= #TCQ 1'b1; // if (rnk_cnt_r == RANKS-1) begin // All DQS groups in all ranks done cal1_state_r <= #TCQ CAL1_DONE; cal1_cnt_cpt_r <= #TCQ 'b0; // end else begin // // Process DQS groups in next rank // rnk_cnt_r <= #TCQ rnk_cnt_r + 1; // new_cnt_cpt_r <= #TCQ 1'b1; // cal1_cnt_cpt_r <= #TCQ 'b0; // cal1_state_r <= #TCQ CAL1_IDLE; // end end else begin // All DQS groups in a rank done rdlvl_rank_done_r <= #TCQ 1'b1; if (rnk_cnt_r == RANKS-1) begin // All DQS groups in all ranks done cal1_state_r <= #TCQ CAL1_REGL_LOAD; end else begin // Process DQS groups in next rank rnk_cnt_r <= #TCQ rnk_cnt_r + 1; new_cnt_cpt_r <= #TCQ 1'b1; cal1_cnt_cpt_r <= #TCQ 'b0; cal1_state_r <= #TCQ CAL1_IDLE; end end end else begin // Process next DQS group new_cnt_cpt_r <= #TCQ 1'b1; cal1_cnt_cpt_r <= #TCQ cal1_cnt_cpt_r + 1; cal1_state_r <= #TCQ CAL1_NEW_DQS_PREWAIT; end end end CAL1_NEW_DQS_PREWAIT: begin if (!cal1_wait_r) begin if (~mpr_rdlvl_done_r & (DRAM_TYPE == "DDR3")) cal1_state_r <= #TCQ CAL1_MPR_NEW_DQS_WAIT; else cal1_state_r <= #TCQ CAL1_NEW_DQS_WAIT; end end // Load rank registers in Phaser_IN CAL1_REGL_LOAD: begin rdlvl_rank_done_r <= #TCQ 1'b0; mpr_rank_done_r <= #TCQ 1'b0; cal1_prech_req_r <= #TCQ 1'b0; cal1_cnt_cpt_r <= #TCQ 'b0; rnk_cnt_r <= #TCQ 2'b00; if ((regl_rank_cnt == RANKS-1) && ((regl_dqs_cnt == DQS_WIDTH-1) && (done_cnt == 4'd1))) begin cal1_state_r <= #TCQ CAL1_DONE; rdlvl_last_byte_done <= 
#TCQ 1'b0; mpr_last_byte_done <= #TCQ 1'b0; end else cal1_state_r <= #TCQ CAL1_REGL_LOAD; end CAL1_RDLVL_ERR: begin rdlvl_stg1_err <= #TCQ 1'b1; end // Done with this stage of calibration // if used, allow DEBUG_PORT to control taps CAL1_DONE: begin mpr_rdlvl_done_r <= #TCQ 1'b1; cal1_prech_req_r <= #TCQ 1'b0; if (~mpr_rdlvl_done_r && (OCAL_EN=="ON") && (DRAM_TYPE == "DDR3")) begin rdlvl_stg1_done <= #TCQ 1'b0; cal1_state_r <= #TCQ CAL1_IDLE; end else rdlvl_stg1_done <= #TCQ 1'b1; end endcase end endmodule
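//-----------------------------------------------------------------------------
// Illustrative, simulation-only sketch (not part of the MIG sources): it
// reproduces the window-centering arithmetic of the CAL1_CALC_IDEL state
// above with hypothetical edge-tap values, so the decrement chosen for each
// of the four cases (both edges, right edge only, left edge only, no edge)
// can be checked in isolation. All names below are local to this example.
//-----------------------------------------------------------------------------
`timescale 1ps/1ps
module cal1_calc_idel_sketch;

  reg        found_second_edge;
  reg        found_first_edge;
  reg [5:0]  first_edge_taps;
  reg [5:0]  second_edge_taps;
  reg [5:0]  right_edge_taps;
  reg [5:0]  tap_cnt_cpt;
  reg [5:0]  cnt_idel_dec_cpt;

  // Same priority order as CAL1_CALC_IDEL: two edges found, only the right
  // edge found, only the left edge found, or no edge found at all.
  always @(*) begin
    if (found_second_edge)
      cnt_idel_dec_cpt = ((second_edge_taps - first_edge_taps) >> 1) + 1;
    else if (right_edge_taps > 6'd0)
      cnt_idel_dec_cpt = tap_cnt_cpt - (right_edge_taps >> 1);
    else if (found_first_edge)
      cnt_idel_dec_cpt = (tap_cnt_cpt - first_edge_taps) >> 1;
    else
      cnt_idel_dec_cpt = tap_cnt_cpt >> 1;
  end

  initial begin
    // Hypothetical case: edges seen at taps 10 and 40, counter now at tap 41.
    // Expected decrement: ((40-10)>>1)+1 = 16 taps back from the current tap.
    found_first_edge  = 1'b1;  found_second_edge = 1'b1;
    first_edge_taps   = 6'd10; second_edge_taps  = 6'd40;
    right_edge_taps   = 6'd0;  tap_cnt_cpt       = 6'd41;
    #1 $display("two edges : decrement by %0d taps", cnt_idel_dec_cpt);

    // Hypothetical case: only the (inner) right edge was recorded, at tap 20,
    // with the tap counter at its limit. Expected decrement: 63 - (20>>1) = 53.
    found_second_edge = 1'b0;
    right_edge_taps   = 6'd20; tap_cnt_cpt = 6'd63;
    #1 $display("right edge: decrement by %0d taps", cnt_idel_dec_cpt);

    // Hypothetical case: only the left edge was found, at tap 12.
    // Expected decrement: (63-12)>>1 = 25.
    right_edge_taps   = 6'd0;  first_edge_taps = 6'd12;
    #1 $display("left edge : decrement by %0d taps", cnt_idel_dec_cpt);

    // Hypothetical case: no edge found at all; back off by half the range.
    found_first_edge  = 1'b0;
    #1 $display("no edge   : decrement by %0d taps", cnt_idel_dec_cpt);
  end

endmodule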
//***************************************************************************** // (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. 
// //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor : Xilinx // \ \ \/ Version : %version // \ \ Application : MIG // / / Filename : arb_mux.v // /___/ /\ Date Last Modified : $date$ // \ \ / \ Date Created : Tue Jun 30 2009 // \___\/\___\ // //Device : 7-Series //Design Name : DDR3 SDRAM //Purpose : //Reference : //Revision History : //***************************************************************************** `timescale 1ps/1ps module mig_7series_v1_9_arb_mux # ( parameter TCQ = 100, parameter EVEN_CWL_2T_MODE = "OFF", parameter ADDR_CMD_MODE = "1T", parameter BANK_VECT_INDX = 11, parameter BANK_WIDTH = 3, parameter BURST_MODE = "8", parameter CS_WIDTH = 4, parameter CL = 5, parameter CWL = 5, parameter DATA_BUF_ADDR_VECT_INDX = 31, parameter DATA_BUF_ADDR_WIDTH = 8, parameter DRAM_TYPE = "DDR3", parameter CKE_ODT_AUX = "FALSE", //Parameter to turn on/off the aux_out signal parameter EARLY_WR_DATA_ADDR = "OFF", parameter ECC = "OFF", parameter nBANK_MACHS = 4, parameter nCK_PER_CLK = 2, // # DRAM CKs per fabric CLKs parameter nCS_PER_RANK = 1, parameter nRAS = 37500, // ACT->PRE cmd period (CKs) parameter nRCD = 12500, // ACT->R/W delay (CKs) parameter nSLOTS = 2, parameter nWR = 6, // Write recovery (CKs) parameter RANKS = 1, parameter RANK_VECT_INDX = 15, parameter RANK_WIDTH = 2, parameter ROW_VECT_INDX = 63, parameter ROW_WIDTH = 16, parameter RTT_NOM = "40", parameter RTT_WR = "120", parameter SLOT_0_CONFIG = 8'b0000_0101, parameter SLOT_1_CONFIG = 8'b0000_1010 ) (/*AUTOARG*/ // Outputs output [ROW_WIDTH-1:0] col_a, // From arb_select0 of arb_select.v output [BANK_WIDTH-1:0] col_ba, // From arb_select0 of arb_select.v output [DATA_BUF_ADDR_WIDTH-1:0] col_data_buf_addr,// From arb_select0 of arb_select.v output col_periodic_rd, // From arb_select0 of arb_select.v output [RANK_WIDTH-1:0] col_ra, // From arb_select0 of arb_select.v output col_rmw, // From arb_select0 of arb_select.v output col_rd_wr, output [ROW_WIDTH-1:0] col_row, // From arb_select0 of arb_select.v output col_size, // From arb_select0 of arb_select.v output [DATA_BUF_ADDR_WIDTH-1:0] col_wr_data_buf_addr,// From arb_select0 of arb_select.v output wire [nCK_PER_CLK-1:0] mc_ras_n, output wire [nCK_PER_CLK-1:0] mc_cas_n, output wire [nCK_PER_CLK-1:0] mc_we_n, output wire [nCK_PER_CLK*ROW_WIDTH-1:0] mc_address, output wire [nCK_PER_CLK*BANK_WIDTH-1:0] mc_bank, output wire [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] mc_cs_n, output wire [1:0] mc_odt, output wire [nCK_PER_CLK-1:0] mc_cke, output wire [3:0] mc_aux_out0, output wire [3:0] mc_aux_out1, output [2:0] mc_cmd, output [5:0] mc_data_offset, output [5:0] mc_data_offset_1, output [5:0] mc_data_offset_2, output [1:0] mc_cas_slot, output [RANK_WIDTH-1:0] rnk_config, // From arb_select0 of arb_select.v output rnk_config_valid_r, // From arb_row_col0 of arb_row_col.v output [nBANK_MACHS-1:0] sending_row, // From arb_row_col0 of arb_row_col.v output [nBANK_MACHS-1:0] sending_pre, output sent_col, // From arb_row_col0 of arb_row_col.v output sent_col_r, // From arb_row_col0 of arb_row_col.v output sent_row, // From arb_row_col0 of arb_row_col.v output [nBANK_MACHS-1:0] sending_col, output rnk_config_strobe, output insert_maint_r1, output rnk_config_kill_rts_col, // Inputs input clk, input rst, input init_calib_complete, input [6*RANKS-1:0] calib_rddata_offset, input [6*RANKS-1:0] calib_rddata_offset_1, input [6*RANKS-1:0] calib_rddata_offset_2, input [ROW_VECT_INDX:0] col_addr, // To arb_select0 of 
arb_select.v input [nBANK_MACHS-1:0] col_rdy_wr, // To arb_row_col0 of arb_row_col.v input insert_maint_r, // To arb_row_col0 of arb_row_col.v input [RANK_WIDTH-1:0] maint_rank_r, // To arb_select0 of arb_select.v input maint_zq_r, // To arb_select0 of arb_select.v input maint_sre_r, // To arb_select0 of arb_select.v input maint_srx_r, // To arb_select0 of arb_select.v input [nBANK_MACHS-1:0] rd_wr_r, // To arb_select0 of arb_select.v input [BANK_VECT_INDX:0] req_bank_r, // To arb_select0 of arb_select.v input [nBANK_MACHS-1:0] req_cas, // To arb_select0 of arb_select.v input [DATA_BUF_ADDR_VECT_INDX:0] req_data_buf_addr_r,// To arb_select0 of arb_select.v input [nBANK_MACHS-1:0] req_periodic_rd_r, // To arb_select0 of arb_select.v input [RANK_VECT_INDX:0] req_rank_r, // To arb_select0 of arb_select.v input [nBANK_MACHS-1:0] req_ras, // To arb_select0 of arb_select.v input [ROW_VECT_INDX:0] req_row_r, // To arb_select0 of arb_select.v input [nBANK_MACHS-1:0] req_size_r, // To arb_select0 of arb_select.v input [nBANK_MACHS-1:0] req_wr_r, // To arb_select0 of arb_select.v input [ROW_VECT_INDX:0] row_addr, // To arb_select0 of arb_select.v input [nBANK_MACHS-1:0] row_cmd_wr, // To arb_select0 of arb_select.v input [nBANK_MACHS-1:0] rtc, // To arb_row_col0 of arb_row_col.v input [nBANK_MACHS-1:0] rts_col, // To arb_row_col0 of arb_row_col.v input [nBANK_MACHS-1:0] rts_row, // To arb_row_col0 of arb_row_col.v input [nBANK_MACHS-1:0] rts_pre, // To arb_row_col0 of arb_row_col.v input [7:0] slot_0_present, // To arb_select0 of arb_select.v input [7:0] slot_1_present // To arb_select0 of arb_select.v ); /*AUTOINPUT*/ // Beginning of automatic inputs (from unused autoinst inputs) // End of automatics /*AUTOOUTPUT*/ // Beginning of automatic outputs (from unused autoinst outputs) // End of automatics /*AUTOWIRE*/ // Beginning of automatic wires (for undeclared instantiated-module outputs) wire cs_en0; // From arb_row_col0 of arb_row_col.v wire cs_en1; // From arb_row_col0 of arb_row_col.v wire [nBANK_MACHS-1:0] grant_col_r; // From arb_row_col0 of arb_row_col.v wire [nBANK_MACHS-1:0] grant_col_wr; // From arb_row_col0 of arb_row_col.v wire [nBANK_MACHS-1:0] grant_config_r; // From arb_row_col0 of arb_row_col.v wire [nBANK_MACHS-1:0] grant_row_r; // From arb_row_col0 of arb_row_col.v wire [nBANK_MACHS-1:0] grant_pre_r; // From arb_row_col0 of arb_row_col.v wire send_cmd0_row; // From arb_row_col0 of arb_row_col.v wire send_cmd0_col; // From arb_row_col0 of arb_row_col.v wire send_cmd1_row; // From arb_row_col0 of arb_row_col.v wire send_cmd1_col; wire send_cmd2_row; wire send_cmd2_col; wire send_cmd2_pre; wire send_cmd3_col; wire [5:0] col_channel_offset; // End of automatics wire sent_col_i; assign sent_col = sent_col_i; mig_7series_v1_9_arb_row_col # (/*AUTOINSTPARAM*/ // Parameters .TCQ (TCQ), .ADDR_CMD_MODE (ADDR_CMD_MODE), .CWL (CWL), .EARLY_WR_DATA_ADDR (EARLY_WR_DATA_ADDR), .nBANK_MACHS (nBANK_MACHS), .nCK_PER_CLK (nCK_PER_CLK), .nRAS (nRAS), .nRCD (nRCD), .nWR (nWR)) arb_row_col0 (/*AUTOINST*/ // Outputs .grant_row_r (grant_row_r[nBANK_MACHS-1:0]), .grant_pre_r (grant_pre_r[nBANK_MACHS-1:0]), .sent_row (sent_row), .sending_row (sending_row[nBANK_MACHS-1:0]), .sending_pre (sending_pre[nBANK_MACHS-1:0]), .grant_config_r (grant_config_r[nBANK_MACHS-1:0]), .rnk_config_strobe (rnk_config_strobe), .rnk_config_kill_rts_col (rnk_config_kill_rts_col), .rnk_config_valid_r (rnk_config_valid_r), .grant_col_r (grant_col_r[nBANK_MACHS-1:0]), .sending_col (sending_col[nBANK_MACHS-1:0]), .sent_col 
(sent_col_i), .sent_col_r (sent_col_r), .grant_col_wr (grant_col_wr[nBANK_MACHS-1:0]), .send_cmd0_row (send_cmd0_row), .send_cmd0_col (send_cmd0_col), .send_cmd1_row (send_cmd1_row), .send_cmd1_col (send_cmd1_col), .send_cmd2_row (send_cmd2_row), .send_cmd2_col (send_cmd2_col), .send_cmd2_pre (send_cmd2_pre), .send_cmd3_col (send_cmd3_col), .col_channel_offset (col_channel_offset), .cs_en0 (cs_en0), .cs_en1 (cs_en1), .cs_en2 (cs_en2), .cs_en3 (cs_en3), .insert_maint_r1 (insert_maint_r1), // Inputs .clk (clk), .rst (rst), .rts_row (rts_row[nBANK_MACHS-1:0]), .rts_pre (rts_pre[nBANK_MACHS-1:0]), .insert_maint_r (insert_maint_r), .rts_col (rts_col[nBANK_MACHS-1:0]), .rtc (rtc[nBANK_MACHS-1:0]), .col_rdy_wr (col_rdy_wr[nBANK_MACHS-1:0])); mig_7series_v1_9_arb_select # (/*AUTOINSTPARAM*/ // Parameters .TCQ (TCQ), .EVEN_CWL_2T_MODE (EVEN_CWL_2T_MODE), .ADDR_CMD_MODE (ADDR_CMD_MODE), .BANK_VECT_INDX (BANK_VECT_INDX), .BANK_WIDTH (BANK_WIDTH), .BURST_MODE (BURST_MODE), .CS_WIDTH (CS_WIDTH), .CL (CL), .CWL (CWL), .DATA_BUF_ADDR_VECT_INDX (DATA_BUF_ADDR_VECT_INDX), .DATA_BUF_ADDR_WIDTH (DATA_BUF_ADDR_WIDTH), .DRAM_TYPE (DRAM_TYPE), .EARLY_WR_DATA_ADDR (EARLY_WR_DATA_ADDR), .ECC (ECC), .CKE_ODT_AUX (CKE_ODT_AUX), .nBANK_MACHS (nBANK_MACHS), .nCK_PER_CLK (nCK_PER_CLK), .nCS_PER_RANK (nCS_PER_RANK), .nSLOTS (nSLOTS), .RANKS (RANKS), .RANK_VECT_INDX (RANK_VECT_INDX), .RANK_WIDTH (RANK_WIDTH), .ROW_VECT_INDX (ROW_VECT_INDX), .ROW_WIDTH (ROW_WIDTH), .RTT_NOM (RTT_NOM), .RTT_WR (RTT_WR), .SLOT_0_CONFIG (SLOT_0_CONFIG), .SLOT_1_CONFIG (SLOT_1_CONFIG)) arb_select0 (/*AUTOINST*/ // Outputs .col_periodic_rd (col_periodic_rd), .col_ra (col_ra[RANK_WIDTH-1:0]), .col_ba (col_ba[BANK_WIDTH-1:0]), .col_a (col_a[ROW_WIDTH-1:0]), .col_rmw (col_rmw), .col_rd_wr (col_rd_wr), .col_size (col_size), .col_row (col_row[ROW_WIDTH-1:0]), .col_data_buf_addr (col_data_buf_addr[DATA_BUF_ADDR_WIDTH-1:0]), .col_wr_data_buf_addr (col_wr_data_buf_addr[DATA_BUF_ADDR_WIDTH-1:0]), .mc_bank (mc_bank), .mc_address (mc_address), .mc_ras_n (mc_ras_n), .mc_cas_n (mc_cas_n), .mc_we_n (mc_we_n), .mc_cs_n (mc_cs_n), .mc_odt (mc_odt), .mc_cke (mc_cke), .mc_aux_out0 (mc_aux_out0), .mc_aux_out1 (mc_aux_out1), .mc_cmd (mc_cmd), .mc_data_offset (mc_data_offset), .mc_data_offset_1 (mc_data_offset_1), .mc_data_offset_2 (mc_data_offset_2), .mc_cas_slot (mc_cas_slot), .col_channel_offset (col_channel_offset), .rnk_config (rnk_config), // Inputs .clk (clk), .rst (rst), .init_calib_complete (init_calib_complete), .calib_rddata_offset (calib_rddata_offset), .calib_rddata_offset_1 (calib_rddata_offset_1), .calib_rddata_offset_2 (calib_rddata_offset_2), .req_rank_r (req_rank_r[RANK_VECT_INDX:0]), .req_bank_r (req_bank_r[BANK_VECT_INDX:0]), .req_ras (req_ras[nBANK_MACHS-1:0]), .req_cas (req_cas[nBANK_MACHS-1:0]), .req_wr_r (req_wr_r[nBANK_MACHS-1:0]), .grant_row_r (grant_row_r[nBANK_MACHS-1:0]), .grant_pre_r (grant_pre_r[nBANK_MACHS-1:0]), .row_addr (row_addr[ROW_VECT_INDX:0]), .row_cmd_wr (row_cmd_wr[nBANK_MACHS-1:0]), .insert_maint_r1 (insert_maint_r1), .maint_zq_r (maint_zq_r), .maint_sre_r (maint_sre_r), .maint_srx_r (maint_srx_r), .maint_rank_r (maint_rank_r[RANK_WIDTH-1:0]), .req_periodic_rd_r (req_periodic_rd_r[nBANK_MACHS-1:0]), .req_size_r (req_size_r[nBANK_MACHS-1:0]), .rd_wr_r (rd_wr_r[nBANK_MACHS-1:0]), .req_row_r (req_row_r[ROW_VECT_INDX:0]), .col_addr (col_addr[ROW_VECT_INDX:0]), .req_data_buf_addr_r (req_data_buf_addr_r[DATA_BUF_ADDR_VECT_INDX:0]), .grant_col_r (grant_col_r[nBANK_MACHS-1:0]), .grant_col_wr (grant_col_wr[nBANK_MACHS-1:0]), 
.send_cmd0_row (send_cmd0_row), .send_cmd0_col (send_cmd0_col), .send_cmd1_row (send_cmd1_row), .send_cmd1_col (send_cmd1_col), .send_cmd2_row (send_cmd2_row), .send_cmd2_col (send_cmd2_col), .send_cmd2_pre (send_cmd2_pre), .send_cmd3_col (send_cmd3_col), .sent_col (EVEN_CWL_2T_MODE == "ON" ? sent_col_r : sent_col), .cs_en0 (cs_en0), .cs_en1 (cs_en1), .cs_en2 (cs_en2), .cs_en3 (cs_en3), .grant_config_r (grant_config_r[nBANK_MACHS-1:0]), .rnk_config_strobe (rnk_config_strobe), .slot_0_present (slot_0_present[7:0]), .slot_1_present (slot_1_present[7:0])); endmodule
// ---------------------------------------------------------------------- // Copyright (c) 2016, The Regents of the University of California All // rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents of the University of California // nor the names of its contributors may be used to endorse or // promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE // UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS // OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // ---------------------------------------------------------------------- //---------------------------------------------------------------------------- // Filename: rx_port_reader.v // Version: 1.00.a // Verilog Standard: Verilog-2001 // Description: Handles the RX lifecycle and issues requests for receiving // data input for the RIFFA channel. 
// Author: Matt Jacobsen // History: @mattj: Version 2.0 //----------------------------------------------------------------------------- `define S_RXPORTRD_MAIN_IDLE 6'b00_0001 `define S_RXPORTRD_MAIN_CHECK 6'b00_0010 `define S_RXPORTRD_MAIN_READ 6'b00_0100 `define S_RXPORTRD_MAIN_FLUSH 6'b00_1000 `define S_RXPORTRD_MAIN_DONE 6'b01_0000 `define S_RXPORTRD_MAIN_RESET 6'b10_0000 `define S_RXPORTRD_RX_IDLE 8'b0000_0001 `define S_RXPORTRD_RX_BUF 8'b0000_0010 `define S_RXPORTRD_RX_ADJ_0 8'b0000_0100 `define S_RXPORTRD_RX_ADJ_1 8'b0000_1000 `define S_RXPORTRD_RX_ISSUE 8'b0001_0000 `define S_RXPORTRD_RX_WAIT_0 8'b0010_0000 `define S_RXPORTRD_RX_WAIT_1 8'b0100_0000 `define S_RXPORTRD_RX_DONE 8'b1000_0000 `timescale 1ns/1ns module rx_port_reader #( parameter C_DATA_WIDTH = 9'd64, parameter C_FIFO_DEPTH = 1024, parameter C_MAX_READ_REQ = 2, // Max read: 000=128B, 001=256B, 010=512B, 011=1024B, 100=2048B, 101=4096B // Local parameters parameter C_DATA_WORD_WIDTH = clog2((C_DATA_WIDTH/32)+1), parameter C_FIFO_WORDS = (C_DATA_WIDTH/32)*C_FIFO_DEPTH ) ( input CLK, input RST, input [2:0] CONFIG_MAX_READ_REQUEST_SIZE, // Maximum read payload: 000=128B, 001=256B, 010=512B, 011=1024B, 100=2048B, 101=4096B input [31:0] TXN_DATA, // Read transaction data input TXN_LEN_VALID, // Read transaction length valid input TXN_OFF_LAST_VALID, // Read transaction offset/last valid output [31:0] TXN_DONE_LEN, // Read transaction actual transfer length output TXN_DONE, // Read transaction done output TXN_ERR, // Read transaction completed with error input TXN_DONE_ACK, // Read transaction actual transfer length read output TXN_DATA_FLUSH, // Request that all data in the packer be flushed input TXN_DATA_FLUSHED, // All data in the packer has been flushed output RX_REQ, // Issue a read request output [63:0] RX_ADDR, // Request address output [9:0] RX_LEN, // Request length input RX_REQ_ACK, // Request has been accepted input [C_DATA_WORD_WIDTH-1:0] RX_DATA_EN, // Incoming read data enable input RX_DONE, // Incoming read completed input RX_ERR, // Incoming read completed with error input SG_DONE, // Incoming scatter gather read completed input SG_ERR, // Incoming scatter gather read completed with error input [63:0] SG_ELEM_ADDR, // Scatter gather element address input [31:0] SG_ELEM_LEN, // Scatter gather element length (in words) input SG_ELEM_RDY, // Scatter gather element ready output SG_ELEM_REN, // Scatter gather element read enable output SG_RST, // Scatter gather reset output CHNL_RX, // Signal channel RX output [31:0] CHNL_RX_LEN, // Channel RX length output CHNL_RX_LAST, // Channel RX last output [30:0] CHNL_RX_OFF, // Channel RX offset input CHNL_RX_RECVD, // Channel RX received input CHNL_RX_ACK_RECVD, // Channel RX acknowledgment received input [31:0] CHNL_RX_CONSUMED // Channel words consumed in current RX ); `include "functions.vh" reg [31:0] rTxnData=0, _rTxnData=0; reg rTxnOffLastValid=0, _rTxnOffLastValid=0; reg rTxnLenValid=0, _rTxnLenValid=0; reg [C_DATA_WORD_WIDTH-1:0] rRxDataEn=0, _rRxDataEn=0; (* syn_encoding = "user" *) (* fsm_encoding = "user" *) reg [5:0] rMainState=`S_RXPORTRD_MAIN_IDLE, _rMainState=`S_RXPORTRD_MAIN_IDLE; reg [31:0] rOffLast=0, _rOffLast=0; reg [31:0] rReadWords=0, _rReadWords=0; reg rReadWordsZero=0, _rReadWordsZero=0; reg [0:0] rStart=0, _rStart=0; reg [3:0] rFlushed=0, _rFlushed=0; reg [31:0] rDoneLen=0, _rDoneLen=0; reg rTxnDone=0, _rTxnDone=0; (* syn_encoding = "user" *) (* fsm_encoding = "user" *) reg [7:0] rRxState=`S_RXPORTRD_RX_IDLE, _rRxState=`S_RXPORTRD_RX_IDLE; reg 
rSgRen=0, _rSgRen=0; reg [31:0] rWords=0, _rWords=0; reg [31:0] rBufWords=0, _rBufWords=0; reg [31:0] rBufWordsInit=0, _rBufWordsInit=0; reg rLargeBuf=0, _rLargeBuf=0; reg [63:0] rAddr=64'd0, _rAddr=64'd0; reg [3:0] rValsProp=0, _rValsProp=0; reg [2:0] rCarry=0, _rCarry=0; reg rCopyBufWords=0, _rCopyBufWords=0; reg rUseInit=0, _rUseInit=0; reg [10:0] rPageRem=0, _rPageRem=0; reg rPageSpill=0, _rPageSpill=0; reg rPageSpillInit=0, _rPageSpillInit=0; reg [10:0] rPreLen=0, _rPreLen=0; reg [2:0] rMaxPayloadTrain=0, _rMaxPayloadTrain=0; reg [2:0] rMaxPayloadShift=0, _rMaxPayloadShift=0; reg [9:0] rMaxPayload=0, _rMaxPayload=0; reg rPayloadSpill=0, _rPayloadSpill=0; reg rMaxLen=0, _rMaxLen=0; reg [9:0] rLen=0, _rLen=0; reg rLenEQWordsHi=0, _rLenEQWordsHi=0; reg rLenEQWordsLo=0, _rLenEQWordsLo=0; reg rLenEQBufWordsHi=0, _rLenEQBufWordsHi=0; reg rLenEQBufWordsLo=0, _rLenEQBufWordsLo=0; reg [31:0] rRecvdWords=0, _rRecvdWords=0; reg [31:0] rReqdWords=0, _rReqdWords=0; reg [31:0] rRequestingWords=0, _rRequestingWords=0; reg [31:0] rAvailWords=0, _rAvailWords=0; reg [31:0] rPartWords=0, _rPartWords=0; reg [10:0] rAckCount=0, _rAckCount=0; reg rAckCountEQ0=0, _rAckCountEQ0=0; reg rLastDoneRead=1, _rLastDoneRead=1; reg rTxnDoneAck=0, _rTxnDoneAck=0; reg rPartWordsRecvd=0, _rPartWordsRecvd=0; reg rCarryInv=0, _rCarryInv=0; reg rSpaceAvail=0, _rSpaceAvail=0; reg rPartialDone=0, _rPartialDone=0; reg rReqPartialDone=0, _rReqPartialDone=0; reg rErr=0, _rErr=0; assign TXN_DONE_LEN = rDoneLen; assign TXN_DONE = (rTxnDone | rPartialDone); assign TXN_ERR = rErr; assign TXN_DATA_FLUSH = rMainState[3]; // S_RXPORTRD_MAIN_FLUSH assign RX_REQ = (rRxState[4] & rSpaceAvail); // S_RXPORTRD_RX_ISSUE assign RX_ADDR = rAddr; assign RX_LEN = rLen; assign SG_ELEM_REN = rSgRen; assign SG_RST = rMainState[1]; // S_RXPORTRD_MAIN_CHECK assign CHNL_RX = (rMainState[2] | rMainState[3] | rMainState[4]); // S_RXPORTRD_MAIN_READ | S_RXPORTRD_MAIN_FLUSH | S_RXPORTRD_MAIN_DONE assign CHNL_RX_LEN = rReadWords; assign CHNL_RX_LAST = rOffLast[0]; assign CHNL_RX_OFF = rOffLast[31:1]; // Buffer signals that come from outside the rx_port. always @ (posedge CLK) begin rTxnData <= #1 _rTxnData; rTxnOffLastValid <= #1 _rTxnOffLastValid; rTxnLenValid <= #1 _rTxnLenValid; rTxnDoneAck <= #1 (RST ? 1'd0 : _rTxnDoneAck); rRxDataEn <= #1 _rRxDataEn; end always @ (*) begin _rTxnData = TXN_DATA; _rTxnOffLastValid = TXN_OFF_LAST_VALID; _rTxnLenValid = TXN_LEN_VALID; _rTxnDoneAck = TXN_DONE_ACK; _rRxDataEn = RX_DATA_EN; end // Handle RX lifecycle. always @ (posedge CLK) begin rMainState <= #1 (RST ? `S_RXPORTRD_MAIN_IDLE : _rMainState); rOffLast <= #1 _rOffLast; rReadWords <= #1 _rReadWords; rReadWordsZero <= #1 _rReadWordsZero; rStart <= #1 _rStart; rFlushed <= #1 _rFlushed; rDoneLen <= #1 (RST ? 0 : _rDoneLen); rTxnDone <= #1 _rTxnDone; end always @ (*) begin _rMainState = rMainState; _rDoneLen = rDoneLen; _rTxnDone = rTxnDone; _rOffLast = (rTxnOffLastValid ? rTxnData : rOffLast); _rReadWords = (rMainState[0] & rTxnLenValid ? 
rTxnData : rReadWords); _rReadWordsZero = (rReadWords == 0); _rStart = ((rStart<<1) | rTxnLenValid); _rFlushed = ((rFlushed<<1) | TXN_DATA_FLUSHED); case (rMainState) `S_RXPORTRD_MAIN_IDLE: begin // Wait for new read transaction offset/last & length _rTxnDone = 0; if (rStart[0]) _rMainState = `S_RXPORTRD_MAIN_CHECK; end `S_RXPORTRD_MAIN_CHECK: begin // See if we should start a transaction if (!rReadWordsZero) _rMainState = `S_RXPORTRD_MAIN_READ; else if (rOffLast[0]) _rMainState = `S_RXPORTRD_MAIN_FLUSH; else _rMainState = `S_RXPORTRD_MAIN_IDLE; end `S_RXPORTRD_MAIN_READ: begin // Issue read transfers, wait for data to arrive if (rRxState[7] & rLastDoneRead) begin // S_RXPORTRD_RX_DONE _rDoneLen = rRecvdWords; _rMainState = `S_RXPORTRD_MAIN_FLUSH; end end `S_RXPORTRD_MAIN_FLUSH: begin // Wait for data to be flushed if (rFlushed[3]) _rMainState = `S_RXPORTRD_MAIN_DONE; end `S_RXPORTRD_MAIN_DONE: begin // Wait for RX to be received and ackd in the channel if (CHNL_RX_RECVD & CHNL_RX_ACK_RECVD) _rMainState = `S_RXPORTRD_MAIN_RESET; end `S_RXPORTRD_MAIN_RESET: begin // Wait until RX has dropped in the channel if (!CHNL_RX_RECVD) begin _rTxnDone = 1; _rMainState = `S_RXPORTRD_MAIN_IDLE; end end default: begin _rMainState = `S_RXPORTRD_MAIN_IDLE; end endcase end // Issue the read requests at the buffer level. Decrement the amount requested // after every request. Continue until all words have been requested. wire [9:0] wAddrLoInv = ~rAddr[11:2]; always @ (posedge CLK) begin rRxState <= #1 (RST ? `S_RXPORTRD_RX_IDLE : _rRxState); rSgRen <= #1 (RST ? 1'd0: _rSgRen); rWords <= #1 _rWords; rBufWords <= #1 _rBufWords; rBufWordsInit <= #1 _rBufWordsInit; rLargeBuf <= #1 _rLargeBuf; rAddr <= #1 _rAddr; rCarry <= #1 _rCarry; rValsProp <= #1 _rValsProp; rPageRem <= #1 _rPageRem; rPageSpill <= #1 _rPageSpill; rPageSpillInit <= #1 _rPageSpillInit; rCopyBufWords <= #1 _rCopyBufWords; rUseInit <= #1 _rUseInit; rPreLen <= #1 _rPreLen; rMaxPayloadTrain <= #1 _rMaxPayloadTrain; rMaxPayloadShift <= #1 _rMaxPayloadShift; rMaxPayload <= #1 _rMaxPayload; rPayloadSpill <= #1 _rPayloadSpill; rMaxLen <= #1 _rMaxLen; rLen <= #1 _rLen; rLenEQWordsHi <= #1 _rLenEQWordsHi; rLenEQWordsLo <= #1 _rLenEQWordsLo; rLenEQBufWordsHi <= #1 _rLenEQBufWordsHi; rLenEQBufWordsLo <= #1 _rLenEQBufWordsLo; end always @ (*) begin _rRxState = rRxState; _rCopyBufWords = rCopyBufWords; _rUseInit = rUseInit; _rSgRen = rSgRen; _rValsProp = ((rValsProp<<1) | rRxState[2]); // S_RXPORTRD_RX_ADJ_0 _rLargeBuf = (SG_ELEM_LEN > rWords); {_rCarry[0], _rAddr[15:0]} = (rRxState[1] ? SG_ELEM_ADDR[15:0] : (rAddr[15:0] + ({12{RX_REQ_ACK}} & {rLen,2'd0}))); {_rCarry[1], _rAddr[31:16]} = (rRxState[1] ? SG_ELEM_ADDR[31:16] : (rAddr[31:16] + rCarry[0])); {_rCarry[2], _rAddr[47:32]} = (rRxState[1] ? SG_ELEM_ADDR[47:32] : (rAddr[47:32] + rCarry[1])); _rAddr[63:48] = (rRxState[1] ? SG_ELEM_ADDR[63:48] : (rAddr[63:48] + rCarry[2])); _rWords = (rRxState[0] ? rReadWords : (rWords - ({10{RX_REQ_ACK}} & rLen))); _rBufWordsInit = (rLargeBuf ? rWords : SG_ELEM_LEN); _rBufWords = (rCopyBufWords ? rBufWordsInit : rBufWords) - ({10{RX_REQ_ACK}} & rLen); _rPageRem = (wAddrLoInv + 1'd1); _rPageSpillInit = (rBufWordsInit > rPageRem); _rPageSpill = (rBufWords > rPageRem); _rPreLen = ((rPageSpillInit & rUseInit) | (rPageSpill & !rUseInit) ? rPageRem : rBufWords[10:0]); _rMaxPayloadTrain = (CONFIG_MAX_READ_REQUEST_SIZE > 3'd4 ? 3'd4 : CONFIG_MAX_READ_REQUEST_SIZE); _rMaxPayloadShift = (C_MAX_READ_REQ[2:0] < rMaxPayloadTrain ? 
C_MAX_READ_REQ[2:0] : rMaxPayloadTrain); _rMaxPayload = (6'd32<<rMaxPayloadShift); _rPayloadSpill = (rPreLen > rMaxPayload); _rMaxLen = ((rMaxLen & !rValsProp[2]) | RX_REQ_ACK); _rLen = (rPayloadSpill | rMaxLen ? rMaxPayload : rPreLen[9:0]); _rLenEQWordsHi = (16'd0 == rWords[31:16]); _rLenEQWordsLo = ({6'd0, rLen} == rWords[15:0]); _rLenEQBufWordsHi = (16'd0 == rBufWords[31:16]); _rLenEQBufWordsLo = ({6'd0, rLen} == rBufWords[15:0]); case (rRxState) `S_RXPORTRD_RX_IDLE: begin // Wait for a new read transaction if (rMainState[2]) // S_RXPORTRD_MAIN_READ _rRxState = `S_RXPORTRD_RX_BUF; end `S_RXPORTRD_RX_BUF: begin // Wait for buffer length and address if (SG_ELEM_RDY) begin _rSgRen = 1; _rRxState = `S_RXPORTRD_RX_ADJ_0; end else if (rErr) begin _rRxState = `S_RXPORTRD_RX_WAIT_0; end end `S_RXPORTRD_RX_ADJ_0: begin // Fix for large buffer _rSgRen = 0; _rCopyBufWords = rSgRen; _rRxState = `S_RXPORTRD_RX_ADJ_1; end // (bufwords and pagerem valid here) `S_RXPORTRD_RX_ADJ_1: begin // Wait for the value to propagate // Check for page boundary crossing // Fix for page boundary crossing // Check for max read payload // Fix for max read payload _rCopyBufWords = 0; _rUseInit = rCopyBufWords; if (rValsProp[3]) _rRxState = `S_RXPORTRD_RX_ISSUE; end `S_RXPORTRD_RX_ISSUE: begin // Wait for the request to be accepted if (RX_REQ_ACK) begin if (rErr | (rLenEQWordsHi & rLenEQWordsLo)) _rRxState = `S_RXPORTRD_RX_WAIT_0; else if (rLenEQBufWordsHi & rLenEQBufWordsLo) _rRxState = `S_RXPORTRD_RX_BUF; else _rRxState = `S_RXPORTRD_RX_ADJ_0; end end `S_RXPORTRD_RX_WAIT_0: begin // Wait for rAckCount to update _rRxState = `S_RXPORTRD_RX_WAIT_1; end `S_RXPORTRD_RX_WAIT_1: begin // Wait for requested data to arrive if (rAckCountEQ0) _rRxState = `S_RXPORTRD_RX_DONE; end `S_RXPORTRD_RX_DONE: begin // Signal done if (rMainState[3]) // S_RXPORTRD_MAIN_FLUSH _rRxState = `S_RXPORTRD_RX_IDLE; end default: begin _rRxState = `S_RXPORTRD_RX_IDLE; end endcase end // Count the data. always @ (posedge CLK) begin rRecvdWords <= #1 _rRecvdWords; rReqdWords <= #1 _rReqdWords; rPartWords <= #1 _rPartWords; rAckCount <= #1 _rAckCount; rAckCountEQ0 <= #1 _rAckCountEQ0; rPartWordsRecvd <= #1 _rPartWordsRecvd; rRequestingWords <= #1 _rRequestingWords; rAvailWords <= #1 _rAvailWords; rCarryInv <= #1 _rCarryInv; rSpaceAvail <= #1 _rSpaceAvail; rLastDoneRead <= #1 (RST ? 1'd1 : _rLastDoneRead); end always @ (*) begin // Count words as they arrive (words from the rx_engine directly). if (rMainState[0]) // S_RXPORTRD_MAIN_IDLE _rRecvdWords = #1 0; else _rRecvdWords = #1 rRecvdWords + rRxDataEn; // Count words as they are requested. if (rMainState[0]) // S_RXPORTRD_MAIN_IDLE _rReqdWords = #1 0; else _rReqdWords = #1 rReqdWords + ({10{RX_REQ_ACK}} & rLen); // Track outstanding requests if (rMainState[0]) // S_RXPORTRD_MAIN_IDLE _rAckCount = 0; else _rAckCount = rAckCount + RX_REQ_ACK - RX_DONE; _rAckCountEQ0 = (rAckCount == 11'd0); // Track when the user reads the actual transfer amount. _rLastDoneRead = (rTxnDone ? 1'd0 : (rLastDoneRead | rTxnDoneAck)); // Track the amount of words that are expected to arrive. _rPartWords = #1 (rTxnLenValid ? rTxnData : rPartWords); // Compare counts. 
_rPartWordsRecvd = (rRecvdWords >= rPartWords); _rRequestingWords = rReqdWords + rLen; {_rCarryInv, _rAvailWords[15:0]} = {1'd1, rRequestingWords[15:0]} - CHNL_RX_CONSUMED[15:0]; _rAvailWords[31:16] = rRequestingWords[31:16] - CHNL_RX_CONSUMED[31:16] - !rCarryInv; _rSpaceAvail = (rAvailWords <= C_FIFO_WORDS); end // Facilitate sending a TXN_DONE when we receive a TXN_ACK after the transaction // has begun sending. This will happen when the workstation detects that it has // sent/used all its currently mapped scatter gather elements, but it's not enough // to complete the transaction. The TXN_DONE will let the workstation know it can // release the current scatter gather mappings and allocate new ones. always @ (posedge CLK) begin rPartialDone <= #1 _rPartialDone; rReqPartialDone <= #1 (RST ? 1'd0 : _rReqPartialDone); end always @ (*) begin // Signal TXN_DONE after we've recieved the (seemingly superfluous) TXN_ACK // and received the corresponding amount of words. _rPartialDone = (rReqPartialDone & rPartWordsRecvd); // Keep track of (seemingly superfluous) TXN_ACK requests. if ((rReqPartialDone & rPartWordsRecvd) | rMainState[0]) // S_RXPORTRD_MAIN_IDLE _rReqPartialDone = 0; else _rReqPartialDone = (rReqPartialDone | rTxnLenValid); end // Handle errors in the main data or scatter gather data. always @ (posedge CLK) begin rErr <= #1 (RST ? 1'd0 : _rErr); end always @ (*) begin // Keep track of errors if we encounter them. if (rMainState[0]) // S_RXPORTRD_MAIN_IDLE _rErr = 0; else _rErr = (rErr | (RX_DONE & RX_ERR) | (SG_DONE & SG_ERR)); end /* wire [35:0] wControl0; chipscope_icon_1 cs_icon( .CONTROL0(wControl0) ); chipscope_ila_t8_512 a0( .CLK(CLK), .CONTROL(wControl0), .TRIG0({TXN_LEN_VALID | TXN_DONE_ACK | TXN_DONE | TXN_ERR, 1'd0, rMainState}), .DATA({176'd0, 64'd0, // 64 rAddr, // 64 SG_ELEM_RDY, // 1 1'd0, // 1 1'd0, // 1 1'd0, // 1 rSgRen, // 1 1'd0, // 1 rLastDoneRead, // 1 rLen, // 10 rWords, // 32 rAckCount, // 11 rPartWords, // 32 rPartWordsRecvd, // 1 rReqPartialDone, // 1 rPartialDone, // 1 rTxnDone, // 1 rRxState, // 8 rRecvdWords, // 32 rReadWords, // 32 TXN_LEN_VALID, // 1 TXN_DONE_ACK, // 1 rDoneLen, // 32 rMainState}) // 6 ); */ endmodule
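// ----------------------------------------------------------------------
// Illustrative sketch (not part of the original RIFFA sources): the request
// length clamping that rx_port_reader spreads across several pipelined steps
// (wAddrLoInv, rPageRem, rMaxPayload, rPreLen, rLen), collapsed here into a
// single combinational module for clarity. A read request must not cross a
// 4KB page boundary and must not exceed the maximum read request size
// (32 words at 128B, doubled per size code). The RTL above additionally takes
// the minimum of C_MAX_READ_REQ and CONFIG_MAX_READ_REQUEST_SIZE; this sketch
// takes a single size input. Module and port names are hypothetical.
// ----------------------------------------------------------------------
`timescale 1ns/1ns
module rx_req_len_sketch
(
	input [63:0] ADDR,         // Byte address of the next read request
	input [31:0] WORDS_REM,    // 32 bit words remaining in the current buffer
	input [2:0] MAX_READ_REQ,  // 000=128B, 001=256B, 010=512B, 011=1024B, 100=2048B
	output [9:0] REQ_LEN       // Clamped request length (in 32 bit words)
);

// Words remaining before the next 4KB page boundary: invert the word-aligned
// low address bits and add one, as wAddrLoInv/rPageRem do above.
wire [9:0] wAddrLoInv = ~ADDR[11:2];
wire [10:0] wPageRem = (wAddrLoInv + 1'd1);

// Maximum payload in words: 32 words for a 128B request, doubled for each
// size code, capped at the 2048B (512 word) limit used above.
wire [2:0] wShift = (MAX_READ_REQ > 3'd4 ? 3'd4 : MAX_READ_REQ);
wire [9:0] wMaxPayload = (10'd32<<wShift);

// Clamp first to the page remainder, then to the maximum payload.
wire [10:0] wPreLen = (WORDS_REM > wPageRem ? wPageRem : WORDS_REM[10:0]);
assign REQ_LEN = (wPreLen > wMaxPayload ? wMaxPayload : wPreLen[9:0]);

endmodule
// Example (hypothetical values): ADDR = 64'h0000_0000_0000_0F80 leaves 32 words
// before the page boundary, so with WORDS_REM = 100 and MAX_READ_REQ = 3'b010
// (512B, 128 words) the clamped request length is 32 words.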
// (C) 2001-2016 Altera Corporation. All rights reserved. // Your use of Altera Corporation's design tools, logic functions and other // software and tools, and its AMPP partner logic functions, and any output // files any of the foregoing (including device programming or simulation // files), and any associated documentation or information are expressly subject // to the terms and conditions of the Altera Program License Subscription // Agreement, Altera MegaCore Function License Agreement, or other applicable // license agreement, including, without limitation, that your use is for the // sole purpose of programming logic devices manufactured by Altera and sold by // Altera or its authorized distributors. Please refer to the applicable // agreement for further details. // THIS FILE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THIS FILE OR THE USE OR OTHER DEALINGS // IN THIS FILE. /****************************************************************************** * * * This module writes data to the RS232 UART Port. * * * ******************************************************************************/ module altera_up_rs232_out_serializer ( // Inputs clk, reset, transmit_data, transmit_data_en, // Bidirectionals // Outputs fifo_write_space, serial_data_out ); /***************************************************************************** * Parameter Declarations * *****************************************************************************/ parameter CW = 9; // Baud counter width parameter BAUD_TICK_COUNT = 433; parameter HALF_BAUD_TICK_COUNT = 216; parameter TDW = 11; // Total data width parameter DW = 9; // Data width /***************************************************************************** * Port Declarations * *****************************************************************************/ // Inputs input clk; input reset; input [DW: 0] transmit_data; input transmit_data_en; // Bidirectionals // Outputs output reg [ 7: 0] fifo_write_space; output reg serial_data_out; /***************************************************************************** * Constant Declarations * *****************************************************************************/ /***************************************************************************** * Internal Wires and Registers Declarations * *****************************************************************************/ // Internal Wires wire shift_data_reg_en; wire all_bits_transmitted; wire read_fifo_en; wire fifo_is_empty; wire fifo_is_full; wire [ 6: 0] fifo_used; wire [DW: 0] data_from_fifo; // Internal Registers reg transmitting_data; reg [DW+1:0] data_out_shift_reg; // State Machine Registers /***************************************************************************** * Finite State Machine(s) * *****************************************************************************/ /***************************************************************************** * Sequential Logic * *****************************************************************************/ always @(posedge clk) begin if (reset) fifo_write_space <= 8'h00; else fifo_write_space <= 8'h80 - {fifo_is_full, fifo_used}; end always @(posedge clk) 
begin if (reset) serial_data_out <= 1'b1; else serial_data_out <= data_out_shift_reg[0]; end always @(posedge clk) begin if (reset) transmitting_data <= 1'b0; else if (all_bits_transmitted) transmitting_data <= 1'b0; else if (fifo_is_empty == 1'b0) transmitting_data <= 1'b1; end always @(posedge clk) begin if (reset) data_out_shift_reg <= {(DW + 2){1'b1}}; else if (read_fifo_en) data_out_shift_reg <= {data_from_fifo, 1'b0}; else if (shift_data_reg_en) data_out_shift_reg <= {1'b1, data_out_shift_reg[DW+1:1]}; end /***************************************************************************** * Combinational Logic * *****************************************************************************/ assign read_fifo_en = ~transmitting_data & ~fifo_is_empty & ~all_bits_transmitted; /***************************************************************************** * Internal Modules * *****************************************************************************/ altera_up_rs232_counters RS232_Out_Counters ( // Inputs .clk (clk), .reset (reset), .reset_counters (~transmitting_data), // Bidirectionals // Outputs .baud_clock_rising_edge (shift_data_reg_en), .baud_clock_falling_edge (), .all_bits_transmitted (all_bits_transmitted) ); defparam RS232_Out_Counters.CW = CW, RS232_Out_Counters.BAUD_TICK_COUNT = BAUD_TICK_COUNT, RS232_Out_Counters.HALF_BAUD_TICK_COUNT = HALF_BAUD_TICK_COUNT, RS232_Out_Counters.TDW = TDW; altera_up_sync_fifo RS232_Out_FIFO ( // Inputs .clk (clk), .reset (reset), .write_en (transmit_data_en & ~fifo_is_full), .write_data (transmit_data), .read_en (read_fifo_en), // Bidirectionals // Outputs .fifo_is_empty (fifo_is_empty), .fifo_is_full (fifo_is_full), .words_used (fifo_used), .read_data (data_from_fifo) ); defparam RS232_Out_FIFO.DW = DW, RS232_Out_FIFO.DATA_DEPTH = 128, RS232_Out_FIFO.AW = 6; endmodule
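// ----------------------------------------------------------------------
// Illustrative sketch (not part of the Altera sources): one way the baud tick
// parameters consumed above can be derived from a clock frequency and a baud
// rate. The 50 MHz / 115200 figures are assumptions made only for this
// example; under them the arithmetic reproduces the default
// BAUD_TICK_COUNT = 433 and HALF_BAUD_TICK_COUNT = 216 (the counter counts
// down to zero, so the reload value is one full bit period minus one).
// ----------------------------------------------------------------------
module rs232_baud_params_sketch;

	localparam integer CLK_FREQ_HZ = 50_000_000;                    // assumed system clock
	localparam integer BAUD_RATE = 115_200;                         // assumed baud rate
	localparam integer BAUD_TICKS = (CLK_FREQ_HZ / BAUD_RATE) - 1;  // 433
	localparam integer HALF_BAUD_TICKS = BAUD_TICKS / 2;            // 216

	initial $display("BAUD_TICK_COUNT=%0d HALF_BAUD_TICK_COUNT=%0d",
	                 BAUD_TICKS, HALF_BAUD_TICKS);

endmodule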
// ---------------------------------------------------------------------- // Copyright (c) 2016, The Regents of the University of California All // rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents of the University of California // nor the names of its contributors may be used to endorse or // promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE // UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS // OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // ---------------------------------------------------------------------- //---------------------------------------------------------------------------- // Filename: tx_port_writer.v // Version: 1.00.a // Verilog Standard: Verilog-2001 // Description: Handles receiving new transaction events and data, and // making requests to tx engine. // for the RIFFA channel. 
// Author: Matt Jacobsen // History: @mattj: Version 2.0 //----------------------------------------------------------------------------- `define S_TXPORTWR_MAIN_IDLE 8'b0000_0001 `define S_TXPORTWR_MAIN_CHECK 8'b0000_0010 `define S_TXPORTWR_MAIN_SIG_NEW 8'b0000_0100 `define S_TXPORTWR_MAIN_NEW_ACK 8'b0000_1000 `define S_TXPORTWR_MAIN_WRITE 8'b0001_0000 `define S_TXPORTWR_MAIN_DONE 8'b0010_0000 `define S_TXPORTWR_MAIN_SIG_DONE 8'b0100_0000 `define S_TXPORTWR_MAIN_RESET 8'b1000_0000 `define S_TXPORTWR_TX_IDLE 8'b0000_0001 `define S_TXPORTWR_TX_BUF 8'b0000_0010 `define S_TXPORTWR_TX_ADJ_0 8'b0000_0100 `define S_TXPORTWR_TX_ADJ_1 8'b0000_1000 `define S_TXPORTWR_TX_ADJ_2 8'b0001_0000 `define S_TXPORTWR_TX_CHECK_DATA 8'b0010_0000 `define S_TXPORTWR_TX_WRITE 8'b0100_0000 `define S_TXPORTWR_TX_WRITE_REM 8'b1000_0000 `timescale 1ns/1ns module tx_port_writer ( input CLK, input RST, input [2:0] CONFIG_MAX_PAYLOAD_SIZE, // Maximum write payload: 000=128B, 001=256B, 010=512B, 011=1024B output TXN, // Write transaction notification input TXN_ACK, // Write transaction acknowledged output [31:0] TXN_LEN, // Write transaction length output [31:0] TXN_OFF_LAST, // Write transaction offset/last output [31:0] TXN_DONE_LEN, // Write transaction actual transfer length output TXN_DONE, // Write transaction done output TXN_ERR, // Write transaction encountered an error input TXN_DONE_ACK, // Write transaction actual transfer length read input NEW_TXN, // Transaction parameters are valid output NEW_TXN_ACK, // Transaction parameter read, continue input NEW_TXN_LAST, // Channel last write input [31:0] NEW_TXN_LEN, // Channel write length (in 32 bit words) input [30:0] NEW_TXN_OFF, // Channel write offset input [31:0] NEW_TXN_WORDS_RECVD, // Count of data words received in transaction input NEW_TXN_DONE, // Transaction is closed input [63:0] SG_ELEM_ADDR, // Scatter gather element address input [31:0] SG_ELEM_LEN, // Scatter gather element length (in words) input SG_ELEM_RDY, // Scatter gather element ready input SG_ELEM_EMPTY, // Scatter gather elements empty output SG_ELEM_REN, // Scatter gather element read enable output SG_RST, // Scatter gather data reset input SG_ERR, // Scatter gather read encountered an error output TX_REQ, // Outgoing write request input TX_REQ_ACK, // Outgoing write request acknowledged output [63:0] TX_ADDR, // Outgoing write high address output [9:0] TX_LEN, // Outgoing write length (in 32 bit words) output TX_LAST, // Outgoing write is last request for transaction input TX_SENT // Outgoing write complete ); `include "functions.vh" (* syn_encoding = "user" *) (* fsm_encoding = "user" *) reg [7:0] rMainState=`S_TXPORTWR_MAIN_IDLE, _rMainState=`S_TXPORTWR_MAIN_IDLE; reg [31:0] rOffLast=0, _rOffLast=0; reg rWordsEQ0=0, _rWordsEQ0=0; reg rStarted=0, _rStarted=0; reg [31:0] rDoneLen=0, _rDoneLen=0; reg rSgErr=0, _rSgErr=0; reg rTxErrd=0, _rTxErrd=0; reg rTxnAck=0, _rTxnAck=0; (* syn_encoding = "user" *) (* fsm_encoding = "user" *) reg [7:0] rTxState=`S_TXPORTWR_TX_IDLE, _rTxState=`S_TXPORTWR_TX_IDLE; reg [31:0] rSentWords=0, _rSentWords=0; reg [31:0] rWords=0, _rWords=0; reg [31:0] rBufWords=0, _rBufWords=0; reg [31:0] rBufWordsInit=0, _rBufWordsInit=0; reg rLargeBuf=0, _rLargeBuf=0; reg [63:0] rAddr=64'd0, _rAddr=64'd0; reg [2:0] rCarry=0, _rCarry=0; reg rValsPropagated=0, _rValsPropagated=0; reg [5:0] rValsProp=0, _rValsProp=0; reg rCopyBufWords=0, _rCopyBufWords=0; reg rUseInit=0, _rUseInit=0; reg [10:0] rPageRem=0, _rPageRem=0; reg rPageSpill=0, _rPageSpill=0; reg rPageSpillInit=0, 
_rPageSpillInit=0; reg [10:0] rPreLen=0, _rPreLen=0; reg [2:0] rMaxPayloadSize=0, _rMaxPayloadSize=0; reg [2:0] rMaxPayloadShift=0, _rMaxPayloadShift=0; reg [9:0] rMaxPayload=0, _rMaxPayload=0; reg rPayloadSpill=0, _rPayloadSpill=0; reg rMaxLen=1, _rMaxLen=1; reg [9:0] rLen=0, _rLen=0; reg [31:0] rSendingWords=0, _rSendingWords=0; reg rAvail=0, _rAvail=0; reg [1:0] rTxnDone=0, _rTxnDone=0; reg [9:0] rLastLen=0, _rLastLen=0; reg rLastLenEQ0=0, _rLastLenEQ0=0; reg rLenEQWords=0, _rLenEQWords=0; reg rLenEQBufWords=0, _rLenEQBufWords=0; reg rNotRequesting=1, _rNotRequesting=1; reg [63:0] rReqAddr=64'd0, _rReqAddr=64'd0; reg [9:0] rReqLen=0, _rReqLen=0; reg rReqLast=0, _rReqLast=0; reg rTxReqAck=0, _rTxReqAck=0; reg rDone=0, _rDone=0; reg [9:0] rAckCount=0, _rAckCount=0; reg rTxSent=0, _rTxSent=0; reg rLastDoneRead=1, _rLastDoneRead=1; reg rTxnDoneAck=0, _rTxnDoneAck=0; reg rReqPartialDone=0, _rReqPartialDone=0; reg rPartialDone=0, _rPartialDone=0; assign NEW_TXN_ACK = rMainState[1]; // S_TXPORTWR_MAIN_CHECK assign TXN = rMainState[2]; // S_TXPORTWR_MAIN_SIG_NEW assign TXN_DONE = (rMainState[6] | rPartialDone); // S_TXPORTWR_MAIN_SIG_DONE assign TXN_LEN = rWords; assign TXN_OFF_LAST = rOffLast; assign TXN_DONE_LEN = rDoneLen; assign TXN_ERR = rTxErrd; assign SG_ELEM_REN = rTxState[2]; // S_TXPORTWR_TX_ADJ_0 assign SG_RST = rMainState[3]; // S_TXPORTWR_MAIN_NEW_ACK assign TX_REQ = !rNotRequesting; assign TX_ADDR = rReqAddr; assign TX_LEN = rReqLen; assign TX_LAST = rReqLast; // Buffer the input signals that come from outside the tx_port. always @ (posedge CLK) begin rTxnAck <= #1 (RST ? 1'd0 : _rTxnAck); rTxnDoneAck <= #1 (RST ? 1'd0 : _rTxnDoneAck); rSgErr <= #1 (RST ? 1'd0 : _rSgErr); rTxReqAck <= #1 (RST ? 1'd0 : _rTxReqAck); rTxSent <= #1 (RST ? 1'd0 : _rTxSent); end always @ (*) begin _rTxnAck = TXN_ACK; _rTxnDoneAck = TXN_DONE_ACK; _rSgErr = SG_ERR; _rTxReqAck = TX_REQ_ACK; _rTxSent = TX_SENT; end // Wait for a NEW_TXN request. Then request transfers until all the data is sent // or until the specified length is reached. Then signal TXN_DONE. always @ (posedge CLK) begin rMainState <= #1 (RST ? `S_TXPORTWR_MAIN_IDLE : _rMainState); rOffLast <= #1 _rOffLast; rWordsEQ0 <= #1 _rWordsEQ0; rStarted <= #1 _rStarted; rDoneLen <= #1 (RST ? 0 : _rDoneLen); rTxErrd <= #1 (RST ? 1'd0 : _rTxErrd); end always @ (*) begin _rMainState = rMainState; _rOffLast = rOffLast; _rWordsEQ0 = rWordsEQ0; _rStarted = rStarted; _rDoneLen = rDoneLen; _rTxErrd = rTxErrd; case (rMainState) `S_TXPORTWR_MAIN_IDLE: begin // Wait for channel write request _rStarted = 0; _rWordsEQ0 = (NEW_TXN_LEN == 0); _rOffLast = {NEW_TXN_OFF, NEW_TXN_LAST}; if (NEW_TXN) _rMainState = `S_TXPORTWR_MAIN_CHECK; end `S_TXPORTWR_MAIN_CHECK: begin // Continue with transaction? if (rOffLast[0] | !rWordsEQ0) _rMainState = `S_TXPORTWR_MAIN_SIG_NEW; else _rMainState = `S_TXPORTWR_MAIN_RESET; end `S_TXPORTWR_MAIN_SIG_NEW: begin // Signal new write _rMainState = `S_TXPORTWR_MAIN_NEW_ACK; end `S_TXPORTWR_MAIN_NEW_ACK: begin // Wait for acknowledgement if (rTxnAck) // ACK'd on PC read of TXN length _rMainState = (rWordsEQ0 ? 
`S_TXPORTWR_MAIN_SIG_DONE : `S_TXPORTWR_MAIN_WRITE); end `S_TXPORTWR_MAIN_WRITE: begin // Start writing and wait for all writes to complete _rStarted = (rStarted | rTxState[1]); // S_TXPORTWR_TX_BUF _rTxErrd = (rTxErrd | rSgErr); if (rTxState[0] & rStarted) // S_TXPORTWR_TX_IDLE _rMainState = `S_TXPORTWR_MAIN_DONE; end `S_TXPORTWR_MAIN_DONE: begin // Wait for the last transaction to complete if (rDone & rLastDoneRead) begin _rDoneLen = rSentWords; _rMainState = `S_TXPORTWR_MAIN_SIG_DONE; end end `S_TXPORTWR_MAIN_SIG_DONE: begin // Signal the done port _rTxErrd = 0; _rMainState = `S_TXPORTWR_MAIN_RESET; end `S_TXPORTWR_MAIN_RESET: begin // Wait for the channel tx to drop if (NEW_TXN_DONE) _rMainState = `S_TXPORTWR_MAIN_IDLE; end default: begin _rMainState = `S_TXPORTWR_MAIN_IDLE; end endcase end // Manage sending TX requests to the TX engine. Transfers will be limited // by each scatter gather buffer's size, max payload size, and must not // cross a (4KB) page boundary. The request is only made if there is sufficient // data already written to the buffer. wire [9:0] wLastLen = (NEW_TXN_WORDS_RECVD - rSentWords); wire [9:0] wAddrLoInv = ~rAddr[11:2]; wire [10:0] wPageRem = (wAddrLoInv + 1'd1); always @ (posedge CLK) begin rTxState <= #1 (RST | rSgErr ? `S_TXPORTWR_TX_IDLE : _rTxState); rSentWords <= #1 (rMainState[0] ? 0 : _rSentWords); rWords <= #1 _rWords; rBufWords <= #1 _rBufWords; rBufWordsInit <= #1 _rBufWordsInit; rAddr <= #1 _rAddr; rCarry <= #1 _rCarry; rValsPropagated <= #1 _rValsPropagated; rValsProp <= #1 _rValsProp; rLargeBuf <= #1 _rLargeBuf; rPageRem <= #1 _rPageRem; rPageSpill <= #1 _rPageSpill; rPageSpillInit <= #1 _rPageSpillInit; rCopyBufWords <= #1 _rCopyBufWords; rUseInit <= #1 _rUseInit; rPreLen <= #1 _rPreLen; rMaxPayloadSize <= #1 _rMaxPayloadSize; rMaxPayloadShift <= #1 _rMaxPayloadShift; rMaxPayload <= #1 _rMaxPayload; rPayloadSpill <= #1 _rPayloadSpill; rMaxLen <= #1 (RST ? 1'd1 : _rMaxLen); rLen <= #1 _rLen; rSendingWords <= #1 _rSendingWords; rAvail <= #1 _rAvail; rTxnDone <= #1 _rTxnDone; rLastLen <= #1 _rLastLen; rLastLenEQ0 <= #1 _rLastLenEQ0; rLenEQWords <= #1 _rLenEQWords; rLenEQBufWords <= #1 _rLenEQBufWords; end always @ (*) begin _rTxState = rTxState; _rCopyBufWords = rCopyBufWords; _rUseInit = rUseInit; _rValsProp = ((rValsProp<<1) | rTxState[3]); // S_TXPORTWR_TX_ADJ_1 _rValsPropagated = (rValsProp == 6'd0); _rLargeBuf = (SG_ELEM_LEN > rWords); {_rCarry[0], _rAddr[15:0]} = (rTxState[1] ? SG_ELEM_ADDR[15:0] : (rAddr[15:0] + ({12{rTxState[6]}} & {rLen, 2'd0}))); // S_TXPORTWR_TX_WRITE {_rCarry[1], _rAddr[31:16]} = (rTxState[1] ? SG_ELEM_ADDR[31:16] : (rAddr[31:16] + rCarry[0])); {_rCarry[2], _rAddr[47:32]} = (rTxState[1] ? SG_ELEM_ADDR[47:32] : (rAddr[47:32] + rCarry[1])); _rAddr[63:48] = (rTxState[1] ? SG_ELEM_ADDR[63:48] : (rAddr[63:48] + rCarry[2])); _rSentWords = (rTxState[7] ? NEW_TXN_WORDS_RECVD : rSentWords) + ({10{rTxState[6]}} & rLen); // S_TXPORTWR_TX_WRITE _rWords = (NEW_TXN_ACK ? NEW_TXN_LEN : (rWords - ({10{rTxState[6]}} & rLen))); // S_TXPORTWR_TX_WRITE _rBufWordsInit = (rLargeBuf ? rWords : SG_ELEM_LEN); _rBufWords = (rCopyBufWords ? rBufWordsInit : rBufWords) - ({10{rTxState[6]}} & rLen); // S_TXPORTWR_TX_WRITE _rPageRem = wPageRem; _rPageSpillInit = (rBufWordsInit > wPageRem); _rPageSpill = (rBufWords > wPageRem); _rPreLen = ((rPageSpillInit & rUseInit) | (rPageSpill & !rUseInit) ? rPageRem : rBufWords[10:0]); _rMaxPayloadSize = CONFIG_MAX_PAYLOAD_SIZE; _rMaxPayloadShift = (rMaxPayloadSize > 3'd4 ? 
3'd4 : rMaxPayloadSize); _rMaxPayload = (6'd32<<rMaxPayloadShift); _rPayloadSpill = (rPreLen > rMaxPayload); _rMaxLen = ((rMaxLen & !rValsProp[1]) | rTxState[6]); // S_TXPORTWR_TX_WRITE _rLen = (rPayloadSpill | rMaxLen ? rMaxPayload : rPreLen[9:0]); _rSendingWords = rSentWords + rLen; _rAvail = (NEW_TXN_WORDS_RECVD >= rSendingWords); _rTxnDone = ((rTxnDone<<1) | NEW_TXN_DONE); _rLastLen = wLastLen; _rLastLenEQ0 = (rLastLen == 10'd0); _rLenEQWords = (rLen == rWords); _rLenEQBufWords = (rLen == rBufWords); case (rTxState) `S_TXPORTWR_TX_IDLE: begin // Wait for channel write request if (rMainState[4] & !rStarted) // S_TXPORTWR_MAIN_WRITE _rTxState = `S_TXPORTWR_TX_BUF; end `S_TXPORTWR_TX_BUF: begin // Wait for buffer length and address if (SG_ELEM_RDY) _rTxState = `S_TXPORTWR_TX_ADJ_0; end `S_TXPORTWR_TX_ADJ_0: begin // Fix for large buffer _rCopyBufWords = 1; _rTxState = `S_TXPORTWR_TX_ADJ_1; end `S_TXPORTWR_TX_ADJ_1: begin // Check for page boundary crossing _rCopyBufWords = 0; _rUseInit = rCopyBufWords; _rTxState = `S_TXPORTWR_TX_ADJ_2; end `S_TXPORTWR_TX_ADJ_2: begin // Wait for values to propagate // Fix for page boundary crossing // Check for max payload // Fix for max payload _rUseInit = 0; if (rValsProp[2]) _rTxState = `S_TXPORTWR_TX_CHECK_DATA; end `S_TXPORTWR_TX_CHECK_DATA: begin // Wait for available data if (rNotRequesting) begin if (rAvail) _rTxState = `S_TXPORTWR_TX_WRITE; else if (rValsPropagated & rTxnDone[1]) _rTxState = (rLastLenEQ0 ? `S_TXPORTWR_TX_IDLE : `S_TXPORTWR_TX_WRITE_REM); end end `S_TXPORTWR_TX_WRITE: begin // Send len and repeat or finish? if (rLenEQWords) _rTxState = `S_TXPORTWR_TX_IDLE; else if (rLenEQBufWords) _rTxState = `S_TXPORTWR_TX_BUF; else _rTxState = `S_TXPORTWR_TX_ADJ_1; end `S_TXPORTWR_TX_WRITE_REM: begin // Send remaining data and finish _rTxState = `S_TXPORTWR_TX_IDLE; end default: begin _rTxState = `S_TXPORTWR_TX_IDLE; end endcase end // Request TX transfers separately so that the TX FSM can continue calculating // the next set of request parameters without having to wait for the TX_REQ_ACK. always @ (posedge CLK) begin rAckCount <= #1 (RST ? 10'd0 : _rAckCount); rNotRequesting <= #1 (RST ? 1'd1 : _rNotRequesting); rReqAddr <= #1 _rReqAddr; rReqLen <= #1 _rReqLen; rReqLast <= #1 _rReqLast; rDone <= #1 _rDone; rLastDoneRead <= #1 (RST ? 1'd1 : _rLastDoneRead); end always @ (*) begin // Start signaling when the TX FSM is ready. if (rTxState[6] | rTxState[7]) // S_TXPORTWR_TX_WRITE _rNotRequesting = 0; else _rNotRequesting = (rNotRequesting | rTxReqAck); // Pass off the rAddr & rLen when ready and wait for TX_REQ_ACK. if (rTxState[6]) begin // S_TXPORTWR_TX_WRITE _rReqAddr = rAddr; _rReqLen = rLen; _rReqLast = rLenEQWords; end else if (rTxState[7]) begin // S_TXPORTWR_TX_WRITE_REM _rReqAddr = rAddr; _rReqLen = rLastLen; _rReqLast = 1; end else begin _rReqAddr = rReqAddr; _rReqLen = rReqLen; _rReqLast = rReqLast; end // Track TX_REQ_ACK and TX_SENT to determine when the transaction is over. _rDone = (rAckCount == 10'd0); if (rMainState[0]) // S_TXPORTWR_MAIN_IDLE _rAckCount = 0; else _rAckCount = rAckCount + rTxState[6] + rTxState[7] - rTxSent; // S_TXPORTWR_TX_WRITE, S_TXPORTWR_TX_WRITE_REM // Track when the user reads the actual transfer amount. _rLastDoneRead = (rMainState[6] ? 1'd0 : (rLastDoneRead | rTxnDoneAck)); // S_TXPORTWR_MAIN_SIG_DONE end // Facilitate sending a TXN_DONE when we receive a TXN_ACK after the transaction // has begun sending. 
This will happen when the workstation detects that it has // sent/used all its currently mapped scatter gather elements, but it's not enough // to complete the transaction. The TXN_DONE will let the workstation know it can // release the current scatter gather mappings and allocate new ones. always @ (posedge CLK) begin rPartialDone <= #1 _rPartialDone; rReqPartialDone <= #1 (RST ? 1'd0 : _rReqPartialDone); end always @ (*) begin // Signal TXN_DONE after we've recieved the (seemingly superfluous) TXN_ACK, // we have no outstanding transfer requests, we're not currently requesting a // transfer, and there are no more scatter gather elements. _rPartialDone = (rReqPartialDone & rDone & rNotRequesting & SG_ELEM_EMPTY & rTxState[1]); // S_TXPORTWR_TX_BUF // Keep track of (seemingly superfluous) TXN_ACK requests. if ((rReqPartialDone & rDone & rNotRequesting & SG_ELEM_EMPTY & rTxState[1]) | rMainState[0]) // S_TXPORTWR_MAIN_IDLE _rReqPartialDone = 0; else _rReqPartialDone = (rReqPartialDone | (rTxnAck & !rMainState[3])); // !S_TXPORTWR_MAIN_NEW_ACK end /* wire [35:0] wControl0; chipscope_icon_1 cs_icon( .CONTROL0(wControl0) ); chipscope_ila_t8_512 a0( .CLK(CLK), .CONTROL(wControl0), .TRIG0({rTxState[6] | rTxState[7] | rTxSent, rAckCount[6:0]}), .DATA({280'd0, NEW_TXN_WORDS_RECVD, // 32 rSendingWords, // 32 rAvail, // 1 rNotRequesting, // 1 NEW_TXN_LAST, // 1 NEW_TXN_LEN, // 32 NEW_TXN_OFF, // 31 NEW_TXN, // 1 rAckCount, // 10 rLastDoneRead, // 1 rWords, // 32 rBufWords, // 32 rLen, // 10 rTxState, // 8 rMainState}) // 8 ); */ endmodule
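// ----------------------------------------------------------------------
// Illustrative sketch (not part of the original RIFFA sources): the
// outstanding request counter pattern shared by tx_port_writer (rAckCount,
// rDone) and rx_port_reader above, isolated as a stand-alone module. The
// counter grows when a request is handed to the engine and shrinks when the
// engine reports it complete; the transaction can only be declared done once
// the counter returns to zero. Module and port names are hypothetical.
// ----------------------------------------------------------------------
`timescale 1ns/1ns
module outstanding_req_counter_sketch #(
	parameter C_WIDTH = 10
)
(
	input CLK,
	input RST,
	input REQ_ISSUED,   // A request was accepted by the engine this cycle
	input REQ_RETIRED,  // A previously issued request completed this cycle
	output NONE_PENDING // All issued requests have been retired
);

reg [C_WIDTH-1:0] rCount={C_WIDTH{1'b0}};

always @ (posedge CLK) begin
	if (RST)
		rCount <= {C_WIDTH{1'b0}};
	else
		rCount <= rCount + REQ_ISSUED - REQ_RETIRED; // +1, -1, or unchanged per cycle
end

assign NONE_PENDING = (rCount == {C_WIDTH{1'b0}});

endmodule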
////////////////////////////////////////////////////////////////////// //// //// //// spi_clgen.v //// //// //// //// This file is part of the SPI IP core project //// //// http://www.opencores.org/projects/spi/ //// //// //// //// Author(s): //// //// - Simon Srot ([email protected]) //// //// //// //// All additional information is avaliable in the Readme.txt //// //// file. //// //// //// ////////////////////////////////////////////////////////////////////// //// //// //// Copyright (C) 2002 Authors //// //// //// //// This source file may be used and distributed without //// //// restriction provided that this copyright statement is not //// //// removed from the file and that any derivative work contains //// //// the original copyright notice and the associated disclaimer. //// //// //// //// This source file is free software; you can redistribute it //// //// and/or modify it under the terms of the GNU Lesser General //// //// Public License as published by the Free Software Foundation; //// //// either version 2.1 of the License, or (at your option) any //// //// later version. //// //// //// //// This source is distributed in the hope that it will be //// //// useful, but WITHOUT ANY WARRANTY; without even the implied //// //// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR //// //// PURPOSE. See the GNU Lesser General Public License for more //// //// details. //// //// //// //// You should have received a copy of the GNU Lesser General //// //// Public License along with this source; if not, download it //// //// from http://www.opencores.org/lgpl.shtml //// //// //// ////////////////////////////////////////////////////////////////////// `include "spi_defines.v" `include "timescale.v" module spi_clgen (clk_in, rst, go, enable, last_clk, divider, clk_out, pos_edge, neg_edge); parameter Tp = 1; input clk_in; // input clock (system clock) input rst; // reset input enable; // clock enable input go; // start transfer input last_clk; // last clock input [`SPI_DIVIDER_LEN-1:0] divider; // clock divider (output clock is divided by this value) output clk_out; // output clock output pos_edge; // pulse marking positive edge of clk_out output neg_edge; // pulse marking negative edge of clk_out reg clk_out; reg pos_edge; reg neg_edge; reg [`SPI_DIVIDER_LEN-1:0] cnt; // clock counter wire cnt_zero; // conter is equal to zero wire cnt_one; // conter is equal to one assign cnt_zero = cnt == {`SPI_DIVIDER_LEN{1'b0}}; assign cnt_one = cnt == {{`SPI_DIVIDER_LEN-1{1'b0}}, 1'b1}; // Counter counts half period always @(posedge clk_in or posedge rst) begin if(rst) cnt <= #Tp {`SPI_DIVIDER_LEN{1'b1}}; else begin if(!enable || cnt_zero) cnt <= #Tp divider; else cnt <= #Tp cnt - {{`SPI_DIVIDER_LEN-1{1'b0}}, 1'b1}; end end // clk_out is asserted every other half period always @(posedge clk_in or posedge rst) begin if(rst) clk_out <= #Tp 1'b0; else clk_out <= #Tp (enable && cnt_zero && (!last_clk || clk_out)) ? ~clk_out : clk_out; end // Pos and neg edge signals always @(posedge clk_in or posedge rst) begin if(rst) begin pos_edge <= #Tp 1'b0; neg_edge <= #Tp 1'b0; end else begin pos_edge <= #Tp (enable && !clk_out && cnt_one) || (!(|divider) && clk_out) || (!(|divider) && go && !enable); neg_edge <= #Tp (enable && clk_out && cnt_one) || (!(|divider) && !clk_out && enable); end end endmodule
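//////////////////////////////////////////////////////////////////////
//// Illustrative sketch (not part of the OpenCores SPI sources): the      ////
//// relation between the divider register and the generated clock. The    ////
//// counter above reloads with `divider` and toggles clk_out each time    ////
//// it reaches zero, so one half period lasts (divider + 1) input cycles  ////
//// and f_clk_out = f_clk_in / (2 * (divider + 1)). The 100 MHz / 1 MHz   ////
//// figures below are assumptions made only for this example.             ////
//////////////////////////////////////////////////////////////////////
module spi_clgen_divider_sketch;

  localparam integer CLK_IN_HZ = 100_000_000;                   // assumed input clock
  localparam integer SCLK_HZ = 1_000_000;                       // assumed target SPI clock
  localparam integer DIVIDER = (CLK_IN_HZ / (2 * SCLK_HZ)) - 1; // 49

  initial $display("divider=%0d -> clk_out=%0d Hz",
                   DIVIDER, CLK_IN_HZ / (2 * (DIVIDER + 1)));

endmodule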
`timescale 1ns / 1ps ////////////////////////////////////////////////////////////////////////////////// // Company: // Engineer: // // Create Date: 22:36:46 09/06/2015 // Design Name: // Module Name: FPU_Multiplication_Function // Project Name: // Target Devices: // Tool versions: // Description: // // Dependencies: // // Revision: // Revision 0.01 - File Created // Additional Comments: // ////////////////////////////////////////////////////////////////////////////////// module FPU_Multiplication_Function //SINGLE PRECISION PARAMETERS /*# (parameter W = 32, parameter EW = 8, parameter SW = 23) // */ //DOUBLE PRECISION PARAMETERS # (parameter W = 64, parameter EW = 11, parameter SW = 52) // */ ( input wire clk, input wire rst, input wire beg_FSM, input wire ack_FSM, input wire [W-1:0] Data_MX, input wire [W-1:0] Data_MY, input wire [1:0] round_mode, output wire overflow_flag, output wire underflow_flag, output wire ready, output wire [W-1:0] final_result_ieee ); //GENERAL wire rst_int; //** //FSM_load_signals wire FSM_first_phase_load; //** wire FSM_load_first_step; /*Zero flag, Exp operation underflow, Sgf operation first reg, sign result reg*/ wire FSM_exp_operation_load_result; //Exp operation result, wire FSM_load_second_step; //Exp operation Overflow, Sgf operation second reg wire FSM_barrel_shifter_load; wire FSM_adder_round_norm_load; wire FSM_final_result_load; //ZERO FLAG //Op_MX; //Op_MY wire zero_flag; //FIRST PHASE wire [W-1:0] Op_MX; wire [W-1:0] Op_MY; //Mux S-> exp_operation OPER_A_i////////// wire FSM_selector_A; //D0=Op_MX[W-2:W-EW-1] //D1=exp_oper_result wire [EW:0] S_Oper_A_exp; //Mux S-> exp_operation OPER_B_i////////// wire [1:0] FSM_selector_B; //D0=Op_MY[W-2:W-EW-1] //D1=LZA_output //D2=1 wire [EW-1:0] S_Oper_B_exp; ///////////exp_operation/////////////////////////// wire FSM_exp_operation_A_S; //oper_A= S_Oper_A_exp //oper_B= S_Oper_B_exp wire [EW:0] exp_oper_result; //Sgf operation////////////////// //Op_A={1'b1, Op_MX[SW-1:0]} //Op_B={1'b1, Op_MY[SW-1:0]} wire [2*SW+1:0] P_Sgf; wire[SW:0] significand; wire[SW:0] non_significand; //Sign Operation wire sign_final_result; //barrel shifter multiplexers wire [SW:0] S_Data_Shift; //barrel shifter wire [SW:0] Sgf_normalized_result; //adder rounding wire FSM_add_overflow_flag; //Oper_A_i=norm result //Oper_B_i=1 wire [SW:0] Add_result; //round decoder wire FSM_round_flag; //Selecto moltiplexers wire selector_A; wire [1:0] selector_B; wire load_b; wire selector_C; //Barrel shifter multiplexer /////////////////////////////////////////FSM//////////////////////////////////////////// FSM_Mult_Function FS_Module ( .clk(clk), //** .rst(rst), //** .beg_FSM(beg_FSM), //** .ack_FSM(ack_FSM), //** .zero_flag_i(zero_flag), .Mult_shift_i(P_Sgf[2*SW+1]), .round_flag_i(FSM_round_flag), .Add_Overflow_i(FSM_add_overflow_flag), .load_0_o(FSM_first_phase_load), .load_1_o(FSM_load_first_step), .load_2_o(FSM_exp_operation_load_result), .load_3_o(FSM_load_second_step), .load_4_o(FSM_adder_round_norm_load), .load_5_o(FSM_final_result_load), .load_6_o(FSM_barrel_shifter_load), .ctrl_select_a_o(selector_A), .ctrl_select_b_o(load_b), .selector_b_o(selector_B), .ctrl_select_c_o(selector_C), .exp_op_o(FSM_exp_operation_A_S), .shift_value_o(FSM_Shift_Value), .rst_int(rst_int), // .ready(ready) ); /////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////Selector's registers////////////////////////////// RegisterAdd #(.W(1)) Sel_A ( //Selector_A register .clk(clk), .rst(rst_int), 
.load(selector_A), .D(1'b1), .Q(FSM_selector_A) ); RegisterAdd #(.W(1)) Sel_C ( //Selector_C register .clk(clk), .rst(rst_int), .load(selector_C), .D(1'b1), .Q(FSM_selector_C) ); RegisterAdd #(.W(2)) Sel_B ( //Selector_B register .clk(clk), .rst(rst_int), .load(load_b), .D(selector_B), .Q(FSM_selector_B) ); /////////////////////////////////////////////////////////////////////////////////////////// First_Phase_M #(.W(W)) Operands_load_reg ( // .clk(clk), //** .rst(rst_int), //** .load(FSM_first_phase_load), //** .Data_MX(Data_MX), //** .Data_MY(Data_MY), //** .Op_MX(Op_MX), .Op_MY(Op_MY) ); Zero_InfMult_Unit #(.W(W)) Zero_Result_Detect ( .clk(clk), .rst(rst_int), .load(FSM_load_first_step), .Data_A(Op_MX [W-2:0]), .Data_B(Op_MY [W-2:0]), .zero_m_flag(zero_flag) ); ///////////Mux exp_operation OPER_A_i////////// Multiplexer_AC #(.W(EW+1)) Exp_Oper_A_mux( .ctrl(FSM_selector_A), .D0 ({1'b0,Op_MX[W-2:W-EW-1]}), .D1 (exp_oper_result), .S (S_Oper_A_exp) ); ///////////Mux exp_operation OPER_B_i////////// wire [EW-1:0] Exp_oper_B_D1, Exp_oper_B_D2; Mux_3x1 #(.W(EW)) Exp_Oper_B_mux( .ctrl(FSM_selector_B), .D0 (Op_MY[W-2:W-EW-1]), .D1 (Exp_oper_B_D1), .D2 (Exp_oper_B_D2), .S(S_Oper_B_exp) ); generate case(EW) 8:begin assign Exp_oper_B_D1 = 8'd127; assign Exp_oper_B_D2 = 8'd1; end default:begin assign Exp_oper_B_D1 = 11'd1023; assign Exp_oper_B_D2 = 11'd1; end endcase endgenerate ///////////exp_operation/////////////////////////// Exp_Operation_m #(.EW(EW)) Exp_module ( .clk(clk), .rst(rst_int), .load_a_i(FSM_load_first_step), .load_b_i(FSM_load_second_step), .load_c_i(FSM_exp_operation_load_result), .Data_A_i(S_Oper_A_exp), .Data_B_i({1'b0,S_Oper_B_exp}), .Add_Subt_i(FSM_exp_operation_A_S), .Data_Result_o(exp_oper_result), .Overflow_flag_o(overflow_flag), .Underflow_flag_o(underflow_flag) ); ////////Sign_operation////////////////////////////// XOR_M Sign_operation ( .Sgn_X(Op_MX[W-1]), .Sgn_Y(Op_MY[W-1]), .Sgn_Info(sign_final_result) ); /////Significant_Operation////////////////////////// Sgf_Multiplication #(.SW(SW+1)) Sgf_operation ( .clk(clk), .rst(rst), .load_b_i(FSM_load_second_step), .Data_A_i({1'b1,Op_MX[SW-1:0]}), .Data_B_i({1'b1,Op_MY[SW-1:0]}), .sgf_result_o(P_Sgf) ); //////////Mux Barrel shifter shift_Value///////////////// assign significand = P_Sgf [2*SW:SW]; assign non_significand = P_Sgf [SW-1:0]; ///////////Mux Barrel shifter Data_in////// Multiplexer_AC #(.W(SW+1)) Barrel_Shifter_D_I_mux( .ctrl(FSM_selector_C), .D0 (significand), .D1 (Add_result), .S (S_Data_Shift) ); ///////////Barrel_Shifter////////////////////////// Barrel_Shifter_M #(.SW(SW+1)) Barrel_Shifter_module ( .clk(clk), .rst(rst_int), .load_i(FSM_barrel_shifter_load), .Shift_Value_i(FSM_Shift_Value), .Shift_Data_i(S_Data_Shift), .N_mant_o(Sgf_normalized_result) ); ////Round decoder///////////////////////////////// Round_decoder_M #(.SW(SW)) Round_Decoder ( .Round_Bits_i(non_significand), .Round_Mode_i(round_mode), .Sign_Result_i(sign_final_result), .Round_Flag_o(FSM_round_flag) ); //rounding_adder wire [SW:0] Add_Sgf_Oper_B; assign Add_Sgf_Oper_B = (SW)*1'b1; Adder_Round #(.SW(SW+1)) Adder_M ( .clk(clk), .rst(rst_int), .load_i(FSM_adder_round_norm_load), .Data_A_i(Sgf_normalized_result), .Data_B_i(Add_Sgf_Oper_B), .Data_Result_o(Add_result), .FSM_C_o(FSM_add_overflow_flag) ); ////Final Result/////////////////////////////// Tenth_Phase #(.W(W),.EW(EW),.SW(SW)) final_result_ieee_Module( .clk(clk), .rst(rst_int), .load_i(FSM_final_result_load), .sel_a_i(overflow_flag), .sel_b_i(underflow_flag), .sign_i(sign_final_result), 
.exp_ieee_i(exp_oper_result[EW-1:0]), .sgf_ieee_i(Sgf_normalized_result[SW-1:0]), .final_result_ieee_o(final_result_ieee) ); endmodule
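// ----------------------------------------------------------------------
// Parameterization sketch (not part of the original design): the module
// above defaults to double precision (W=64, EW=11, SW=52); the commented-out
// header shows the single-precision alternative. A hypothetical
// single-precision instance overrides all three parameters together. The
// wrapper port names and the round_mode encoding below are assumptions made
// only for illustration.
// ----------------------------------------------------------------------
module fpu_mult_sp_example (
  input         clk,
  input         rst,
  input         start,     // pulse to begin a multiplication
  input         ack,       // acknowledge the result
  input  [31:0] op_a,
  input  [31:0] op_b,
  output        ovf,
  output        unf,
  output        done,
  output [31:0] result
);

  FPU_Multiplication_Function #(
    .W  (32),   // total word width
    .EW (8),    // exponent width
    .SW (23)    // significand width
  ) u_fpu_mult_sp (
    .clk               (clk),
    .rst               (rst),
    .beg_FSM           (start),
    .ack_FSM           (ack),
    .Data_MX           (op_a),
    .Data_MY           (op_b),
    .round_mode        (2'b00),   // rounding mode select (encoding assumed)
    .overflow_flag     (ovf),
    .underflow_flag    (unf),
    .ready             (done),
    .final_result_ieee (result)
  );

endmodule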
/*
*
* Copyright (c) 2011-2013 [email protected]
*
*
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.  If not, see <http://www.gnu.org/licenses/>.
*
*/

/*
* When tx_ready is high, uart_transmitter is ready to send a new byte. Drive
* rx_new_byte high for one cycle, and the byte to transmit on rx_byte for one
* cycle.
*/

module uart_transmitter # (
	parameter comm_clk_frequency = 100000000,
	parameter baud_rate = 115200
) (
	input clk,

	// UART interface
	output uart_tx,

	// Data to send
	input rx_new_byte,
	input [7:0] rx_byte,

	// Status
	output tx_ready
);

	localparam [15:0] baud_delay = (comm_clk_frequency / baud_rate) - 1;

	reg [15:0] delay_cnt = 16'd0;
	reg [9:0] state = 10'd1023, outgoing = 10'd1023;

	assign uart_tx = outgoing[0];
	assign tx_ready = state[0] & ~rx_new_byte;

	always @ (posedge clk)
	begin
		delay_cnt <= delay_cnt + 16'd1;

		if (delay_cnt >= baud_delay)
		begin
			delay_cnt <= 16'd0;
			state <= {1'b1, state[9:1]};
			outgoing <= {1'b1, outgoing[9:1]};
		end

		if (rx_new_byte && state[0])
		begin
			delay_cnt <= 16'd0;
			state <= 10'd0;
			outgoing <= {1'b1, rx_byte, 1'b0};
		end
	end

endmodule
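// ----------------------------------------------------------------------
// Handshake sketch (not part of the original design): a minimal, hypothetical
// sender that follows the comment above -- wait for tx_ready, then assert
// rx_new_byte for exactly one clk cycle while holding the byte on rx_byte.
// The wrapper port names (data, data_valid, tx) are assumptions chosen for
// illustration. With the default parameters, each bit lasts
// baud_delay + 1 = (100_000_000 / 115_200) - 1 + 1 = 868 clk cycles.
// ----------------------------------------------------------------------
module uart_tx_example (
  input        clk,
  input  [7:0] data,        // byte to send whenever the transmitter is idle
  input        data_valid,  // request to send `data`
  output       tx
);

  wire ready;
  reg  fire = 1'b0;

  // One-cycle strobe: only assert when the transmitter reports ready, and
  // never for two consecutive cycles.
  always @ (posedge clk)
    fire <= data_valid && ready && !fire;

  uart_transmitter #(
    .comm_clk_frequency (100000000),
    .baud_rate          (115200)
  ) u_tx (
    .clk         (clk),
    .uart_tx     (tx),
    .rx_new_byte (fire),
    .rx_byte     (data),
    .tx_ready    (ready)
  );

endmodule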
//***************************************************************************** // (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor : Xilinx // \ \ \/ Version : %version // \ \ Application : MIG // / / Filename : ddr_mc_phy_wrapper.v // /___/ /\ Date Last Modified : $date$ // \ \ / \ Date Created : Oct 10 2010 // \___\/\___\ // //Device : 7 Series //Design Name : DDR3 SDRAM //Purpose : Wrapper file that encompasses the MC_PHY module // instantiation and handles the vector remapping between // the MC_PHY ports and the user's DDR3 ports. Vector // remapping affects DDR3 control, address, and DQ/DQS/DM. 
//Reference : //Revision History : //***************************************************************************** `timescale 1 ps / 1 ps module mig_7series_v1_9_ddr_mc_phy_wrapper # ( parameter TCQ = 100, // Register delay (simulation only) parameter tCK = 2500, // ps parameter BANK_TYPE = "HP_IO", // # = "HP_IO", "HPL_IO", "HR_IO", "HRL_IO" parameter DATA_IO_PRIM_TYPE = "DEFAULT", // # = "HP_LP", "HR_LP", "DEFAULT" parameter DATA_IO_IDLE_PWRDWN = "ON", // "ON" or "OFF" parameter IODELAY_GRP = "IODELAY_MIG", parameter nCK_PER_CLK = 4, // Memory:Logic clock ratio parameter nCS_PER_RANK = 1, // # of unique CS outputs per rank parameter BANK_WIDTH = 3, // # of bank address parameter CKE_WIDTH = 1, // # of clock enable outputs parameter CS_WIDTH = 1, // # of chip select parameter CK_WIDTH = 1, // # of CK parameter CWL = 5, // CAS Write latency parameter DDR2_DQSN_ENABLE = "YES", // Enable differential DQS for DDR2 parameter DM_WIDTH = 8, // # of data mask parameter DQ_WIDTH = 16, // # of data bits parameter DQS_CNT_WIDTH = 3, // ceil(log2(DQS_WIDTH)) parameter DQS_WIDTH = 8, // # of strobe pairs parameter DRAM_TYPE = "DDR3", // DRAM type (DDR2, DDR3) parameter RANKS = 4, // # of ranks parameter ODT_WIDTH = 1, // # of ODT outputs parameter REG_CTRL = "OFF", // "ON" for registered DIMM parameter ROW_WIDTH = 16, // # of row/column address parameter USE_CS_PORT = 1, // Support chip select output parameter USE_DM_PORT = 1, // Support data mask output parameter USE_ODT_PORT = 1, // Support ODT output parameter IBUF_LPWR_MODE = "OFF", // input buffer low power option parameter LP_DDR_CK_WIDTH = 2, // Hard PHY parameters parameter PHYCTL_CMD_FIFO = "FALSE", parameter DATA_CTL_B0 = 4'hc, parameter DATA_CTL_B1 = 4'hf, parameter DATA_CTL_B2 = 4'hf, parameter DATA_CTL_B3 = 4'hf, parameter DATA_CTL_B4 = 4'hf, parameter BYTE_LANES_B0 = 4'b1111, parameter BYTE_LANES_B1 = 4'b0000, parameter BYTE_LANES_B2 = 4'b0000, parameter BYTE_LANES_B3 = 4'b0000, parameter BYTE_LANES_B4 = 4'b0000, parameter PHY_0_BITLANES = 48'h0000_0000_0000, parameter PHY_1_BITLANES = 48'h0000_0000_0000, parameter PHY_2_BITLANES = 48'h0000_0000_0000, // Parameters calculated outside of this block parameter HIGHEST_BANK = 3, // Highest I/O bank index parameter HIGHEST_LANE = 12, // Highest byte lane index // ** Pin mapping parameters // Parameters for mapping between hard PHY and physical DDR3 signals // There are 2 classes of parameters: // - DQS_BYTE_MAP, CK_BYTE_MAP, CKE_ODT_BYTE_MAP: These consist of // 8-bit elements. Each element indicates the bank and byte lane // location of that particular signal. The bit lane in this case // doesn't need to be specified, either because there's only one // pin pair in each byte lane that the DQS or CK pair can be // located at, or in the case of CKE_ODT_BYTE_MAP, only the byte // lane needs to be specified in order to determine which byte // lane generates the RCLK (Note that CKE, and ODT must be located // in the same bank, thus only one element in CKE_ODT_BYTE_MAP) // [7:4] = bank # (0-4) // [3:0] = byte lane # (0-3) // - All other MAP parameters: These consist of 12-bit elements. Each // element indicates the bank, byte lane, and bit lane location of // that particular signal: // [11:8] = bank # (0-4) // [7:4] = byte lane # (0-3) // [3:0] = bit lane # (0-11) // Note that not all elements in all parameters will be used - it // depends on the actual widths of the DDR3 buses. 
The parameters are // structured to support a maximum of: // - DQS groups: 18 // - data mask bits: 18 // In addition, the default parameter size of some of the parameters will // support a certain number of bits, however, this can be expanded at // compile time by expanding the width of the vector passed into this // parameter // - chip selects: 10 // - bank bits: 3 // - address bits: 16 parameter CK_BYTE_MAP = 144'h00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00, parameter ADDR_MAP = 192'h000_000_000_000_000_000_000_000_000_000_000_000_000_000_000_000, parameter BANK_MAP = 36'h000_000_000, parameter CAS_MAP = 12'h000, parameter CKE_ODT_BYTE_MAP = 8'h00, parameter CKE_MAP = 96'h000_000_000_000_000_000_000_000, parameter ODT_MAP = 96'h000_000_000_000_000_000_000_000, parameter CKE_ODT_AUX = "FALSE", parameter CS_MAP = 120'h000_000_000_000_000_000_000_000_000_000, parameter PARITY_MAP = 12'h000, parameter RAS_MAP = 12'h000, parameter WE_MAP = 12'h000, parameter DQS_BYTE_MAP = 144'h00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00, // DATAx_MAP parameter is used for byte lane X in the design parameter DATA0_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA1_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA2_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA3_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA4_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA5_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA6_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA7_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA8_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA9_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA10_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA11_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA12_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA13_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA14_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA15_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA16_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA17_MAP = 96'h000_000_000_000_000_000_000_000, // MASK0_MAP used for bytes [8:0], MASK1_MAP for bytes [17:9] parameter MASK0_MAP = 108'h000_000_000_000_000_000_000_000_000, parameter MASK1_MAP = 108'h000_000_000_000_000_000_000_000_000, // Simulation options parameter SIM_CAL_OPTION = "NONE", // The PHY_CONTROL primitive in the bank where PLL exists is declared // as the Master PHY_CONTROL. 
parameter MASTER_PHY_CTL = 1 ) ( input rst, input clk, input freq_refclk, input mem_refclk, input pll_lock, input sync_pulse, input idelayctrl_refclk, input phy_cmd_wr_en, input phy_data_wr_en, input [31:0] phy_ctl_wd, input phy_ctl_wr, input phy_if_empty_def, input phy_if_reset, input [5:0] data_offset_1, input [5:0] data_offset_2, input [3:0] aux_in_1, input [3:0] aux_in_2, output [4:0] idelaye2_init_val, output [5:0] oclkdelay_init_val, output if_empty, output phy_ctl_full, output phy_cmd_full, output phy_data_full, output phy_pre_data_a_full, output [(CK_WIDTH * LP_DDR_CK_WIDTH)-1:0] ddr_clk, output phy_mc_go, input phy_write_calib, input phy_read_calib, input calib_in_common, input [5:0] calib_sel, input [HIGHEST_BANK-1:0] calib_zero_inputs, input [HIGHEST_BANK-1:0] calib_zero_ctrl, input [2:0] po_fine_enable, input [2:0] po_coarse_enable, input [2:0] po_fine_inc, input [2:0] po_coarse_inc, input po_counter_load_en, input po_counter_read_en, input [2:0] po_sel_fine_oclk_delay, input [8:0] po_counter_load_val, output [8:0] po_counter_read_val, output [5:0] pi_counter_read_val, input [HIGHEST_BANK-1:0] pi_rst_dqs_find, input pi_fine_enable, input pi_fine_inc, input pi_counter_load_en, input [5:0] pi_counter_load_val, input idelay_ce, input idelay_inc, input idelay_ld, input idle, output pi_phase_locked, output pi_phase_locked_all, output pi_dqs_found, output pi_dqs_found_all, output pi_dqs_out_of_range, // From/to calibration logic/soft PHY input phy_init_data_sel, input [nCK_PER_CLK*ROW_WIDTH-1:0] mux_address, input [nCK_PER_CLK*BANK_WIDTH-1:0] mux_bank, input [nCK_PER_CLK-1:0] mux_cas_n, input [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] mux_cs_n, input [nCK_PER_CLK-1:0] mux_ras_n, input [1:0] mux_odt, input [nCK_PER_CLK-1:0] mux_cke, input [nCK_PER_CLK-1:0] mux_we_n, input [nCK_PER_CLK-1:0] parity_in, input [2*nCK_PER_CLK*DQ_WIDTH-1:0] mux_wrdata, input [2*nCK_PER_CLK*(DQ_WIDTH/8)-1:0] mux_wrdata_mask, input mux_reset_n, output [2*nCK_PER_CLK*DQ_WIDTH-1:0] rd_data, // Memory I/F output [ROW_WIDTH-1:0] ddr_addr, output [BANK_WIDTH-1:0] ddr_ba, output ddr_cas_n, output [CKE_WIDTH-1:0] ddr_cke, output [CS_WIDTH*nCS_PER_RANK-1:0] ddr_cs_n, output [DM_WIDTH-1:0] ddr_dm, output [ODT_WIDTH-1:0] ddr_odt, output ddr_parity, output ddr_ras_n, output ddr_we_n, output ddr_reset_n, inout [DQ_WIDTH-1:0] ddr_dq, inout [DQS_WIDTH-1:0] ddr_dqs, inout [DQS_WIDTH-1:0] ddr_dqs_n ,input dbg_pi_counter_read_en ,output ref_dll_lock ,input rst_phaser_ref ,output [11:0] dbg_pi_phase_locked_phy4lanes ,output [11:0] dbg_pi_dqs_found_lanes_phy4lanes ); function [71:0] generate_bytelanes_ddr_ck; input [143:0] ck_byte_map; integer v ; begin generate_bytelanes_ddr_ck = 'b0 ; for (v = 0; v < CK_WIDTH; v = v + 1) begin if ((CK_BYTE_MAP[((v*8)+4)+:4]) == 2) generate_bytelanes_ddr_ck[48+(4*v)+1*(CK_BYTE_MAP[(v*8)+:4])] = 1'b1; else if ((CK_BYTE_MAP[((v*8)+4)+:4]) == 1) generate_bytelanes_ddr_ck[24+(4*v)+1*(CK_BYTE_MAP[(v*8)+:4])] = 1'b1; else generate_bytelanes_ddr_ck[4*v+1*(CK_BYTE_MAP[(v*8)+:4])] = 1'b1; end end endfunction function [(2*CK_WIDTH*8)-1:0] generate_ddr_ck_map; input [143:0] ck_byte_map; integer g; begin generate_ddr_ck_map = 'b0 ; for(g = 0 ; g < CK_WIDTH ; g= g + 1) begin generate_ddr_ck_map[(g*2*8)+:8] = (ck_byte_map[(g*8)+:4] == 4'd0) ? "A" : (ck_byte_map[(g*8)+:4] == 4'd1) ? "B" : (ck_byte_map[(g*8)+:4] == 4'd2) ? "C" : "D" ; generate_ddr_ck_map[(((g*2)+1)*8)+:8] = (ck_byte_map[((g*8)+4)+:4] == 4'd0) ? "0" : (ck_byte_map[((g*8)+4)+:4] == 4'd1) ? 
"1" : "2" ; //each STRING charater takes 0 location end end endfunction // Enable low power mode for input buffer localparam IBUF_LOW_PWR = (IBUF_LPWR_MODE == "OFF") ? "FALSE" : ((IBUF_LPWR_MODE == "ON") ? "TRUE" : "ILLEGAL"); // Ratio of data to strobe localparam DQ_PER_DQS = DQ_WIDTH / DQS_WIDTH; // number of data phases per internal clock localparam PHASE_PER_CLK = 2*nCK_PER_CLK; // used to determine routing to OUT_FIFO for control/address for 2:1 // vs. 4:1 memory:internal clock ratio modes localparam PHASE_DIV = 4 / nCK_PER_CLK; localparam CLK_PERIOD = tCK * nCK_PER_CLK; // Create an aggregate parameters for data mapping to reduce # of generate // statements required in remapping code. Need to account for the case // when the DQ:DQS ratio is not 8:1 - in this case, each DATAx_MAP // parameter will have fewer than 8 elements used localparam FULL_DATA_MAP = {DATA17_MAP[12*DQ_PER_DQS-1:0], DATA16_MAP[12*DQ_PER_DQS-1:0], DATA15_MAP[12*DQ_PER_DQS-1:0], DATA14_MAP[12*DQ_PER_DQS-1:0], DATA13_MAP[12*DQ_PER_DQS-1:0], DATA12_MAP[12*DQ_PER_DQS-1:0], DATA11_MAP[12*DQ_PER_DQS-1:0], DATA10_MAP[12*DQ_PER_DQS-1:0], DATA9_MAP[12*DQ_PER_DQS-1:0], DATA8_MAP[12*DQ_PER_DQS-1:0], DATA7_MAP[12*DQ_PER_DQS-1:0], DATA6_MAP[12*DQ_PER_DQS-1:0], DATA5_MAP[12*DQ_PER_DQS-1:0], DATA4_MAP[12*DQ_PER_DQS-1:0], DATA3_MAP[12*DQ_PER_DQS-1:0], DATA2_MAP[12*DQ_PER_DQS-1:0], DATA1_MAP[12*DQ_PER_DQS-1:0], DATA0_MAP[12*DQ_PER_DQS-1:0]}; // Same deal, but for data mask mapping localparam FULL_MASK_MAP = {MASK1_MAP, MASK0_MAP}; localparam TMP_BYTELANES_DDR_CK = generate_bytelanes_ddr_ck(CK_BYTE_MAP) ; localparam TMP_GENERATE_DDR_CK_MAP = generate_ddr_ck_map(CK_BYTE_MAP) ; // Temporary parameters to determine which bank is outputting the CK/CK# // Eventually there will be support for multiple CK/CK# output //localparam TMP_DDR_CLK_SELECT_BANK = (CK_BYTE_MAP[7:4]); //// Temporary method to force MC_PHY to generate ODDR associated with //// CK/CK# output only for a single byte lane in the design. All banks //// that won't be generating the CK/CK# will have "UNUSED" as their //// PHY_GENERATE_DDR_CK parameter //localparam TMP_PHY_0_GENERATE_DDR_CK // = (TMP_DDR_CLK_SELECT_BANK != 0) ? "UNUSED" : // ((CK_BYTE_MAP[1:0] == 2'b00) ? "A" : // ((CK_BYTE_MAP[1:0] == 2'b01) ? "B" : // ((CK_BYTE_MAP[1:0] == 2'b10) ? "C" : "D"))); //localparam TMP_PHY_1_GENERATE_DDR_CK // = (TMP_DDR_CLK_SELECT_BANK != 1) ? "UNUSED" : // ((CK_BYTE_MAP[1:0] == 2'b00) ? "A" : // ((CK_BYTE_MAP[1:0] == 2'b01) ? "B" : // ((CK_BYTE_MAP[1:0] == 2'b10) ? "C" : "D"))); //localparam TMP_PHY_2_GENERATE_DDR_CK // = (TMP_DDR_CLK_SELECT_BANK != 2) ? "UNUSED" : // ((CK_BYTE_MAP[1:0] == 2'b00) ? "A" : // ((CK_BYTE_MAP[1:0] == 2'b01) ? "B" : // ((CK_BYTE_MAP[1:0] == 2'b10) ? "C" : "D"))); // Function to generate MC_PHY parameters PHY_BITLANES_OUTONLYx // which indicates which bit lanes in data byte lanes are // output-only bitlanes (e.g. 
used specifically for data mask outputs) function [143:0] calc_phy_bitlanes_outonly; input [215:0] data_mask_in; integer z; begin calc_phy_bitlanes_outonly = 'b0; // Only enable BITLANES parameters for data masks if, well, if // the data masks are actually enabled if (USE_DM_PORT == 1) for (z = 0; z < DM_WIDTH; z = z + 1) calc_phy_bitlanes_outonly[48*data_mask_in[(12*z+8)+:3] + 12*data_mask_in[(12*z+4)+:2] + data_mask_in[12*z+:4]] = 1'b1; end endfunction localparam PHY_BITLANES_OUTONLY = calc_phy_bitlanes_outonly(FULL_MASK_MAP); localparam PHY_0_BITLANES_OUTONLY = PHY_BITLANES_OUTONLY[47:0]; localparam PHY_1_BITLANES_OUTONLY = PHY_BITLANES_OUTONLY[95:48]; localparam PHY_2_BITLANES_OUTONLY = PHY_BITLANES_OUTONLY[143:96]; // Determine which bank and byte lane generates the RCLK used to clock // out the auxilliary (ODT, CKE) outputs localparam CKE_ODT_RCLK_SELECT_BANK_AUX_ON = (CKE_ODT_BYTE_MAP[7:4] == 4'h0) ? 0 : ((CKE_ODT_BYTE_MAP[7:4] == 4'h1) ? 1 : ((CKE_ODT_BYTE_MAP[7:4] == 4'h2) ? 2 : ((CKE_ODT_BYTE_MAP[7:4] == 4'h3) ? 3 : ((CKE_ODT_BYTE_MAP[7:4] == 4'h4) ? 4 : -1)))); localparam CKE_ODT_RCLK_SELECT_LANE_AUX_ON = (CKE_ODT_BYTE_MAP[3:0] == 4'h0) ? "A" : ((CKE_ODT_BYTE_MAP[3:0] == 4'h1) ? "B" : ((CKE_ODT_BYTE_MAP[3:0] == 4'h2) ? "C" : ((CKE_ODT_BYTE_MAP[3:0] == 4'h3) ? "D" : "ILLEGAL"))); localparam CKE_ODT_RCLK_SELECT_BANK_AUX_OFF = (CKE_MAP[11:8] == 4'h0) ? 0 : ((CKE_MAP[11:8] == 4'h1) ? 1 : ((CKE_MAP[11:8] == 4'h2) ? 2 : ((CKE_MAP[11:8] == 4'h3) ? 3 : ((CKE_MAP[11:8] == 4'h4) ? 4 : -1)))); localparam CKE_ODT_RCLK_SELECT_LANE_AUX_OFF = (CKE_MAP[7:4] == 4'h0) ? "A" : ((CKE_MAP[7:4] == 4'h1) ? "B" : ((CKE_MAP[7:4] == 4'h2) ? "C" : ((CKE_MAP[7:4] == 4'h3) ? "D" : "ILLEGAL"))); localparam CKE_ODT_RCLK_SELECT_BANK = (CKE_ODT_AUX == "TRUE") ? CKE_ODT_RCLK_SELECT_BANK_AUX_ON : CKE_ODT_RCLK_SELECT_BANK_AUX_OFF ; localparam CKE_ODT_RCLK_SELECT_LANE = (CKE_ODT_AUX == "TRUE") ? CKE_ODT_RCLK_SELECT_LANE_AUX_ON : CKE_ODT_RCLK_SELECT_LANE_AUX_OFF ; //*************************************************************************** // OCLKDELAYED tap setting calculation: // Parameters for calculating amount of phase shifting output clock to // achieve 90 degree offset between DQS and DQ on writes //*************************************************************************** //90 deg equivalent to 0.25 for MEM_RefClk <= 300 MHz // and 1.25 for Mem_RefClk > 300 MHz localparam PO_OCLKDELAY_INV = (((SIM_CAL_OPTION == "NONE") && (tCK > 2500)) || (tCK >= 3333)) ? "FALSE" : "TRUE"; //DIV1: MemRefClk >= 400 MHz, DIV2: 200 <= MemRefClk < 400, //DIV4: MemRefClk < 200 MHz localparam PHY_0_A_PI_FREQ_REF_DIV = tCK > 5000 ? "DIV4" : tCK > 2500 ? "DIV2": "NONE"; localparam FREQ_REF_DIV = (PHY_0_A_PI_FREQ_REF_DIV == "DIV4" ? 4 : PHY_0_A_PI_FREQ_REF_DIV == "DIV2" ? 2 : 1); // Intrinsic delay between OCLK and OCLK_DELAYED Phaser Output localparam real INT_DELAY = 0.4392/FREQ_REF_DIV + 100.0/tCK; // Whether OCLK_DELAY output comes inverted or not localparam real HALF_CYCLE_DELAY = 0.5*(PO_OCLKDELAY_INV == "TRUE" ? 1 : 0); // Phaser-Out Stage3 Tap delay for 90 deg shift. // Maximum tap delay is FreqRefClk period distributed over 64 taps // localparam real TAP_DELAY = MC_OCLK_DELAY/64/FREQ_REF_DIV; localparam real MC_OCLK_DELAY = ((PO_OCLKDELAY_INV == "TRUE" ? 1.25 : 0.25) - (INT_DELAY + HALF_CYCLE_DELAY)) * 63 * FREQ_REF_DIV; //localparam integer PHY_0_A_PO_OCLK_DELAY = MC_OCLK_DELAY; localparam integer PHY_0_A_PO_OCLK_DELAY_HW = (tCK > 2273) ? 34 : (tCK > 2000) ? 33 : (tCK > 1724) ? 32 : (tCK > 1515) ? 31 : (tCK > 1315) ? 
30 : (tCK > 1136) ? 29 : (tCK > 1021) ? 28 : 27; // Note that simulation requires a different value than in H/W because of the // difference in the way delays are modeled localparam integer PHY_0_A_PO_OCLK_DELAY = (SIM_CAL_OPTION == "NONE") ? ((tCK > 2500) ? 8 : (DRAM_TYPE == "DDR3") ? PHY_0_A_PO_OCLK_DELAY_HW : 30) : MC_OCLK_DELAY; // Initial DQ IDELAY value localparam PHY_0_A_IDELAYE2_IDELAY_VALUE = (SIM_CAL_OPTION != "FAST_CAL") ? 0 : (tCK < 1000) ? 0 : (tCK < 1330) ? 0 : (tCK < 2300) ? 0 : (tCK < 2500) ? 2 : 0; //localparam PHY_0_A_IDELAYE2_IDELAY_VALUE = 0; // Aux_out parameters RD_CMD_OFFSET = CL+2? and WR_CMD_OFFSET = CWL+3? localparam PHY_0_RD_CMD_OFFSET_0 = 10; localparam PHY_0_RD_CMD_OFFSET_1 = 10; localparam PHY_0_RD_CMD_OFFSET_2 = 10; localparam PHY_0_RD_CMD_OFFSET_3 = 10; // 4:1 and 2:1 have WR_CMD_OFFSET values for ODT timing localparam PHY_0_WR_CMD_OFFSET_0 = (nCK_PER_CLK == 4) ? 8 : 4; localparam PHY_0_WR_CMD_OFFSET_1 = (nCK_PER_CLK == 4) ? 8 : 4; localparam PHY_0_WR_CMD_OFFSET_2 = (nCK_PER_CLK == 4) ? 8 : 4; localparam PHY_0_WR_CMD_OFFSET_3 = (nCK_PER_CLK == 4) ? 8 : 4; // 4:1 and 2:1 have different values localparam PHY_0_WR_DURATION_0 = 7; localparam PHY_0_WR_DURATION_1 = 7; localparam PHY_0_WR_DURATION_2 = 7; localparam PHY_0_WR_DURATION_3 = 7; // Aux_out parameters for toggle mode (CKE) localparam CWL_M = (REG_CTRL == "ON") ? CWL + 1 : CWL; localparam PHY_0_CMD_OFFSET = (nCK_PER_CLK == 4) ? (CWL_M % 2) ? 8 : 9 : (CWL < 7) ? 4 + ((CWL_M % 2) ? 0 : 1) : 5 + ((CWL_M % 2) ? 0 : 1); // temporary parameter to enable/disable PHY PC counters. In both 4:1 and // 2:1 cases, this should be disabled. For now, enable for 4:1 mode to // avoid making too many changes at once. localparam PHY_COUNT_EN = (nCK_PER_CLK == 4) ? "TRUE" : "FALSE"; wire [((HIGHEST_LANE+3)/4)*4-1:0] aux_out; wire [HIGHEST_LANE-1:0] mem_dqs_in; wire [HIGHEST_LANE-1:0] mem_dqs_out; wire [HIGHEST_LANE-1:0] mem_dqs_ts; wire [HIGHEST_LANE*10-1:0] mem_dq_in; wire [HIGHEST_LANE*12-1:0] mem_dq_out; wire [HIGHEST_LANE*12-1:0] mem_dq_ts; wire [DQ_WIDTH-1:0] in_dq; wire [DQS_WIDTH-1:0] in_dqs; wire [ROW_WIDTH-1:0] out_addr; wire [BANK_WIDTH-1:0] out_ba; wire out_cas_n; wire [CS_WIDTH*nCS_PER_RANK-1:0] out_cs_n; wire [DM_WIDTH-1:0] out_dm; wire [ODT_WIDTH -1:0] out_odt; wire [CKE_WIDTH -1 :0] out_cke ; wire [DQ_WIDTH-1:0] out_dq; wire [DQS_WIDTH-1:0] out_dqs; wire out_parity; wire out_ras_n; wire out_we_n; wire [HIGHEST_LANE*80-1:0] phy_din; wire [HIGHEST_LANE*80-1:0] phy_dout; wire phy_rd_en; wire [DM_WIDTH-1:0] ts_dm; wire [DQ_WIDTH-1:0] ts_dq; wire [DQS_WIDTH-1:0] ts_dqs; reg [31:0] phy_ctl_wd_i1; reg [31:0] phy_ctl_wd_i2; reg phy_ctl_wr_i1; reg phy_ctl_wr_i2; reg [5:0] data_offset_1_i1; reg [5:0] data_offset_1_i2; reg [5:0] data_offset_2_i1; reg [5:0] data_offset_2_i2; wire [31:0] phy_ctl_wd_temp; wire phy_ctl_wr_temp; wire [5:0] data_offset_1_temp; wire [5:0] data_offset_2_temp; wire [5:0] data_offset_1_of; wire [5:0] data_offset_2_of; wire [31:0] phy_ctl_wd_of; (* keep = "true", max_fanout = 3 *) wire phy_ctl_wr_of /* synthesis syn_maxfan = 1 */; wire [3:0] phy_ctl_full_temp; wire data_io_idle_pwrdwn; // Always read from input data FIFOs when not empty assign phy_rd_en = !if_empty; // IDELAYE2 initial value assign idelaye2_init_val = PHY_0_A_IDELAYE2_IDELAY_VALUE; assign oclkdelay_init_val = PHY_0_A_PO_OCLK_DELAY; // Idle powerdown when there are no pending reads in the MC assign data_io_idle_pwrdwn = DATA_IO_IDLE_PWRDWN == "ON" ? 
idle : 1'b0; //*************************************************************************** // Auxiliary output steering //*************************************************************************** // For a 4 rank I/F the aux_out[3:0] from the addr/ctl bank will be // mapped to ddr_odt and the aux_out[7:4] from one of the data banks // will map to ddr_cke. For I/Fs less than 4 the aux_out[3:0] from the // addr/ctl bank would bank would map to both ddr_odt and ddr_cke. generate if(CKE_ODT_AUX == "TRUE")begin:cke_thru_auxpins if (CKE_WIDTH == 1) begin : gen_cke // Explicitly instantiate OBUF to ensure that these are present // in the netlist. Typically this is not required since NGDBUILD // at the top-level knows to infer an I/O/IOBUF and therefore a // top-level LOC constraint can be attached to that pin. This does // not work when a hierarchical flow is used and the LOC is applied // at the individual core-level UCF OBUF u_cke_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK]), .O (ddr_cke) ); end else begin: gen_2rank_cke OBUF u_cke0_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK]), .O (ddr_cke[0]) ); OBUF u_cke1_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK+2]), .O (ddr_cke[1]) ); end end endgenerate generate if(CKE_ODT_AUX == "TRUE")begin:odt_thru_auxpins if (USE_ODT_PORT == 1) begin : gen_use_odt // Explicitly instantiate OBUF to ensure that these are present // in the netlist. Typically this is not required since NGDBUILD // at the top-level knows to infer an I/O/IOBUF and therefore a // top-level LOC constraint can be attached to that pin. This does // not work when a hierarchical flow is used and the LOC is applied // at the individual core-level UCF OBUF u_odt_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK+1]), .O (ddr_odt[0]) ); if (ODT_WIDTH == 2 && RANKS == 1) begin: gen_2port_odt OBUF u_odt1_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK+2]), .O (ddr_odt[1]) ); end else if (ODT_WIDTH == 2 && RANKS == 2) begin: gen_2rank_odt OBUF u_odt1_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK+3]), .O (ddr_odt[1]) ); end else if (ODT_WIDTH == 3 && RANKS == 1) begin: gen_3port_odt OBUF u_odt1_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK+2]), .O (ddr_odt[1]) ); OBUF u_odt2_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK+3]), .O (ddr_odt[2]) ); end end else begin assign ddr_odt = 'b0; end end endgenerate //*************************************************************************** // Read data bit steering //*************************************************************************** // Transpose elements of rd_data_map to form final read data output: // phy_din elements are grouped according to "physical bit" - e.g. // for nCK_PER_CLK = 4, there are 8 data phases transfered per physical // bit per clock cycle: // = {dq0_fall3, dq0_rise3, dq0_fall2, dq0_rise2, // dq0_fall1, dq0_rise1, dq0_fall0, dq0_rise0} // whereas rd_data is are grouped according to "phase" - e.g. // = {dq7_rise0, dq6_rise0, dq5_rise0, dq4_rise0, // dq3_rise0, dq2_rise0, dq1_rise0, dq0_rise0} // therefore rd_data is formed by transposing phy_din - e.g. 
// for nCK_PER_CLK = 4, and DQ_WIDTH = 16, and assuming MC_PHY // bit_lane[0] maps to DQ[0], and bit_lane[1] maps to DQ[1], then // the assignments for bits of rd_data corresponding to DQ[1:0] // would be: // {rd_data[112], rd_data[96], rd_data[80], rd_data[64], // rd_data[48], rd_data[32], rd_data[16], rd_data[0]} = phy_din[7:0] // {rd_data[113], rd_data[97], rd_data[81], rd_data[65], // rd_data[49], rd_data[33], rd_data[17], rd_data[1]} = phy_din[15:8] generate genvar i, j; for (i = 0; i < DQ_WIDTH; i = i + 1) begin: gen_loop_rd_data_1 for (j = 0; j < PHASE_PER_CLK; j = j + 1) begin: gen_loop_rd_data_2 assign rd_data[DQ_WIDTH*j + i] = phy_din[(320*FULL_DATA_MAP[(12*i+8)+:3]+ 80*FULL_DATA_MAP[(12*i+4)+:2] + 8*FULL_DATA_MAP[12*i+:4]) + j]; end end endgenerate //*************************************************************************** // Control/address //*************************************************************************** assign out_cas_n = mem_dq_out[48*CAS_MAP[10:8] + 12*CAS_MAP[5:4] + CAS_MAP[3:0]]; generate // if signal placed on bit lanes [0-9] if (CAS_MAP[3:0] < 4'hA) begin: gen_cas_lt10 // Determine routing based on clock ratio mode. If running in 4:1 // mode, then all four bits from logic are used. If 2:1 mode, only // 2-bits are provided by logic, and each bit is repeated 2x to form // 4-bit input to IN_FIFO, e.g. // 4:1 mode: phy_dout[] = {in[3], in[2], in[1], in[0]} // 2:1 mode: phy_dout[] = {in[1], in[1], in[0], in[0]} assign phy_dout[(320*CAS_MAP[10:8] + 80*CAS_MAP[5:4] + 8*CAS_MAP[3:0])+:4] = {mux_cas_n[3/PHASE_DIV], mux_cas_n[2/PHASE_DIV], mux_cas_n[1/PHASE_DIV], mux_cas_n[0]}; end else begin: gen_cas_ge10 // If signal is placed in bit lane [10] or [11], route to upper // nibble of phy_dout lane [5] or [6] respectively (in this case // phy_dout lane [5, 6] are multiplexed to take input for two // different SDR signals - this is how bits[10,11] need to be // provided to the OUT_FIFO assign phy_dout[(320*CAS_MAP[10:8] + 80*CAS_MAP[5:4] + 8*(CAS_MAP[3:0]-5) + 4)+:4] = {mux_cas_n[3/PHASE_DIV], mux_cas_n[2/PHASE_DIV], mux_cas_n[1/PHASE_DIV], mux_cas_n[0]}; end endgenerate assign out_ras_n = mem_dq_out[48*RAS_MAP[10:8] + 12*RAS_MAP[5:4] + RAS_MAP[3:0]]; generate if (RAS_MAP[3:0] < 4'hA) begin: gen_ras_lt10 assign phy_dout[(320*RAS_MAP[10:8] + 80*RAS_MAP[5:4] + 8*RAS_MAP[3:0])+:4] = {mux_ras_n[3/PHASE_DIV], mux_ras_n[2/PHASE_DIV], mux_ras_n[1/PHASE_DIV], mux_ras_n[0]}; end else begin: gen_ras_ge10 assign phy_dout[(320*RAS_MAP[10:8] + 80*RAS_MAP[5:4] + 8*(RAS_MAP[3:0]-5) + 4)+:4] = {mux_ras_n[3/PHASE_DIV], mux_ras_n[2/PHASE_DIV], mux_ras_n[1/PHASE_DIV], mux_ras_n[0]}; end endgenerate assign out_we_n = mem_dq_out[48*WE_MAP[10:8] + 12*WE_MAP[5:4] + WE_MAP[3:0]]; generate if (WE_MAP[3:0] < 4'hA) begin: gen_we_lt10 assign phy_dout[(320*WE_MAP[10:8] + 80*WE_MAP[5:4] + 8*WE_MAP[3:0])+:4] = {mux_we_n[3/PHASE_DIV], mux_we_n[2/PHASE_DIV], mux_we_n[1/PHASE_DIV], mux_we_n[0]}; end else begin: gen_we_ge10 assign phy_dout[(320*WE_MAP[10:8] + 80*WE_MAP[5:4] + 8*(WE_MAP[3:0]-5) + 4)+:4] = {mux_we_n[3/PHASE_DIV], mux_we_n[2/PHASE_DIV], mux_we_n[1/PHASE_DIV], mux_we_n[0]}; end endgenerate generate if (REG_CTRL == "ON") begin: gen_parity_out // Generate addr/ctrl parity output only for DDR3 and DDR2 registered DIMMs assign out_parity = mem_dq_out[48*PARITY_MAP[10:8] + 12*PARITY_MAP[5:4] + PARITY_MAP[3:0]]; if (PARITY_MAP[3:0] < 4'hA) begin: gen_lt10 assign phy_dout[(320*PARITY_MAP[10:8] + 80*PARITY_MAP[5:4] + 8*PARITY_MAP[3:0])+:4] = {parity_in[3/PHASE_DIV], parity_in[2/PHASE_DIV], 
parity_in[1/PHASE_DIV], parity_in[0]}; end else begin: gen_ge10 assign phy_dout[(320*PARITY_MAP[10:8] + 80*PARITY_MAP[5:4] + 8*(PARITY_MAP[3:0]-5) + 4)+:4] = {parity_in[3/PHASE_DIV], parity_in[2/PHASE_DIV], parity_in[1/PHASE_DIV], parity_in[0]}; end end endgenerate //***************************************************************** generate genvar m, n,x; //***************************************************************** // Control/address (multi-bit) buses //***************************************************************** // Row/Column address for (m = 0; m < ROW_WIDTH; m = m + 1) begin: gen_addr_out assign out_addr[m] = mem_dq_out[48*ADDR_MAP[(12*m+8)+:3] + 12*ADDR_MAP[(12*m+4)+:2] + ADDR_MAP[12*m+:4]]; if (ADDR_MAP[12*m+:4] < 4'hA) begin: gen_lt10 // For multi-bit buses, we also have to deal with transposition // when going from the logic-side control bus to phy_dout for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*ADDR_MAP[(12*m+8)+:3] + 80*ADDR_MAP[(12*m+4)+:2] + 8*ADDR_MAP[12*m+:4] + n] = mux_address[ROW_WIDTH*(n/PHASE_DIV) + m]; end end else begin: gen_ge10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*ADDR_MAP[(12*m+8)+:3] + 80*ADDR_MAP[(12*m+4)+:2] + 8*(ADDR_MAP[12*m+:4]-5) + 4 + n] = mux_address[ROW_WIDTH*(n/PHASE_DIV) + m]; end end end // Bank address for (m = 0; m < BANK_WIDTH; m = m + 1) begin: gen_ba_out assign out_ba[m] = mem_dq_out[48*BANK_MAP[(12*m+8)+:3] + 12*BANK_MAP[(12*m+4)+:2] + BANK_MAP[12*m+:4]]; if (BANK_MAP[12*m+:4] < 4'hA) begin: gen_lt10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*BANK_MAP[(12*m+8)+:3] + 80*BANK_MAP[(12*m+4)+:2] + 8*BANK_MAP[12*m+:4] + n] = mux_bank[BANK_WIDTH*(n/PHASE_DIV) + m]; end end else begin: gen_ge10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*BANK_MAP[(12*m+8)+:3] + 80*BANK_MAP[(12*m+4)+:2] + 8*(BANK_MAP[12*m+:4]-5) + 4 + n] = mux_bank[BANK_WIDTH*(n/PHASE_DIV) + m]; end end end // Chip select if (USE_CS_PORT == 1) begin: gen_cs_n_out for (m = 0; m < CS_WIDTH*nCS_PER_RANK; m = m + 1) begin: gen_cs_out assign out_cs_n[m] = mem_dq_out[48*CS_MAP[(12*m+8)+:3] + 12*CS_MAP[(12*m+4)+:2] + CS_MAP[12*m+:4]]; if (CS_MAP[12*m+:4] < 4'hA) begin: gen_lt10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*CS_MAP[(12*m+8)+:3] + 80*CS_MAP[(12*m+4)+:2] + 8*CS_MAP[12*m+:4] + n] = mux_cs_n[CS_WIDTH*nCS_PER_RANK*(n/PHASE_DIV) + m]; end end else begin: gen_ge10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*CS_MAP[(12*m+8)+:3] + 80*CS_MAP[(12*m+4)+:2] + 8*(CS_MAP[12*m+:4]-5) + 4 + n] = mux_cs_n[CS_WIDTH*nCS_PER_RANK*(n/PHASE_DIV) + m]; end end end end if(CKE_ODT_AUX == "FALSE") begin // ODT_ports wire [ODT_WIDTH*nCK_PER_CLK -1 :0] mux_odt_remap ; if(RANKS == 1) begin for(x =0 ; x < nCK_PER_CLK ; x = x+1) begin assign mux_odt_remap[(x*ODT_WIDTH)+:ODT_WIDTH] = {ODT_WIDTH{mux_odt[0]}} ; end end else begin for(x =0 ; x < 2*nCK_PER_CLK ; x = x+2) begin assign mux_odt_remap[(x*ODT_WIDTH/RANKS)+:ODT_WIDTH/RANKS] = {ODT_WIDTH/RANKS{mux_odt[0]}} ; assign mux_odt_remap[((x*ODT_WIDTH/RANKS)+(ODT_WIDTH/RANKS))+:ODT_WIDTH/RANKS] = {ODT_WIDTH/RANKS{mux_odt[1]}} ; end end if (USE_ODT_PORT == 1) begin: gen_odt_out for (m = 0; m < ODT_WIDTH; m = m + 1) begin: gen_odt_out_1 assign out_odt[m] = mem_dq_out[48*ODT_MAP[(12*m+8)+:3] + 12*ODT_MAP[(12*m+4)+:2] + ODT_MAP[12*m+:4]]; if (ODT_MAP[12*m+:4] < 4'hA) begin: gen_lt10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*ODT_MAP[(12*m+8)+:3] + 80*ODT_MAP[(12*m+4)+:2] + 
8*ODT_MAP[12*m+:4] + n] = mux_odt_remap[ODT_WIDTH*(n/PHASE_DIV) + m]; end end else begin: gen_ge10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*ODT_MAP[(12*m+8)+:3] + 80*ODT_MAP[(12*m+4)+:2] + 8*(ODT_MAP[12*m+:4]-5) + 4 + n] = mux_odt_remap[ODT_WIDTH*(n/PHASE_DIV) + m]; end end end end wire [CKE_WIDTH*nCK_PER_CLK -1:0] mux_cke_remap ; for(x = 0 ; x < nCK_PER_CLK ; x = x +1) begin assign mux_cke_remap[(x*CKE_WIDTH)+:CKE_WIDTH] = {CKE_WIDTH{mux_cke[x]}} ; end for (m = 0; m < CKE_WIDTH; m = m + 1) begin: gen_cke_out assign out_cke[m] = mem_dq_out[48*CKE_MAP[(12*m+8)+:3] + 12*CKE_MAP[(12*m+4)+:2] + CKE_MAP[12*m+:4]]; if (CKE_MAP[12*m+:4] < 4'hA) begin: gen_lt10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*CKE_MAP[(12*m+8)+:3] + 80*CKE_MAP[(12*m+4)+:2] + 8*CKE_MAP[12*m+:4] + n] = mux_cke_remap[CKE_WIDTH*(n/PHASE_DIV) + m]; end end else begin: gen_ge10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*CKE_MAP[(12*m+8)+:3] + 80*CKE_MAP[(12*m+4)+:2] + 8*(CKE_MAP[12*m+:4]-5) + 4 + n] = mux_cke_remap[CKE_WIDTH*(n/PHASE_DIV) + m]; end end end end //***************************************************************** // Data mask //***************************************************************** if (USE_DM_PORT == 1) begin: gen_dm_out for (m = 0; m < DM_WIDTH; m = m + 1) begin: gen_dm_out assign out_dm[m] = mem_dq_out[48*FULL_MASK_MAP[(12*m+8)+:3] + 12*FULL_MASK_MAP[(12*m+4)+:2] + FULL_MASK_MAP[12*m+:4]]; assign ts_dm[m] = mem_dq_ts[48*FULL_MASK_MAP[(12*m+8)+:3] + 12*FULL_MASK_MAP[(12*m+4)+:2] + FULL_MASK_MAP[12*m+:4]]; for (n = 0; n < PHASE_PER_CLK; n = n + 1) begin: loop_xpose assign phy_dout[320*FULL_MASK_MAP[(12*m+8)+:3] + 80*FULL_MASK_MAP[(12*m+4)+:2] + 8*FULL_MASK_MAP[12*m+:4] + n] = mux_wrdata_mask[DM_WIDTH*n + m]; end end end //***************************************************************** // Input and output DQ //***************************************************************** for (m = 0; m < DQ_WIDTH; m = m + 1) begin: gen_dq_inout // to MC_PHY assign mem_dq_in[40*FULL_DATA_MAP[(12*m+8)+:3] + 10*FULL_DATA_MAP[(12*m+4)+:2] + FULL_DATA_MAP[12*m+:4]] = in_dq[m]; // to I/O buffers assign out_dq[m] = mem_dq_out[48*FULL_DATA_MAP[(12*m+8)+:3] + 12*FULL_DATA_MAP[(12*m+4)+:2] + FULL_DATA_MAP[12*m+:4]]; assign ts_dq[m] = mem_dq_ts[48*FULL_DATA_MAP[(12*m+8)+:3] + 12*FULL_DATA_MAP[(12*m+4)+:2] + FULL_DATA_MAP[12*m+:4]]; for (n = 0; n < PHASE_PER_CLK; n = n + 1) begin: loop_xpose assign phy_dout[320*FULL_DATA_MAP[(12*m+8)+:3] + 80*FULL_DATA_MAP[(12*m+4)+:2] + 8*FULL_DATA_MAP[12*m+:4] + n] = mux_wrdata[DQ_WIDTH*n + m]; end end //***************************************************************** // Input and output DQS //***************************************************************** for (m = 0; m < DQS_WIDTH; m = m + 1) begin: gen_dqs_inout // to MC_PHY assign mem_dqs_in[4*DQS_BYTE_MAP[(8*m+4)+:3] + DQS_BYTE_MAP[(8*m)+:2]] = in_dqs[m]; // to I/O buffers assign out_dqs[m] = mem_dqs_out[4*DQS_BYTE_MAP[(8*m+4)+:3] + DQS_BYTE_MAP[(8*m)+:2]]; assign ts_dqs[m] = mem_dqs_ts[4*DQS_BYTE_MAP[(8*m+4)+:3] + DQS_BYTE_MAP[(8*m)+:2]]; end endgenerate //*************************************************************************** // Memory I/F output and I/O buffer instantiation //*************************************************************************** // Note on instantiation - generally at the minimum, it's not required to // instantiate the output buffers - they can be inferred by the synthesis // tool, and there aren't any attributes that need to be 
associated with // them. Consider as a future option to take out the OBUF instantiations OBUF u_cas_n_obuf ( .I (out_cas_n), .O (ddr_cas_n) ); OBUF u_ras_n_obuf ( .I (out_ras_n), .O (ddr_ras_n) ); OBUF u_we_n_obuf ( .I (out_we_n), .O (ddr_we_n) ); generate genvar p; for (p = 0; p < ROW_WIDTH; p = p + 1) begin: gen_addr_obuf OBUF u_addr_obuf ( .I (out_addr[p]), .O (ddr_addr[p]) ); end for (p = 0; p < BANK_WIDTH; p = p + 1) begin: gen_bank_obuf OBUF u_bank_obuf ( .I (out_ba[p]), .O (ddr_ba[p]) ); end if (USE_CS_PORT == 1) begin: gen_cs_n_obuf for (p = 0; p < CS_WIDTH*nCS_PER_RANK; p = p + 1) begin: gen_cs_obuf OBUF u_cs_n_obuf ( .I (out_cs_n[p]), .O (ddr_cs_n[p]) ); end end if(CKE_ODT_AUX == "FALSE")begin:cke_odt_thru_outfifo if (USE_ODT_PORT== 1) begin: gen_odt_obuf for (p = 0; p < ODT_WIDTH; p = p + 1) begin: gen_odt_obuf OBUF u_cs_n_obuf ( .I (out_odt[p]), .O (ddr_odt[p]) ); end end for (p = 0; p < CKE_WIDTH; p = p + 1) begin: gen_cke_obuf OBUF u_cs_n_obuf ( .I (out_cke[p]), .O (ddr_cke[p]) ); end end if (REG_CTRL == "ON") begin: gen_parity_obuf // Generate addr/ctrl parity output only for DDR3 registered DIMMs OBUF u_parity_obuf ( .I (out_parity), .O (ddr_parity) ); end else begin: gen_parity_tieoff assign ddr_parity = 1'b0; end if ((DRAM_TYPE == "DDR3") || (REG_CTRL == "ON")) begin: gen_reset_obuf // Generate reset output only for DDR3 and DDR2 RDIMMs OBUF u_reset_obuf ( .I (mux_reset_n), .O (ddr_reset_n) ); end else begin: gen_reset_tieoff assign ddr_reset_n = 1'b1; end if (USE_DM_PORT == 1) begin: gen_dm_obuf for (p = 0; p < DM_WIDTH; p = p + 1) begin: loop_dm OBUFT u_dm_obuf ( .I (out_dm[p]), .T (ts_dm[p]), .O (ddr_dm[p]) ); end end else begin: gen_dm_tieoff assign ddr_dm = 'b0; end if (DATA_IO_PRIM_TYPE == "HP_LP") begin: gen_dq_iobuf_HP for (p = 0; p < DQ_WIDTH; p = p + 1) begin: gen_dq_iobuf IOBUF_DCIEN # ( .IBUF_LOW_PWR (IBUF_LOW_PWR) ) u_iobuf_dq ( .DCITERMDISABLE (data_io_idle_pwrdwn), .IBUFDISABLE (data_io_idle_pwrdwn), .I (out_dq[p]), .T (ts_dq[p]), .O (in_dq[p]), .IO (ddr_dq[p]) ); end end else if (DATA_IO_PRIM_TYPE == "HR_LP") begin: gen_dq_iobuf_HR for (p = 0; p < DQ_WIDTH; p = p + 1) begin: gen_dq_iobuf IOBUF_INTERMDISABLE # ( .IBUF_LOW_PWR (IBUF_LOW_PWR) ) u_iobuf_dq ( .INTERMDISABLE (data_io_idle_pwrdwn), .IBUFDISABLE (data_io_idle_pwrdwn), .I (out_dq[p]), .T (ts_dq[p]), .O (in_dq[p]), .IO (ddr_dq[p]) ); end end else begin: gen_dq_iobuf_default for (p = 0; p < DQ_WIDTH; p = p + 1) begin: gen_dq_iobuf IOBUF # ( .IBUF_LOW_PWR (IBUF_LOW_PWR) ) u_iobuf_dq ( .I (out_dq[p]), .T (ts_dq[p]), .O (in_dq[p]), .IO (ddr_dq[p]) ); end end if (DATA_IO_PRIM_TYPE == "HP_LP") begin: gen_dqs_iobuf_HP for (p = 0; p < DQS_WIDTH; p = p + 1) begin: gen_dqs_iobuf if ((DRAM_TYPE == "DDR2") && (DDR2_DQSN_ENABLE != "YES")) begin: gen_ddr2_dqs_se IOBUF_DCIEN # ( .IBUF_LOW_PWR (IBUF_LOW_PWR) ) u_iobuf_dqs ( .DCITERMDISABLE (data_io_idle_pwrdwn), .IBUFDISABLE (data_io_idle_pwrdwn), .I (out_dqs[p]), .T (ts_dqs[p]), .O (in_dqs[p]), .IO (ddr_dqs[p]) ); assign ddr_dqs_n[p] = 1'b0; end else begin: gen_dqs_diff IOBUFDS_DCIEN # ( .IBUF_LOW_PWR (IBUF_LOW_PWR), .DQS_BIAS ("TRUE") ) u_iobuf_dqs ( .DCITERMDISABLE (data_io_idle_pwrdwn), .IBUFDISABLE (data_io_idle_pwrdwn), .I (out_dqs[p]), .T (ts_dqs[p]), .O (in_dqs[p]), .IO (ddr_dqs[p]), .IOB (ddr_dqs_n[p]) ); end end end else if (DATA_IO_PRIM_TYPE == "HR_LP") begin: gen_dqs_iobuf_HR for (p = 0; p < DQS_WIDTH; p = p + 1) begin: gen_dqs_iobuf if ((DRAM_TYPE == "DDR2") && (DDR2_DQSN_ENABLE != "YES")) begin: gen_ddr2_dqs_se IOBUF_INTERMDISABLE # ( .IBUF_LOW_PWR 
(IBUF_LOW_PWR) ) u_iobuf_dqs ( .INTERMDISABLE (data_io_idle_pwrdwn), .IBUFDISABLE (data_io_idle_pwrdwn), .I (out_dqs[p]), .T (ts_dqs[p]), .O (in_dqs[p]), .IO (ddr_dqs[p]) ); assign ddr_dqs_n[p] = 1'b0; end else begin: gen_dqs_diff IOBUFDS_INTERMDISABLE # ( .IBUF_LOW_PWR (IBUF_LOW_PWR), .DQS_BIAS ("TRUE") ) u_iobuf_dqs ( .INTERMDISABLE (data_io_idle_pwrdwn), .IBUFDISABLE (data_io_idle_pwrdwn), .I (out_dqs[p]), .T (ts_dqs[p]), .O (in_dqs[p]), .IO (ddr_dqs[p]), .IOB (ddr_dqs_n[p]) ); end end end else begin: gen_dqs_iobuf_default for (p = 0; p < DQS_WIDTH; p = p + 1) begin: gen_dqs_iobuf if ((DRAM_TYPE == "DDR2") && (DDR2_DQSN_ENABLE != "YES")) begin: gen_ddr2_dqs_se IOBUF # ( .IBUF_LOW_PWR (IBUF_LOW_PWR) ) u_iobuf_dqs ( .I (out_dqs[p]), .T (ts_dqs[p]), .O (in_dqs[p]), .IO (ddr_dqs[p]) ); assign ddr_dqs_n[p] = 1'b0; end else begin: gen_dqs_diff IOBUFDS # ( .IBUF_LOW_PWR (IBUF_LOW_PWR), .DQS_BIAS ("TRUE") ) u_iobuf_dqs ( .I (out_dqs[p]), .T (ts_dqs[p]), .O (in_dqs[p]), .IO (ddr_dqs[p]), .IOB (ddr_dqs_n[p]) ); end end end endgenerate always @(posedge clk) begin phy_ctl_wd_i1 <= #TCQ phy_ctl_wd; phy_ctl_wr_i1 <= #TCQ phy_ctl_wr; phy_ctl_wd_i2 <= #TCQ phy_ctl_wd_i1; phy_ctl_wr_i2 <= #TCQ phy_ctl_wr_i1; data_offset_1_i1 <= #TCQ data_offset_1; data_offset_1_i2 <= #TCQ data_offset_1_i1; data_offset_2_i1 <= #TCQ data_offset_2; data_offset_2_i2 <= #TCQ data_offset_2_i1; end // 2 cycles of command delay needed for 4;1 mode. 2:1 mode does not need it. // 2:1 mode the command goes through pre fifo assign phy_ctl_wd_temp = (nCK_PER_CLK == 4) ? phy_ctl_wd_i2 : phy_ctl_wd_of; assign phy_ctl_wr_temp = (nCK_PER_CLK == 4) ? phy_ctl_wr_i2 : phy_ctl_wr_of; assign data_offset_1_temp = (nCK_PER_CLK == 4) ? data_offset_1_i2 : data_offset_1_of; assign data_offset_2_temp = (nCK_PER_CLK == 4) ? 
data_offset_2_i2 : data_offset_2_of; generate begin mig_7series_v1_9_ddr_of_pre_fifo # ( .TCQ (25), .DEPTH (8), .WIDTH (32) ) phy_ctl_pre_fifo_0 ( .clk (clk), .rst (rst), .full_in (phy_ctl_full_temp[1]), .wr_en_in (phy_ctl_wr), .d_in (phy_ctl_wd), .wr_en_out (phy_ctl_wr_of), .d_out (phy_ctl_wd_of) ); mig_7series_v1_9_ddr_of_pre_fifo # ( .TCQ (25), .DEPTH (8), .WIDTH (6) ) phy_ctl_pre_fifo_1 ( .clk (clk), .rst (rst), .full_in (phy_ctl_full_temp[2]), .wr_en_in (phy_ctl_wr), .d_in (data_offset_1), .wr_en_out (), .d_out (data_offset_1_of) ); mig_7series_v1_9_ddr_of_pre_fifo # ( .TCQ (25), .DEPTH (8), .WIDTH (6) ) phy_ctl_pre_fifo_2 ( .clk (clk), .rst (rst), .full_in (phy_ctl_full_temp[3]), .wr_en_in (phy_ctl_wr), .d_in (data_offset_2), .wr_en_out (), .d_out (data_offset_2_of) ); end endgenerate //*************************************************************************** // Hard PHY instantiation //*************************************************************************** assign phy_ctl_full = phy_ctl_full_temp[0]; mig_7series_v1_9_ddr_mc_phy # ( .BYTE_LANES_B0 (BYTE_LANES_B0), .BYTE_LANES_B1 (BYTE_LANES_B1), .BYTE_LANES_B2 (BYTE_LANES_B2), .BYTE_LANES_B3 (BYTE_LANES_B3), .BYTE_LANES_B4 (BYTE_LANES_B4), .DATA_CTL_B0 (DATA_CTL_B0), .DATA_CTL_B1 (DATA_CTL_B1), .DATA_CTL_B2 (DATA_CTL_B2), .DATA_CTL_B3 (DATA_CTL_B3), .DATA_CTL_B4 (DATA_CTL_B4), .PHY_0_BITLANES (PHY_0_BITLANES), .PHY_1_BITLANES (PHY_1_BITLANES), .PHY_2_BITLANES (PHY_2_BITLANES), .PHY_0_BITLANES_OUTONLY (PHY_0_BITLANES_OUTONLY), .PHY_1_BITLANES_OUTONLY (PHY_1_BITLANES_OUTONLY), .PHY_2_BITLANES_OUTONLY (PHY_2_BITLANES_OUTONLY), .RCLK_SELECT_BANK (CKE_ODT_RCLK_SELECT_BANK), .RCLK_SELECT_LANE (CKE_ODT_RCLK_SELECT_LANE), //.CKE_ODT_AUX (CKE_ODT_AUX), .GENERATE_DDR_CK_MAP (TMP_GENERATE_DDR_CK_MAP), .BYTELANES_DDR_CK (TMP_BYTELANES_DDR_CK), .NUM_DDR_CK (CK_WIDTH), .LP_DDR_CK_WIDTH (LP_DDR_CK_WIDTH), .PO_CTL_COARSE_BYPASS ("FALSE"), .PHYCTL_CMD_FIFO ("FALSE"), .PHY_CLK_RATIO (nCK_PER_CLK), .MASTER_PHY_CTL (MASTER_PHY_CTL), .PHY_FOUR_WINDOW_CLOCKS (63), .PHY_EVENTS_DELAY (18), .PHY_COUNT_EN ("FALSE"), //PHY_COUNT_EN .PHY_SYNC_MODE ("FALSE"), .SYNTHESIS ((SIM_CAL_OPTION == "NONE") ? "TRUE" : "FALSE"), .PHY_DISABLE_SEQ_MATCH ("TRUE"), //"TRUE" .PHY_0_GENERATE_IDELAYCTRL ("FALSE"), .PHY_0_A_PI_FREQ_REF_DIV (PHY_0_A_PI_FREQ_REF_DIV), .PHY_0_CMD_OFFSET (PHY_0_CMD_OFFSET), //for CKE .PHY_0_RD_CMD_OFFSET_0 (PHY_0_RD_CMD_OFFSET_0), .PHY_0_RD_CMD_OFFSET_1 (PHY_0_RD_CMD_OFFSET_1), .PHY_0_RD_CMD_OFFSET_2 (PHY_0_RD_CMD_OFFSET_2), .PHY_0_RD_CMD_OFFSET_3 (PHY_0_RD_CMD_OFFSET_3), .PHY_0_RD_DURATION_0 (6), .PHY_0_RD_DURATION_1 (6), .PHY_0_RD_DURATION_2 (6), .PHY_0_RD_DURATION_3 (6), .PHY_0_WR_CMD_OFFSET_0 (PHY_0_WR_CMD_OFFSET_0), .PHY_0_WR_CMD_OFFSET_1 (PHY_0_WR_CMD_OFFSET_1), .PHY_0_WR_CMD_OFFSET_2 (PHY_0_WR_CMD_OFFSET_2), .PHY_0_WR_CMD_OFFSET_3 (PHY_0_WR_CMD_OFFSET_3), .PHY_0_WR_DURATION_0 (PHY_0_WR_DURATION_0), .PHY_0_WR_DURATION_1 (PHY_0_WR_DURATION_1), .PHY_0_WR_DURATION_2 (PHY_0_WR_DURATION_2), .PHY_0_WR_DURATION_3 (PHY_0_WR_DURATION_3), .PHY_0_AO_TOGGLE ((RANKS == 1) ? 
1 : 5), .PHY_0_A_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_0_B_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_0_C_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_0_D_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_0_A_PO_OCLKDELAY_INV (PO_OCLKDELAY_INV), .PHY_0_A_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_0_B_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_0_C_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_0_D_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_1_GENERATE_IDELAYCTRL ("FALSE"), //.PHY_1_GENERATE_DDR_CK (TMP_PHY_1_GENERATE_DDR_CK), //.PHY_1_NUM_DDR_CK (1), .PHY_1_A_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_1_B_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_1_C_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_1_D_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_1_A_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_1_B_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_1_C_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_1_D_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_2_GENERATE_IDELAYCTRL ("FALSE"), //.PHY_2_GENERATE_DDR_CK (TMP_PHY_2_GENERATE_DDR_CK), //.PHY_2_NUM_DDR_CK (1), .PHY_2_A_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_2_B_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_2_C_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_2_D_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_2_A_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_2_B_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_2_C_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_2_D_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .TCK (tCK), .PHY_0_IODELAY_GRP (IODELAY_GRP) ,.PHY_1_IODELAY_GRP (IODELAY_GRP) ,.PHY_2_IODELAY_GRP (IODELAY_GRP) ,.BANK_TYPE (BANK_TYPE) ,.CKE_ODT_AUX (CKE_ODT_AUX) ) u_ddr_mc_phy ( .rst (rst), // Don't use MC_PHY to generate DDR_RESET_N output. 
Instead // generate this output outside of MC_PHY (and synchronous to CLK) .ddr_rst_in_n (1'b1), .phy_clk (clk), .freq_refclk (freq_refclk), .mem_refclk (mem_refclk), // Remove later - always same connection as phy_clk port .mem_refclk_div4 (clk), .pll_lock (pll_lock), .auxout_clk (), .sync_pulse (sync_pulse), // IDELAYCTRL instantiated outside of mc_phy module .idelayctrl_refclk (), .phy_dout (phy_dout), .phy_cmd_wr_en (phy_cmd_wr_en), .phy_data_wr_en (phy_data_wr_en), .phy_rd_en (phy_rd_en), .phy_ctl_wd (phy_ctl_wd_temp), .phy_ctl_wr (phy_ctl_wr_temp), .if_empty_def (phy_if_empty_def), .if_rst (phy_if_reset), .phyGo ('b1), .aux_in_1 (aux_in_1), .aux_in_2 (aux_in_2), // No support yet for different data offsets for different I/O banks // (possible use in supporting wider range of skew among bytes) .data_offset_1 (data_offset_1_temp), .data_offset_2 (data_offset_2_temp), .cke_in (), .if_a_empty (), .if_empty (if_empty), .if_empty_or (), .if_empty_and (), .of_ctl_a_full (), // .of_data_a_full (phy_data_full), .of_ctl_full (phy_cmd_full), .of_data_full (), .pre_data_a_full (phy_pre_data_a_full), .idelay_ld (idelay_ld), .idelay_ce (idelay_ce), .idelay_inc (idelay_inc), .input_sink (), .phy_din (phy_din), .phy_ctl_a_full (), .phy_ctl_full (phy_ctl_full_temp), .mem_dq_out (mem_dq_out), .mem_dq_ts (mem_dq_ts), .mem_dq_in (mem_dq_in), .mem_dqs_out (mem_dqs_out), .mem_dqs_ts (mem_dqs_ts), .mem_dqs_in (mem_dqs_in), .aux_out (aux_out), .phy_ctl_ready (), .rst_out (), .ddr_clk (ddr_clk), //.rclk (), .mcGo (phy_mc_go), .phy_write_calib (phy_write_calib), .phy_read_calib (phy_read_calib), .calib_sel (calib_sel), .calib_in_common (calib_in_common), .calib_zero_inputs (calib_zero_inputs), .calib_zero_ctrl (calib_zero_ctrl), .calib_zero_lanes ('b0), .po_fine_enable (po_fine_enable), .po_coarse_enable (po_coarse_enable), .po_fine_inc (po_fine_inc), .po_coarse_inc (po_coarse_inc), .po_counter_load_en (po_counter_load_en), .po_sel_fine_oclk_delay (po_sel_fine_oclk_delay), .po_counter_load_val (po_counter_load_val), .po_counter_read_en (po_counter_read_en), .po_coarse_overflow (), .po_fine_overflow (), .po_counter_read_val (po_counter_read_val), .pi_rst_dqs_find (pi_rst_dqs_find), .pi_fine_enable (pi_fine_enable), .pi_fine_inc (pi_fine_inc), .pi_counter_load_en (pi_counter_load_en), .pi_counter_read_en (dbg_pi_counter_read_en), .pi_counter_load_val (pi_counter_load_val), .pi_fine_overflow (), .pi_counter_read_val (pi_counter_read_val), .pi_phase_locked (pi_phase_locked), .pi_phase_locked_all (pi_phase_locked_all), .pi_dqs_found (), .pi_dqs_found_any (pi_dqs_found), .pi_dqs_found_all (pi_dqs_found_all), .pi_dqs_found_lanes (dbg_pi_dqs_found_lanes_phy4lanes), // Currently not being used. May be used in future if periodic // reads become a requirement. This output could be used to signal // a catastrophic failure in read capture and the need for // re-calibration. .pi_dqs_out_of_range (pi_dqs_out_of_range) ,.ref_dll_lock (ref_dll_lock) ,.pi_phase_locked_lanes (dbg_pi_phase_locked_phy4lanes) // ,.rst_phaser_ref (rst_phaser_ref) ); endmodule
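//-----------------------------------------------------------------------------
// Illustrative sketch only -- not part of the MIG-generated sources. It
// reduces the clock-ratio-dependent PHY_CONTROL command path used in the
// wrapper above to a minimal stand-alone module: in 4:1 mode the control word
// is simply delayed two fabric clocks, while in 2:1 mode the pre-FIFO output
// is selected instead. All module, port, and signal names below are invented
// for this example and do not correspond to MC_PHY ports.
//-----------------------------------------------------------------------------
module mig_phy_ctl_path_sketch # (
  parameter nCK_PER_CLK = 4,   // memory:fabric clock ratio (4 or 2)
  parameter TCQ         = 100  // register delay, simulation only
) (
  input         clk,
  input  [31:0] ctl_wd_in,      // command word from the controller
  input  [31:0] ctl_wd_prefifo, // command word as seen at the pre-FIFO output
  output [31:0] ctl_wd_sel      // word presented to PHY_CONTROL
);

  reg [31:0] ctl_wd_r1, ctl_wd_r2;

  // Two-stage pipeline used only by the 4:1 path
  always @(posedge clk) begin
    ctl_wd_r1 <= #TCQ ctl_wd_in;
    ctl_wd_r2 <= #TCQ ctl_wd_r1;
  end

  // 4:1 mode: two-cycle delayed copy; 2:1 mode: pre-FIFO output
  assign ctl_wd_sel = (nCK_PER_CLK == 4) ? ctl_wd_r2 : ctl_wd_prefifo;

endmodule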
//***************************************************************************** // (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor : Xilinx // \ \ \/ Version : %version // \ \ Application : MIG // / / Filename : ddr_mc_phy_wrapper.v // /___/ /\ Date Last Modified : $date$ // \ \ / \ Date Created : Oct 10 2010 // \___\/\___\ // //Device : 7 Series //Design Name : DDR3 SDRAM //Purpose : Wrapper file that encompasses the MC_PHY module // instantiation and handles the vector remapping between // the MC_PHY ports and the user's DDR3 ports. Vector // remapping affects DDR3 control, address, and DQ/DQS/DM. 
//Reference : //Revision History : //***************************************************************************** `timescale 1 ps / 1 ps module mig_7series_v1_9_ddr_mc_phy_wrapper # ( parameter TCQ = 100, // Register delay (simulation only) parameter tCK = 2500, // ps parameter BANK_TYPE = "HP_IO", // # = "HP_IO", "HPL_IO", "HR_IO", "HRL_IO" parameter DATA_IO_PRIM_TYPE = "DEFAULT", // # = "HP_LP", "HR_LP", "DEFAULT" parameter DATA_IO_IDLE_PWRDWN = "ON", // "ON" or "OFF" parameter IODELAY_GRP = "IODELAY_MIG", parameter nCK_PER_CLK = 4, // Memory:Logic clock ratio parameter nCS_PER_RANK = 1, // # of unique CS outputs per rank parameter BANK_WIDTH = 3, // # of bank address parameter CKE_WIDTH = 1, // # of clock enable outputs parameter CS_WIDTH = 1, // # of chip select parameter CK_WIDTH = 1, // # of CK parameter CWL = 5, // CAS Write latency parameter DDR2_DQSN_ENABLE = "YES", // Enable differential DQS for DDR2 parameter DM_WIDTH = 8, // # of data mask parameter DQ_WIDTH = 16, // # of data bits parameter DQS_CNT_WIDTH = 3, // ceil(log2(DQS_WIDTH)) parameter DQS_WIDTH = 8, // # of strobe pairs parameter DRAM_TYPE = "DDR3", // DRAM type (DDR2, DDR3) parameter RANKS = 4, // # of ranks parameter ODT_WIDTH = 1, // # of ODT outputs parameter REG_CTRL = "OFF", // "ON" for registered DIMM parameter ROW_WIDTH = 16, // # of row/column address parameter USE_CS_PORT = 1, // Support chip select output parameter USE_DM_PORT = 1, // Support data mask output parameter USE_ODT_PORT = 1, // Support ODT output parameter IBUF_LPWR_MODE = "OFF", // input buffer low power option parameter LP_DDR_CK_WIDTH = 2, // Hard PHY parameters parameter PHYCTL_CMD_FIFO = "FALSE", parameter DATA_CTL_B0 = 4'hc, parameter DATA_CTL_B1 = 4'hf, parameter DATA_CTL_B2 = 4'hf, parameter DATA_CTL_B3 = 4'hf, parameter DATA_CTL_B4 = 4'hf, parameter BYTE_LANES_B0 = 4'b1111, parameter BYTE_LANES_B1 = 4'b0000, parameter BYTE_LANES_B2 = 4'b0000, parameter BYTE_LANES_B3 = 4'b0000, parameter BYTE_LANES_B4 = 4'b0000, parameter PHY_0_BITLANES = 48'h0000_0000_0000, parameter PHY_1_BITLANES = 48'h0000_0000_0000, parameter PHY_2_BITLANES = 48'h0000_0000_0000, // Parameters calculated outside of this block parameter HIGHEST_BANK = 3, // Highest I/O bank index parameter HIGHEST_LANE = 12, // Highest byte lane index // ** Pin mapping parameters // Parameters for mapping between hard PHY and physical DDR3 signals // There are 2 classes of parameters: // - DQS_BYTE_MAP, CK_BYTE_MAP, CKE_ODT_BYTE_MAP: These consist of // 8-bit elements. Each element indicates the bank and byte lane // location of that particular signal. The bit lane in this case // doesn't need to be specified, either because there's only one // pin pair in each byte lane that the DQS or CK pair can be // located at, or in the case of CKE_ODT_BYTE_MAP, only the byte // lane needs to be specified in order to determine which byte // lane generates the RCLK (Note that CKE, and ODT must be located // in the same bank, thus only one element in CKE_ODT_BYTE_MAP) // [7:4] = bank # (0-4) // [3:0] = byte lane # (0-3) // - All other MAP parameters: These consist of 12-bit elements. Each // element indicates the bank, byte lane, and bit lane location of // that particular signal: // [11:8] = bank # (0-4) // [7:4] = byte lane # (0-3) // [3:0] = bit lane # (0-11) // Note that not all elements in all parameters will be used - it // depends on the actual widths of the DDR3 buses. 
The parameters are // structured to support a maximum of: // - DQS groups: 18 // - data mask bits: 18 // In addition, the default parameter size of some of the parameters will // support a certain number of bits, however, this can be expanded at // compile time by expanding the width of the vector passed into this // parameter // - chip selects: 10 // - bank bits: 3 // - address bits: 16 parameter CK_BYTE_MAP = 144'h00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00, parameter ADDR_MAP = 192'h000_000_000_000_000_000_000_000_000_000_000_000_000_000_000_000, parameter BANK_MAP = 36'h000_000_000, parameter CAS_MAP = 12'h000, parameter CKE_ODT_BYTE_MAP = 8'h00, parameter CKE_MAP = 96'h000_000_000_000_000_000_000_000, parameter ODT_MAP = 96'h000_000_000_000_000_000_000_000, parameter CKE_ODT_AUX = "FALSE", parameter CS_MAP = 120'h000_000_000_000_000_000_000_000_000_000, parameter PARITY_MAP = 12'h000, parameter RAS_MAP = 12'h000, parameter WE_MAP = 12'h000, parameter DQS_BYTE_MAP = 144'h00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00, // DATAx_MAP parameter is used for byte lane X in the design parameter DATA0_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA1_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA2_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA3_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA4_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA5_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA6_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA7_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA8_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA9_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA10_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA11_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA12_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA13_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA14_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA15_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA16_MAP = 96'h000_000_000_000_000_000_000_000, parameter DATA17_MAP = 96'h000_000_000_000_000_000_000_000, // MASK0_MAP used for bytes [8:0], MASK1_MAP for bytes [17:9] parameter MASK0_MAP = 108'h000_000_000_000_000_000_000_000_000, parameter MASK1_MAP = 108'h000_000_000_000_000_000_000_000_000, // Simulation options parameter SIM_CAL_OPTION = "NONE", // The PHY_CONTROL primitive in the bank where PLL exists is declared // as the Master PHY_CONTROL. 
parameter MASTER_PHY_CTL = 1 ) ( input rst, input clk, input freq_refclk, input mem_refclk, input pll_lock, input sync_pulse, input idelayctrl_refclk, input phy_cmd_wr_en, input phy_data_wr_en, input [31:0] phy_ctl_wd, input phy_ctl_wr, input phy_if_empty_def, input phy_if_reset, input [5:0] data_offset_1, input [5:0] data_offset_2, input [3:0] aux_in_1, input [3:0] aux_in_2, output [4:0] idelaye2_init_val, output [5:0] oclkdelay_init_val, output if_empty, output phy_ctl_full, output phy_cmd_full, output phy_data_full, output phy_pre_data_a_full, output [(CK_WIDTH * LP_DDR_CK_WIDTH)-1:0] ddr_clk, output phy_mc_go, input phy_write_calib, input phy_read_calib, input calib_in_common, input [5:0] calib_sel, input [HIGHEST_BANK-1:0] calib_zero_inputs, input [HIGHEST_BANK-1:0] calib_zero_ctrl, input [2:0] po_fine_enable, input [2:0] po_coarse_enable, input [2:0] po_fine_inc, input [2:0] po_coarse_inc, input po_counter_load_en, input po_counter_read_en, input [2:0] po_sel_fine_oclk_delay, input [8:0] po_counter_load_val, output [8:0] po_counter_read_val, output [5:0] pi_counter_read_val, input [HIGHEST_BANK-1:0] pi_rst_dqs_find, input pi_fine_enable, input pi_fine_inc, input pi_counter_load_en, input [5:0] pi_counter_load_val, input idelay_ce, input idelay_inc, input idelay_ld, input idle, output pi_phase_locked, output pi_phase_locked_all, output pi_dqs_found, output pi_dqs_found_all, output pi_dqs_out_of_range, // From/to calibration logic/soft PHY input phy_init_data_sel, input [nCK_PER_CLK*ROW_WIDTH-1:0] mux_address, input [nCK_PER_CLK*BANK_WIDTH-1:0] mux_bank, input [nCK_PER_CLK-1:0] mux_cas_n, input [CS_WIDTH*nCS_PER_RANK*nCK_PER_CLK-1:0] mux_cs_n, input [nCK_PER_CLK-1:0] mux_ras_n, input [1:0] mux_odt, input [nCK_PER_CLK-1:0] mux_cke, input [nCK_PER_CLK-1:0] mux_we_n, input [nCK_PER_CLK-1:0] parity_in, input [2*nCK_PER_CLK*DQ_WIDTH-1:0] mux_wrdata, input [2*nCK_PER_CLK*(DQ_WIDTH/8)-1:0] mux_wrdata_mask, input mux_reset_n, output [2*nCK_PER_CLK*DQ_WIDTH-1:0] rd_data, // Memory I/F output [ROW_WIDTH-1:0] ddr_addr, output [BANK_WIDTH-1:0] ddr_ba, output ddr_cas_n, output [CKE_WIDTH-1:0] ddr_cke, output [CS_WIDTH*nCS_PER_RANK-1:0] ddr_cs_n, output [DM_WIDTH-1:0] ddr_dm, output [ODT_WIDTH-1:0] ddr_odt, output ddr_parity, output ddr_ras_n, output ddr_we_n, output ddr_reset_n, inout [DQ_WIDTH-1:0] ddr_dq, inout [DQS_WIDTH-1:0] ddr_dqs, inout [DQS_WIDTH-1:0] ddr_dqs_n ,input dbg_pi_counter_read_en ,output ref_dll_lock ,input rst_phaser_ref ,output [11:0] dbg_pi_phase_locked_phy4lanes ,output [11:0] dbg_pi_dqs_found_lanes_phy4lanes ); function [71:0] generate_bytelanes_ddr_ck; input [143:0] ck_byte_map; integer v ; begin generate_bytelanes_ddr_ck = 'b0 ; for (v = 0; v < CK_WIDTH; v = v + 1) begin if ((CK_BYTE_MAP[((v*8)+4)+:4]) == 2) generate_bytelanes_ddr_ck[48+(4*v)+1*(CK_BYTE_MAP[(v*8)+:4])] = 1'b1; else if ((CK_BYTE_MAP[((v*8)+4)+:4]) == 1) generate_bytelanes_ddr_ck[24+(4*v)+1*(CK_BYTE_MAP[(v*8)+:4])] = 1'b1; else generate_bytelanes_ddr_ck[4*v+1*(CK_BYTE_MAP[(v*8)+:4])] = 1'b1; end end endfunction function [(2*CK_WIDTH*8)-1:0] generate_ddr_ck_map; input [143:0] ck_byte_map; integer g; begin generate_ddr_ck_map = 'b0 ; for(g = 0 ; g < CK_WIDTH ; g= g + 1) begin generate_ddr_ck_map[(g*2*8)+:8] = (ck_byte_map[(g*8)+:4] == 4'd0) ? "A" : (ck_byte_map[(g*8)+:4] == 4'd1) ? "B" : (ck_byte_map[(g*8)+:4] == 4'd2) ? "C" : "D" ; generate_ddr_ck_map[(((g*2)+1)*8)+:8] = (ck_byte_map[((g*8)+4)+:4] == 4'd0) ? "0" : (ck_byte_map[((g*8)+4)+:4] == 4'd1) ? 
"1" : "2" ; //each STRING charater takes 0 location end end endfunction // Enable low power mode for input buffer localparam IBUF_LOW_PWR = (IBUF_LPWR_MODE == "OFF") ? "FALSE" : ((IBUF_LPWR_MODE == "ON") ? "TRUE" : "ILLEGAL"); // Ratio of data to strobe localparam DQ_PER_DQS = DQ_WIDTH / DQS_WIDTH; // number of data phases per internal clock localparam PHASE_PER_CLK = 2*nCK_PER_CLK; // used to determine routing to OUT_FIFO for control/address for 2:1 // vs. 4:1 memory:internal clock ratio modes localparam PHASE_DIV = 4 / nCK_PER_CLK; localparam CLK_PERIOD = tCK * nCK_PER_CLK; // Create an aggregate parameters for data mapping to reduce # of generate // statements required in remapping code. Need to account for the case // when the DQ:DQS ratio is not 8:1 - in this case, each DATAx_MAP // parameter will have fewer than 8 elements used localparam FULL_DATA_MAP = {DATA17_MAP[12*DQ_PER_DQS-1:0], DATA16_MAP[12*DQ_PER_DQS-1:0], DATA15_MAP[12*DQ_PER_DQS-1:0], DATA14_MAP[12*DQ_PER_DQS-1:0], DATA13_MAP[12*DQ_PER_DQS-1:0], DATA12_MAP[12*DQ_PER_DQS-1:0], DATA11_MAP[12*DQ_PER_DQS-1:0], DATA10_MAP[12*DQ_PER_DQS-1:0], DATA9_MAP[12*DQ_PER_DQS-1:0], DATA8_MAP[12*DQ_PER_DQS-1:0], DATA7_MAP[12*DQ_PER_DQS-1:0], DATA6_MAP[12*DQ_PER_DQS-1:0], DATA5_MAP[12*DQ_PER_DQS-1:0], DATA4_MAP[12*DQ_PER_DQS-1:0], DATA3_MAP[12*DQ_PER_DQS-1:0], DATA2_MAP[12*DQ_PER_DQS-1:0], DATA1_MAP[12*DQ_PER_DQS-1:0], DATA0_MAP[12*DQ_PER_DQS-1:0]}; // Same deal, but for data mask mapping localparam FULL_MASK_MAP = {MASK1_MAP, MASK0_MAP}; localparam TMP_BYTELANES_DDR_CK = generate_bytelanes_ddr_ck(CK_BYTE_MAP) ; localparam TMP_GENERATE_DDR_CK_MAP = generate_ddr_ck_map(CK_BYTE_MAP) ; // Temporary parameters to determine which bank is outputting the CK/CK# // Eventually there will be support for multiple CK/CK# output //localparam TMP_DDR_CLK_SELECT_BANK = (CK_BYTE_MAP[7:4]); //// Temporary method to force MC_PHY to generate ODDR associated with //// CK/CK# output only for a single byte lane in the design. All banks //// that won't be generating the CK/CK# will have "UNUSED" as their //// PHY_GENERATE_DDR_CK parameter //localparam TMP_PHY_0_GENERATE_DDR_CK // = (TMP_DDR_CLK_SELECT_BANK != 0) ? "UNUSED" : // ((CK_BYTE_MAP[1:0] == 2'b00) ? "A" : // ((CK_BYTE_MAP[1:0] == 2'b01) ? "B" : // ((CK_BYTE_MAP[1:0] == 2'b10) ? "C" : "D"))); //localparam TMP_PHY_1_GENERATE_DDR_CK // = (TMP_DDR_CLK_SELECT_BANK != 1) ? "UNUSED" : // ((CK_BYTE_MAP[1:0] == 2'b00) ? "A" : // ((CK_BYTE_MAP[1:0] == 2'b01) ? "B" : // ((CK_BYTE_MAP[1:0] == 2'b10) ? "C" : "D"))); //localparam TMP_PHY_2_GENERATE_DDR_CK // = (TMP_DDR_CLK_SELECT_BANK != 2) ? "UNUSED" : // ((CK_BYTE_MAP[1:0] == 2'b00) ? "A" : // ((CK_BYTE_MAP[1:0] == 2'b01) ? "B" : // ((CK_BYTE_MAP[1:0] == 2'b10) ? "C" : "D"))); // Function to generate MC_PHY parameters PHY_BITLANES_OUTONLYx // which indicates which bit lanes in data byte lanes are // output-only bitlanes (e.g. 
used specifically for data mask outputs) function [143:0] calc_phy_bitlanes_outonly; input [215:0] data_mask_in; integer z; begin calc_phy_bitlanes_outonly = 'b0; // Only enable BITLANES parameters for data masks if, well, if // the data masks are actually enabled if (USE_DM_PORT == 1) for (z = 0; z < DM_WIDTH; z = z + 1) calc_phy_bitlanes_outonly[48*data_mask_in[(12*z+8)+:3] + 12*data_mask_in[(12*z+4)+:2] + data_mask_in[12*z+:4]] = 1'b1; end endfunction localparam PHY_BITLANES_OUTONLY = calc_phy_bitlanes_outonly(FULL_MASK_MAP); localparam PHY_0_BITLANES_OUTONLY = PHY_BITLANES_OUTONLY[47:0]; localparam PHY_1_BITLANES_OUTONLY = PHY_BITLANES_OUTONLY[95:48]; localparam PHY_2_BITLANES_OUTONLY = PHY_BITLANES_OUTONLY[143:96]; // Determine which bank and byte lane generates the RCLK used to clock // out the auxilliary (ODT, CKE) outputs localparam CKE_ODT_RCLK_SELECT_BANK_AUX_ON = (CKE_ODT_BYTE_MAP[7:4] == 4'h0) ? 0 : ((CKE_ODT_BYTE_MAP[7:4] == 4'h1) ? 1 : ((CKE_ODT_BYTE_MAP[7:4] == 4'h2) ? 2 : ((CKE_ODT_BYTE_MAP[7:4] == 4'h3) ? 3 : ((CKE_ODT_BYTE_MAP[7:4] == 4'h4) ? 4 : -1)))); localparam CKE_ODT_RCLK_SELECT_LANE_AUX_ON = (CKE_ODT_BYTE_MAP[3:0] == 4'h0) ? "A" : ((CKE_ODT_BYTE_MAP[3:0] == 4'h1) ? "B" : ((CKE_ODT_BYTE_MAP[3:0] == 4'h2) ? "C" : ((CKE_ODT_BYTE_MAP[3:0] == 4'h3) ? "D" : "ILLEGAL"))); localparam CKE_ODT_RCLK_SELECT_BANK_AUX_OFF = (CKE_MAP[11:8] == 4'h0) ? 0 : ((CKE_MAP[11:8] == 4'h1) ? 1 : ((CKE_MAP[11:8] == 4'h2) ? 2 : ((CKE_MAP[11:8] == 4'h3) ? 3 : ((CKE_MAP[11:8] == 4'h4) ? 4 : -1)))); localparam CKE_ODT_RCLK_SELECT_LANE_AUX_OFF = (CKE_MAP[7:4] == 4'h0) ? "A" : ((CKE_MAP[7:4] == 4'h1) ? "B" : ((CKE_MAP[7:4] == 4'h2) ? "C" : ((CKE_MAP[7:4] == 4'h3) ? "D" : "ILLEGAL"))); localparam CKE_ODT_RCLK_SELECT_BANK = (CKE_ODT_AUX == "TRUE") ? CKE_ODT_RCLK_SELECT_BANK_AUX_ON : CKE_ODT_RCLK_SELECT_BANK_AUX_OFF ; localparam CKE_ODT_RCLK_SELECT_LANE = (CKE_ODT_AUX == "TRUE") ? CKE_ODT_RCLK_SELECT_LANE_AUX_ON : CKE_ODT_RCLK_SELECT_LANE_AUX_OFF ; //*************************************************************************** // OCLKDELAYED tap setting calculation: // Parameters for calculating amount of phase shifting output clock to // achieve 90 degree offset between DQS and DQ on writes //*************************************************************************** //90 deg equivalent to 0.25 for MEM_RefClk <= 300 MHz // and 1.25 for Mem_RefClk > 300 MHz localparam PO_OCLKDELAY_INV = (((SIM_CAL_OPTION == "NONE") && (tCK > 2500)) || (tCK >= 3333)) ? "FALSE" : "TRUE"; //DIV1: MemRefClk >= 400 MHz, DIV2: 200 <= MemRefClk < 400, //DIV4: MemRefClk < 200 MHz localparam PHY_0_A_PI_FREQ_REF_DIV = tCK > 5000 ? "DIV4" : tCK > 2500 ? "DIV2": "NONE"; localparam FREQ_REF_DIV = (PHY_0_A_PI_FREQ_REF_DIV == "DIV4" ? 4 : PHY_0_A_PI_FREQ_REF_DIV == "DIV2" ? 2 : 1); // Intrinsic delay between OCLK and OCLK_DELAYED Phaser Output localparam real INT_DELAY = 0.4392/FREQ_REF_DIV + 100.0/tCK; // Whether OCLK_DELAY output comes inverted or not localparam real HALF_CYCLE_DELAY = 0.5*(PO_OCLKDELAY_INV == "TRUE" ? 1 : 0); // Phaser-Out Stage3 Tap delay for 90 deg shift. // Maximum tap delay is FreqRefClk period distributed over 64 taps // localparam real TAP_DELAY = MC_OCLK_DELAY/64/FREQ_REF_DIV; localparam real MC_OCLK_DELAY = ((PO_OCLKDELAY_INV == "TRUE" ? 1.25 : 0.25) - (INT_DELAY + HALF_CYCLE_DELAY)) * 63 * FREQ_REF_DIV; //localparam integer PHY_0_A_PO_OCLK_DELAY = MC_OCLK_DELAY; localparam integer PHY_0_A_PO_OCLK_DELAY_HW = (tCK > 2273) ? 34 : (tCK > 2000) ? 33 : (tCK > 1724) ? 32 : (tCK > 1515) ? 31 : (tCK > 1315) ? 
30 : (tCK > 1136) ? 29 : (tCK > 1021) ? 28 : 27; // Note that simulation requires a different value than in H/W because of the // difference in the way delays are modeled localparam integer PHY_0_A_PO_OCLK_DELAY = (SIM_CAL_OPTION == "NONE") ? ((tCK > 2500) ? 8 : (DRAM_TYPE == "DDR3") ? PHY_0_A_PO_OCLK_DELAY_HW : 30) : MC_OCLK_DELAY; // Initial DQ IDELAY value localparam PHY_0_A_IDELAYE2_IDELAY_VALUE = (SIM_CAL_OPTION != "FAST_CAL") ? 0 : (tCK < 1000) ? 0 : (tCK < 1330) ? 0 : (tCK < 2300) ? 0 : (tCK < 2500) ? 2 : 0; //localparam PHY_0_A_IDELAYE2_IDELAY_VALUE = 0; // Aux_out parameters RD_CMD_OFFSET = CL+2? and WR_CMD_OFFSET = CWL+3? localparam PHY_0_RD_CMD_OFFSET_0 = 10; localparam PHY_0_RD_CMD_OFFSET_1 = 10; localparam PHY_0_RD_CMD_OFFSET_2 = 10; localparam PHY_0_RD_CMD_OFFSET_3 = 10; // 4:1 and 2:1 have WR_CMD_OFFSET values for ODT timing localparam PHY_0_WR_CMD_OFFSET_0 = (nCK_PER_CLK == 4) ? 8 : 4; localparam PHY_0_WR_CMD_OFFSET_1 = (nCK_PER_CLK == 4) ? 8 : 4; localparam PHY_0_WR_CMD_OFFSET_2 = (nCK_PER_CLK == 4) ? 8 : 4; localparam PHY_0_WR_CMD_OFFSET_3 = (nCK_PER_CLK == 4) ? 8 : 4; // 4:1 and 2:1 have different values localparam PHY_0_WR_DURATION_0 = 7; localparam PHY_0_WR_DURATION_1 = 7; localparam PHY_0_WR_DURATION_2 = 7; localparam PHY_0_WR_DURATION_3 = 7; // Aux_out parameters for toggle mode (CKE) localparam CWL_M = (REG_CTRL == "ON") ? CWL + 1 : CWL; localparam PHY_0_CMD_OFFSET = (nCK_PER_CLK == 4) ? (CWL_M % 2) ? 8 : 9 : (CWL < 7) ? 4 + ((CWL_M % 2) ? 0 : 1) : 5 + ((CWL_M % 2) ? 0 : 1); // temporary parameter to enable/disable PHY PC counters. In both 4:1 and // 2:1 cases, this should be disabled. For now, enable for 4:1 mode to // avoid making too many changes at once. localparam PHY_COUNT_EN = (nCK_PER_CLK == 4) ? "TRUE" : "FALSE"; wire [((HIGHEST_LANE+3)/4)*4-1:0] aux_out; wire [HIGHEST_LANE-1:0] mem_dqs_in; wire [HIGHEST_LANE-1:0] mem_dqs_out; wire [HIGHEST_LANE-1:0] mem_dqs_ts; wire [HIGHEST_LANE*10-1:0] mem_dq_in; wire [HIGHEST_LANE*12-1:0] mem_dq_out; wire [HIGHEST_LANE*12-1:0] mem_dq_ts; wire [DQ_WIDTH-1:0] in_dq; wire [DQS_WIDTH-1:0] in_dqs; wire [ROW_WIDTH-1:0] out_addr; wire [BANK_WIDTH-1:0] out_ba; wire out_cas_n; wire [CS_WIDTH*nCS_PER_RANK-1:0] out_cs_n; wire [DM_WIDTH-1:0] out_dm; wire [ODT_WIDTH -1:0] out_odt; wire [CKE_WIDTH -1 :0] out_cke ; wire [DQ_WIDTH-1:0] out_dq; wire [DQS_WIDTH-1:0] out_dqs; wire out_parity; wire out_ras_n; wire out_we_n; wire [HIGHEST_LANE*80-1:0] phy_din; wire [HIGHEST_LANE*80-1:0] phy_dout; wire phy_rd_en; wire [DM_WIDTH-1:0] ts_dm; wire [DQ_WIDTH-1:0] ts_dq; wire [DQS_WIDTH-1:0] ts_dqs; reg [31:0] phy_ctl_wd_i1; reg [31:0] phy_ctl_wd_i2; reg phy_ctl_wr_i1; reg phy_ctl_wr_i2; reg [5:0] data_offset_1_i1; reg [5:0] data_offset_1_i2; reg [5:0] data_offset_2_i1; reg [5:0] data_offset_2_i2; wire [31:0] phy_ctl_wd_temp; wire phy_ctl_wr_temp; wire [5:0] data_offset_1_temp; wire [5:0] data_offset_2_temp; wire [5:0] data_offset_1_of; wire [5:0] data_offset_2_of; wire [31:0] phy_ctl_wd_of; (* keep = "true", max_fanout = 3 *) wire phy_ctl_wr_of /* synthesis syn_maxfan = 1 */; wire [3:0] phy_ctl_full_temp; wire data_io_idle_pwrdwn; // Always read from input data FIFOs when not empty assign phy_rd_en = !if_empty; // IDELAYE2 initial value assign idelaye2_init_val = PHY_0_A_IDELAYE2_IDELAY_VALUE; assign oclkdelay_init_val = PHY_0_A_PO_OCLK_DELAY; // Idle powerdown when there are no pending reads in the MC assign data_io_idle_pwrdwn = DATA_IO_IDLE_PWRDWN == "ON" ? 
idle : 1'b0; //*************************************************************************** // Auxiliary output steering //*************************************************************************** // For a 4 rank I/F the aux_out[3:0] from the addr/ctl bank will be // mapped to ddr_odt and the aux_out[7:4] from one of the data banks // will map to ddr_cke. For I/Fs less than 4 the aux_out[3:0] from the // addr/ctl bank would bank would map to both ddr_odt and ddr_cke. generate if(CKE_ODT_AUX == "TRUE")begin:cke_thru_auxpins if (CKE_WIDTH == 1) begin : gen_cke // Explicitly instantiate OBUF to ensure that these are present // in the netlist. Typically this is not required since NGDBUILD // at the top-level knows to infer an I/O/IOBUF and therefore a // top-level LOC constraint can be attached to that pin. This does // not work when a hierarchical flow is used and the LOC is applied // at the individual core-level UCF OBUF u_cke_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK]), .O (ddr_cke) ); end else begin: gen_2rank_cke OBUF u_cke0_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK]), .O (ddr_cke[0]) ); OBUF u_cke1_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK+2]), .O (ddr_cke[1]) ); end end endgenerate generate if(CKE_ODT_AUX == "TRUE")begin:odt_thru_auxpins if (USE_ODT_PORT == 1) begin : gen_use_odt // Explicitly instantiate OBUF to ensure that these are present // in the netlist. Typically this is not required since NGDBUILD // at the top-level knows to infer an I/O/IOBUF and therefore a // top-level LOC constraint can be attached to that pin. This does // not work when a hierarchical flow is used and the LOC is applied // at the individual core-level UCF OBUF u_odt_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK+1]), .O (ddr_odt[0]) ); if (ODT_WIDTH == 2 && RANKS == 1) begin: gen_2port_odt OBUF u_odt1_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK+2]), .O (ddr_odt[1]) ); end else if (ODT_WIDTH == 2 && RANKS == 2) begin: gen_2rank_odt OBUF u_odt1_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK+3]), .O (ddr_odt[1]) ); end else if (ODT_WIDTH == 3 && RANKS == 1) begin: gen_3port_odt OBUF u_odt1_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK+2]), .O (ddr_odt[1]) ); OBUF u_odt2_obuf ( .I (aux_out[4*CKE_ODT_RCLK_SELECT_BANK+3]), .O (ddr_odt[2]) ); end end else begin assign ddr_odt = 'b0; end end endgenerate //*************************************************************************** // Read data bit steering //*************************************************************************** // Transpose elements of rd_data_map to form final read data output: // phy_din elements are grouped according to "physical bit" - e.g. // for nCK_PER_CLK = 4, there are 8 data phases transfered per physical // bit per clock cycle: // = {dq0_fall3, dq0_rise3, dq0_fall2, dq0_rise2, // dq0_fall1, dq0_rise1, dq0_fall0, dq0_rise0} // whereas rd_data is are grouped according to "phase" - e.g. // = {dq7_rise0, dq6_rise0, dq5_rise0, dq4_rise0, // dq3_rise0, dq2_rise0, dq1_rise0, dq0_rise0} // therefore rd_data is formed by transposing phy_din - e.g. 
// for nCK_PER_CLK = 4, and DQ_WIDTH = 16, and assuming MC_PHY // bit_lane[0] maps to DQ[0], and bit_lane[1] maps to DQ[1], then // the assignments for bits of rd_data corresponding to DQ[1:0] // would be: // {rd_data[112], rd_data[96], rd_data[80], rd_data[64], // rd_data[48], rd_data[32], rd_data[16], rd_data[0]} = phy_din[7:0] // {rd_data[113], rd_data[97], rd_data[81], rd_data[65], // rd_data[49], rd_data[33], rd_data[17], rd_data[1]} = phy_din[15:8] generate genvar i, j; for (i = 0; i < DQ_WIDTH; i = i + 1) begin: gen_loop_rd_data_1 for (j = 0; j < PHASE_PER_CLK; j = j + 1) begin: gen_loop_rd_data_2 assign rd_data[DQ_WIDTH*j + i] = phy_din[(320*FULL_DATA_MAP[(12*i+8)+:3]+ 80*FULL_DATA_MAP[(12*i+4)+:2] + 8*FULL_DATA_MAP[12*i+:4]) + j]; end end endgenerate //*************************************************************************** // Control/address //*************************************************************************** assign out_cas_n = mem_dq_out[48*CAS_MAP[10:8] + 12*CAS_MAP[5:4] + CAS_MAP[3:0]]; generate // if signal placed on bit lanes [0-9] if (CAS_MAP[3:0] < 4'hA) begin: gen_cas_lt10 // Determine routing based on clock ratio mode. If running in 4:1 // mode, then all four bits from logic are used. If 2:1 mode, only // 2-bits are provided by logic, and each bit is repeated 2x to form // 4-bit input to IN_FIFO, e.g. // 4:1 mode: phy_dout[] = {in[3], in[2], in[1], in[0]} // 2:1 mode: phy_dout[] = {in[1], in[1], in[0], in[0]} assign phy_dout[(320*CAS_MAP[10:8] + 80*CAS_MAP[5:4] + 8*CAS_MAP[3:0])+:4] = {mux_cas_n[3/PHASE_DIV], mux_cas_n[2/PHASE_DIV], mux_cas_n[1/PHASE_DIV], mux_cas_n[0]}; end else begin: gen_cas_ge10 // If signal is placed in bit lane [10] or [11], route to upper // nibble of phy_dout lane [5] or [6] respectively (in this case // phy_dout lane [5, 6] are multiplexed to take input for two // different SDR signals - this is how bits[10,11] need to be // provided to the OUT_FIFO assign phy_dout[(320*CAS_MAP[10:8] + 80*CAS_MAP[5:4] + 8*(CAS_MAP[3:0]-5) + 4)+:4] = {mux_cas_n[3/PHASE_DIV], mux_cas_n[2/PHASE_DIV], mux_cas_n[1/PHASE_DIV], mux_cas_n[0]}; end endgenerate assign out_ras_n = mem_dq_out[48*RAS_MAP[10:8] + 12*RAS_MAP[5:4] + RAS_MAP[3:0]]; generate if (RAS_MAP[3:0] < 4'hA) begin: gen_ras_lt10 assign phy_dout[(320*RAS_MAP[10:8] + 80*RAS_MAP[5:4] + 8*RAS_MAP[3:0])+:4] = {mux_ras_n[3/PHASE_DIV], mux_ras_n[2/PHASE_DIV], mux_ras_n[1/PHASE_DIV], mux_ras_n[0]}; end else begin: gen_ras_ge10 assign phy_dout[(320*RAS_MAP[10:8] + 80*RAS_MAP[5:4] + 8*(RAS_MAP[3:0]-5) + 4)+:4] = {mux_ras_n[3/PHASE_DIV], mux_ras_n[2/PHASE_DIV], mux_ras_n[1/PHASE_DIV], mux_ras_n[0]}; end endgenerate assign out_we_n = mem_dq_out[48*WE_MAP[10:8] + 12*WE_MAP[5:4] + WE_MAP[3:0]]; generate if (WE_MAP[3:0] < 4'hA) begin: gen_we_lt10 assign phy_dout[(320*WE_MAP[10:8] + 80*WE_MAP[5:4] + 8*WE_MAP[3:0])+:4] = {mux_we_n[3/PHASE_DIV], mux_we_n[2/PHASE_DIV], mux_we_n[1/PHASE_DIV], mux_we_n[0]}; end else begin: gen_we_ge10 assign phy_dout[(320*WE_MAP[10:8] + 80*WE_MAP[5:4] + 8*(WE_MAP[3:0]-5) + 4)+:4] = {mux_we_n[3/PHASE_DIV], mux_we_n[2/PHASE_DIV], mux_we_n[1/PHASE_DIV], mux_we_n[0]}; end endgenerate generate if (REG_CTRL == "ON") begin: gen_parity_out // Generate addr/ctrl parity output only for DDR3 and DDR2 registered DIMMs assign out_parity = mem_dq_out[48*PARITY_MAP[10:8] + 12*PARITY_MAP[5:4] + PARITY_MAP[3:0]]; if (PARITY_MAP[3:0] < 4'hA) begin: gen_lt10 assign phy_dout[(320*PARITY_MAP[10:8] + 80*PARITY_MAP[5:4] + 8*PARITY_MAP[3:0])+:4] = {parity_in[3/PHASE_DIV], parity_in[2/PHASE_DIV], 
parity_in[1/PHASE_DIV], parity_in[0]}; end else begin: gen_ge10 assign phy_dout[(320*PARITY_MAP[10:8] + 80*PARITY_MAP[5:4] + 8*(PARITY_MAP[3:0]-5) + 4)+:4] = {parity_in[3/PHASE_DIV], parity_in[2/PHASE_DIV], parity_in[1/PHASE_DIV], parity_in[0]}; end end endgenerate //***************************************************************** generate genvar m, n,x; //***************************************************************** // Control/address (multi-bit) buses //***************************************************************** // Row/Column address for (m = 0; m < ROW_WIDTH; m = m + 1) begin: gen_addr_out assign out_addr[m] = mem_dq_out[48*ADDR_MAP[(12*m+8)+:3] + 12*ADDR_MAP[(12*m+4)+:2] + ADDR_MAP[12*m+:4]]; if (ADDR_MAP[12*m+:4] < 4'hA) begin: gen_lt10 // For multi-bit buses, we also have to deal with transposition // when going from the logic-side control bus to phy_dout for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*ADDR_MAP[(12*m+8)+:3] + 80*ADDR_MAP[(12*m+4)+:2] + 8*ADDR_MAP[12*m+:4] + n] = mux_address[ROW_WIDTH*(n/PHASE_DIV) + m]; end end else begin: gen_ge10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*ADDR_MAP[(12*m+8)+:3] + 80*ADDR_MAP[(12*m+4)+:2] + 8*(ADDR_MAP[12*m+:4]-5) + 4 + n] = mux_address[ROW_WIDTH*(n/PHASE_DIV) + m]; end end end // Bank address for (m = 0; m < BANK_WIDTH; m = m + 1) begin: gen_ba_out assign out_ba[m] = mem_dq_out[48*BANK_MAP[(12*m+8)+:3] + 12*BANK_MAP[(12*m+4)+:2] + BANK_MAP[12*m+:4]]; if (BANK_MAP[12*m+:4] < 4'hA) begin: gen_lt10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*BANK_MAP[(12*m+8)+:3] + 80*BANK_MAP[(12*m+4)+:2] + 8*BANK_MAP[12*m+:4] + n] = mux_bank[BANK_WIDTH*(n/PHASE_DIV) + m]; end end else begin: gen_ge10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*BANK_MAP[(12*m+8)+:3] + 80*BANK_MAP[(12*m+4)+:2] + 8*(BANK_MAP[12*m+:4]-5) + 4 + n] = mux_bank[BANK_WIDTH*(n/PHASE_DIV) + m]; end end end // Chip select if (USE_CS_PORT == 1) begin: gen_cs_n_out for (m = 0; m < CS_WIDTH*nCS_PER_RANK; m = m + 1) begin: gen_cs_out assign out_cs_n[m] = mem_dq_out[48*CS_MAP[(12*m+8)+:3] + 12*CS_MAP[(12*m+4)+:2] + CS_MAP[12*m+:4]]; if (CS_MAP[12*m+:4] < 4'hA) begin: gen_lt10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*CS_MAP[(12*m+8)+:3] + 80*CS_MAP[(12*m+4)+:2] + 8*CS_MAP[12*m+:4] + n] = mux_cs_n[CS_WIDTH*nCS_PER_RANK*(n/PHASE_DIV) + m]; end end else begin: gen_ge10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*CS_MAP[(12*m+8)+:3] + 80*CS_MAP[(12*m+4)+:2] + 8*(CS_MAP[12*m+:4]-5) + 4 + n] = mux_cs_n[CS_WIDTH*nCS_PER_RANK*(n/PHASE_DIV) + m]; end end end end if(CKE_ODT_AUX == "FALSE") begin // ODT_ports wire [ODT_WIDTH*nCK_PER_CLK -1 :0] mux_odt_remap ; if(RANKS == 1) begin for(x =0 ; x < nCK_PER_CLK ; x = x+1) begin assign mux_odt_remap[(x*ODT_WIDTH)+:ODT_WIDTH] = {ODT_WIDTH{mux_odt[0]}} ; end end else begin for(x =0 ; x < 2*nCK_PER_CLK ; x = x+2) begin assign mux_odt_remap[(x*ODT_WIDTH/RANKS)+:ODT_WIDTH/RANKS] = {ODT_WIDTH/RANKS{mux_odt[0]}} ; assign mux_odt_remap[((x*ODT_WIDTH/RANKS)+(ODT_WIDTH/RANKS))+:ODT_WIDTH/RANKS] = {ODT_WIDTH/RANKS{mux_odt[1]}} ; end end if (USE_ODT_PORT == 1) begin: gen_odt_out for (m = 0; m < ODT_WIDTH; m = m + 1) begin: gen_odt_out_1 assign out_odt[m] = mem_dq_out[48*ODT_MAP[(12*m+8)+:3] + 12*ODT_MAP[(12*m+4)+:2] + ODT_MAP[12*m+:4]]; if (ODT_MAP[12*m+:4] < 4'hA) begin: gen_lt10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*ODT_MAP[(12*m+8)+:3] + 80*ODT_MAP[(12*m+4)+:2] + 
8*ODT_MAP[12*m+:4] + n] = mux_odt_remap[ODT_WIDTH*(n/PHASE_DIV) + m]; end end else begin: gen_ge10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*ODT_MAP[(12*m+8)+:3] + 80*ODT_MAP[(12*m+4)+:2] + 8*(ODT_MAP[12*m+:4]-5) + 4 + n] = mux_odt_remap[ODT_WIDTH*(n/PHASE_DIV) + m]; end end end end wire [CKE_WIDTH*nCK_PER_CLK -1:0] mux_cke_remap ; for(x = 0 ; x < nCK_PER_CLK ; x = x +1) begin assign mux_cke_remap[(x*CKE_WIDTH)+:CKE_WIDTH] = {CKE_WIDTH{mux_cke[x]}} ; end for (m = 0; m < CKE_WIDTH; m = m + 1) begin: gen_cke_out assign out_cke[m] = mem_dq_out[48*CKE_MAP[(12*m+8)+:3] + 12*CKE_MAP[(12*m+4)+:2] + CKE_MAP[12*m+:4]]; if (CKE_MAP[12*m+:4] < 4'hA) begin: gen_lt10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*CKE_MAP[(12*m+8)+:3] + 80*CKE_MAP[(12*m+4)+:2] + 8*CKE_MAP[12*m+:4] + n] = mux_cke_remap[CKE_WIDTH*(n/PHASE_DIV) + m]; end end else begin: gen_ge10 for (n = 0; n < 4; n = n + 1) begin: loop_xpose assign phy_dout[320*CKE_MAP[(12*m+8)+:3] + 80*CKE_MAP[(12*m+4)+:2] + 8*(CKE_MAP[12*m+:4]-5) + 4 + n] = mux_cke_remap[CKE_WIDTH*(n/PHASE_DIV) + m]; end end end end //***************************************************************** // Data mask //***************************************************************** if (USE_DM_PORT == 1) begin: gen_dm_out for (m = 0; m < DM_WIDTH; m = m + 1) begin: gen_dm_out assign out_dm[m] = mem_dq_out[48*FULL_MASK_MAP[(12*m+8)+:3] + 12*FULL_MASK_MAP[(12*m+4)+:2] + FULL_MASK_MAP[12*m+:4]]; assign ts_dm[m] = mem_dq_ts[48*FULL_MASK_MAP[(12*m+8)+:3] + 12*FULL_MASK_MAP[(12*m+4)+:2] + FULL_MASK_MAP[12*m+:4]]; for (n = 0; n < PHASE_PER_CLK; n = n + 1) begin: loop_xpose assign phy_dout[320*FULL_MASK_MAP[(12*m+8)+:3] + 80*FULL_MASK_MAP[(12*m+4)+:2] + 8*FULL_MASK_MAP[12*m+:4] + n] = mux_wrdata_mask[DM_WIDTH*n + m]; end end end //***************************************************************** // Input and output DQ //***************************************************************** for (m = 0; m < DQ_WIDTH; m = m + 1) begin: gen_dq_inout // to MC_PHY assign mem_dq_in[40*FULL_DATA_MAP[(12*m+8)+:3] + 10*FULL_DATA_MAP[(12*m+4)+:2] + FULL_DATA_MAP[12*m+:4]] = in_dq[m]; // to I/O buffers assign out_dq[m] = mem_dq_out[48*FULL_DATA_MAP[(12*m+8)+:3] + 12*FULL_DATA_MAP[(12*m+4)+:2] + FULL_DATA_MAP[12*m+:4]]; assign ts_dq[m] = mem_dq_ts[48*FULL_DATA_MAP[(12*m+8)+:3] + 12*FULL_DATA_MAP[(12*m+4)+:2] + FULL_DATA_MAP[12*m+:4]]; for (n = 0; n < PHASE_PER_CLK; n = n + 1) begin: loop_xpose assign phy_dout[320*FULL_DATA_MAP[(12*m+8)+:3] + 80*FULL_DATA_MAP[(12*m+4)+:2] + 8*FULL_DATA_MAP[12*m+:4] + n] = mux_wrdata[DQ_WIDTH*n + m]; end end //***************************************************************** // Input and output DQS //***************************************************************** for (m = 0; m < DQS_WIDTH; m = m + 1) begin: gen_dqs_inout // to MC_PHY assign mem_dqs_in[4*DQS_BYTE_MAP[(8*m+4)+:3] + DQS_BYTE_MAP[(8*m)+:2]] = in_dqs[m]; // to I/O buffers assign out_dqs[m] = mem_dqs_out[4*DQS_BYTE_MAP[(8*m+4)+:3] + DQS_BYTE_MAP[(8*m)+:2]]; assign ts_dqs[m] = mem_dqs_ts[4*DQS_BYTE_MAP[(8*m+4)+:3] + DQS_BYTE_MAP[(8*m)+:2]]; end endgenerate //*************************************************************************** // Memory I/F output and I/O buffer instantiation //*************************************************************************** // Note on instantiation - generally at the minimum, it's not required to // instantiate the output buffers - they can be inferred by the synthesis // tool, and there aren't any attributes that need to be 
associated with // them. Consider as a future option to take out the OBUF instantiations OBUF u_cas_n_obuf ( .I (out_cas_n), .O (ddr_cas_n) ); OBUF u_ras_n_obuf ( .I (out_ras_n), .O (ddr_ras_n) ); OBUF u_we_n_obuf ( .I (out_we_n), .O (ddr_we_n) ); generate genvar p; for (p = 0; p < ROW_WIDTH; p = p + 1) begin: gen_addr_obuf OBUF u_addr_obuf ( .I (out_addr[p]), .O (ddr_addr[p]) ); end for (p = 0; p < BANK_WIDTH; p = p + 1) begin: gen_bank_obuf OBUF u_bank_obuf ( .I (out_ba[p]), .O (ddr_ba[p]) ); end if (USE_CS_PORT == 1) begin: gen_cs_n_obuf for (p = 0; p < CS_WIDTH*nCS_PER_RANK; p = p + 1) begin: gen_cs_obuf OBUF u_cs_n_obuf ( .I (out_cs_n[p]), .O (ddr_cs_n[p]) ); end end if(CKE_ODT_AUX == "FALSE")begin:cke_odt_thru_outfifo if (USE_ODT_PORT== 1) begin: gen_odt_obuf for (p = 0; p < ODT_WIDTH; p = p + 1) begin: gen_odt_obuf OBUF u_cs_n_obuf ( .I (out_odt[p]), .O (ddr_odt[p]) ); end end for (p = 0; p < CKE_WIDTH; p = p + 1) begin: gen_cke_obuf OBUF u_cs_n_obuf ( .I (out_cke[p]), .O (ddr_cke[p]) ); end end if (REG_CTRL == "ON") begin: gen_parity_obuf // Generate addr/ctrl parity output only for DDR3 registered DIMMs OBUF u_parity_obuf ( .I (out_parity), .O (ddr_parity) ); end else begin: gen_parity_tieoff assign ddr_parity = 1'b0; end if ((DRAM_TYPE == "DDR3") || (REG_CTRL == "ON")) begin: gen_reset_obuf // Generate reset output only for DDR3 and DDR2 RDIMMs OBUF u_reset_obuf ( .I (mux_reset_n), .O (ddr_reset_n) ); end else begin: gen_reset_tieoff assign ddr_reset_n = 1'b1; end if (USE_DM_PORT == 1) begin: gen_dm_obuf for (p = 0; p < DM_WIDTH; p = p + 1) begin: loop_dm OBUFT u_dm_obuf ( .I (out_dm[p]), .T (ts_dm[p]), .O (ddr_dm[p]) ); end end else begin: gen_dm_tieoff assign ddr_dm = 'b0; end if (DATA_IO_PRIM_TYPE == "HP_LP") begin: gen_dq_iobuf_HP for (p = 0; p < DQ_WIDTH; p = p + 1) begin: gen_dq_iobuf IOBUF_DCIEN # ( .IBUF_LOW_PWR (IBUF_LOW_PWR) ) u_iobuf_dq ( .DCITERMDISABLE (data_io_idle_pwrdwn), .IBUFDISABLE (data_io_idle_pwrdwn), .I (out_dq[p]), .T (ts_dq[p]), .O (in_dq[p]), .IO (ddr_dq[p]) ); end end else if (DATA_IO_PRIM_TYPE == "HR_LP") begin: gen_dq_iobuf_HR for (p = 0; p < DQ_WIDTH; p = p + 1) begin: gen_dq_iobuf IOBUF_INTERMDISABLE # ( .IBUF_LOW_PWR (IBUF_LOW_PWR) ) u_iobuf_dq ( .INTERMDISABLE (data_io_idle_pwrdwn), .IBUFDISABLE (data_io_idle_pwrdwn), .I (out_dq[p]), .T (ts_dq[p]), .O (in_dq[p]), .IO (ddr_dq[p]) ); end end else begin: gen_dq_iobuf_default for (p = 0; p < DQ_WIDTH; p = p + 1) begin: gen_dq_iobuf IOBUF # ( .IBUF_LOW_PWR (IBUF_LOW_PWR) ) u_iobuf_dq ( .I (out_dq[p]), .T (ts_dq[p]), .O (in_dq[p]), .IO (ddr_dq[p]) ); end end if (DATA_IO_PRIM_TYPE == "HP_LP") begin: gen_dqs_iobuf_HP for (p = 0; p < DQS_WIDTH; p = p + 1) begin: gen_dqs_iobuf if ((DRAM_TYPE == "DDR2") && (DDR2_DQSN_ENABLE != "YES")) begin: gen_ddr2_dqs_se IOBUF_DCIEN # ( .IBUF_LOW_PWR (IBUF_LOW_PWR) ) u_iobuf_dqs ( .DCITERMDISABLE (data_io_idle_pwrdwn), .IBUFDISABLE (data_io_idle_pwrdwn), .I (out_dqs[p]), .T (ts_dqs[p]), .O (in_dqs[p]), .IO (ddr_dqs[p]) ); assign ddr_dqs_n[p] = 1'b0; end else begin: gen_dqs_diff IOBUFDS_DCIEN # ( .IBUF_LOW_PWR (IBUF_LOW_PWR), .DQS_BIAS ("TRUE") ) u_iobuf_dqs ( .DCITERMDISABLE (data_io_idle_pwrdwn), .IBUFDISABLE (data_io_idle_pwrdwn), .I (out_dqs[p]), .T (ts_dqs[p]), .O (in_dqs[p]), .IO (ddr_dqs[p]), .IOB (ddr_dqs_n[p]) ); end end end else if (DATA_IO_PRIM_TYPE == "HR_LP") begin: gen_dqs_iobuf_HR for (p = 0; p < DQS_WIDTH; p = p + 1) begin: gen_dqs_iobuf if ((DRAM_TYPE == "DDR2") && (DDR2_DQSN_ENABLE != "YES")) begin: gen_ddr2_dqs_se IOBUF_INTERMDISABLE # ( .IBUF_LOW_PWR 
(IBUF_LOW_PWR) ) u_iobuf_dqs ( .INTERMDISABLE (data_io_idle_pwrdwn), .IBUFDISABLE (data_io_idle_pwrdwn), .I (out_dqs[p]), .T (ts_dqs[p]), .O (in_dqs[p]), .IO (ddr_dqs[p]) ); assign ddr_dqs_n[p] = 1'b0; end else begin: gen_dqs_diff IOBUFDS_INTERMDISABLE # ( .IBUF_LOW_PWR (IBUF_LOW_PWR), .DQS_BIAS ("TRUE") ) u_iobuf_dqs ( .INTERMDISABLE (data_io_idle_pwrdwn), .IBUFDISABLE (data_io_idle_pwrdwn), .I (out_dqs[p]), .T (ts_dqs[p]), .O (in_dqs[p]), .IO (ddr_dqs[p]), .IOB (ddr_dqs_n[p]) ); end end end else begin: gen_dqs_iobuf_default for (p = 0; p < DQS_WIDTH; p = p + 1) begin: gen_dqs_iobuf if ((DRAM_TYPE == "DDR2") && (DDR2_DQSN_ENABLE != "YES")) begin: gen_ddr2_dqs_se IOBUF # ( .IBUF_LOW_PWR (IBUF_LOW_PWR) ) u_iobuf_dqs ( .I (out_dqs[p]), .T (ts_dqs[p]), .O (in_dqs[p]), .IO (ddr_dqs[p]) ); assign ddr_dqs_n[p] = 1'b0; end else begin: gen_dqs_diff IOBUFDS # ( .IBUF_LOW_PWR (IBUF_LOW_PWR), .DQS_BIAS ("TRUE") ) u_iobuf_dqs ( .I (out_dqs[p]), .T (ts_dqs[p]), .O (in_dqs[p]), .IO (ddr_dqs[p]), .IOB (ddr_dqs_n[p]) ); end end end endgenerate always @(posedge clk) begin phy_ctl_wd_i1 <= #TCQ phy_ctl_wd; phy_ctl_wr_i1 <= #TCQ phy_ctl_wr; phy_ctl_wd_i2 <= #TCQ phy_ctl_wd_i1; phy_ctl_wr_i2 <= #TCQ phy_ctl_wr_i1; data_offset_1_i1 <= #TCQ data_offset_1; data_offset_1_i2 <= #TCQ data_offset_1_i1; data_offset_2_i1 <= #TCQ data_offset_2; data_offset_2_i2 <= #TCQ data_offset_2_i1; end // 2 cycles of command delay needed for 4;1 mode. 2:1 mode does not need it. // 2:1 mode the command goes through pre fifo assign phy_ctl_wd_temp = (nCK_PER_CLK == 4) ? phy_ctl_wd_i2 : phy_ctl_wd_of; assign phy_ctl_wr_temp = (nCK_PER_CLK == 4) ? phy_ctl_wr_i2 : phy_ctl_wr_of; assign data_offset_1_temp = (nCK_PER_CLK == 4) ? data_offset_1_i2 : data_offset_1_of; assign data_offset_2_temp = (nCK_PER_CLK == 4) ? 
data_offset_2_i2 : data_offset_2_of; generate begin mig_7series_v1_9_ddr_of_pre_fifo # ( .TCQ (25), .DEPTH (8), .WIDTH (32) ) phy_ctl_pre_fifo_0 ( .clk (clk), .rst (rst), .full_in (phy_ctl_full_temp[1]), .wr_en_in (phy_ctl_wr), .d_in (phy_ctl_wd), .wr_en_out (phy_ctl_wr_of), .d_out (phy_ctl_wd_of) ); mig_7series_v1_9_ddr_of_pre_fifo # ( .TCQ (25), .DEPTH (8), .WIDTH (6) ) phy_ctl_pre_fifo_1 ( .clk (clk), .rst (rst), .full_in (phy_ctl_full_temp[2]), .wr_en_in (phy_ctl_wr), .d_in (data_offset_1), .wr_en_out (), .d_out (data_offset_1_of) ); mig_7series_v1_9_ddr_of_pre_fifo # ( .TCQ (25), .DEPTH (8), .WIDTH (6) ) phy_ctl_pre_fifo_2 ( .clk (clk), .rst (rst), .full_in (phy_ctl_full_temp[3]), .wr_en_in (phy_ctl_wr), .d_in (data_offset_2), .wr_en_out (), .d_out (data_offset_2_of) ); end endgenerate //*************************************************************************** // Hard PHY instantiation //*************************************************************************** assign phy_ctl_full = phy_ctl_full_temp[0]; mig_7series_v1_9_ddr_mc_phy # ( .BYTE_LANES_B0 (BYTE_LANES_B0), .BYTE_LANES_B1 (BYTE_LANES_B1), .BYTE_LANES_B2 (BYTE_LANES_B2), .BYTE_LANES_B3 (BYTE_LANES_B3), .BYTE_LANES_B4 (BYTE_LANES_B4), .DATA_CTL_B0 (DATA_CTL_B0), .DATA_CTL_B1 (DATA_CTL_B1), .DATA_CTL_B2 (DATA_CTL_B2), .DATA_CTL_B3 (DATA_CTL_B3), .DATA_CTL_B4 (DATA_CTL_B4), .PHY_0_BITLANES (PHY_0_BITLANES), .PHY_1_BITLANES (PHY_1_BITLANES), .PHY_2_BITLANES (PHY_2_BITLANES), .PHY_0_BITLANES_OUTONLY (PHY_0_BITLANES_OUTONLY), .PHY_1_BITLANES_OUTONLY (PHY_1_BITLANES_OUTONLY), .PHY_2_BITLANES_OUTONLY (PHY_2_BITLANES_OUTONLY), .RCLK_SELECT_BANK (CKE_ODT_RCLK_SELECT_BANK), .RCLK_SELECT_LANE (CKE_ODT_RCLK_SELECT_LANE), //.CKE_ODT_AUX (CKE_ODT_AUX), .GENERATE_DDR_CK_MAP (TMP_GENERATE_DDR_CK_MAP), .BYTELANES_DDR_CK (TMP_BYTELANES_DDR_CK), .NUM_DDR_CK (CK_WIDTH), .LP_DDR_CK_WIDTH (LP_DDR_CK_WIDTH), .PO_CTL_COARSE_BYPASS ("FALSE"), .PHYCTL_CMD_FIFO ("FALSE"), .PHY_CLK_RATIO (nCK_PER_CLK), .MASTER_PHY_CTL (MASTER_PHY_CTL), .PHY_FOUR_WINDOW_CLOCKS (63), .PHY_EVENTS_DELAY (18), .PHY_COUNT_EN ("FALSE"), //PHY_COUNT_EN .PHY_SYNC_MODE ("FALSE"), .SYNTHESIS ((SIM_CAL_OPTION == "NONE") ? "TRUE" : "FALSE"), .PHY_DISABLE_SEQ_MATCH ("TRUE"), //"TRUE" .PHY_0_GENERATE_IDELAYCTRL ("FALSE"), .PHY_0_A_PI_FREQ_REF_DIV (PHY_0_A_PI_FREQ_REF_DIV), .PHY_0_CMD_OFFSET (PHY_0_CMD_OFFSET), //for CKE .PHY_0_RD_CMD_OFFSET_0 (PHY_0_RD_CMD_OFFSET_0), .PHY_0_RD_CMD_OFFSET_1 (PHY_0_RD_CMD_OFFSET_1), .PHY_0_RD_CMD_OFFSET_2 (PHY_0_RD_CMD_OFFSET_2), .PHY_0_RD_CMD_OFFSET_3 (PHY_0_RD_CMD_OFFSET_3), .PHY_0_RD_DURATION_0 (6), .PHY_0_RD_DURATION_1 (6), .PHY_0_RD_DURATION_2 (6), .PHY_0_RD_DURATION_3 (6), .PHY_0_WR_CMD_OFFSET_0 (PHY_0_WR_CMD_OFFSET_0), .PHY_0_WR_CMD_OFFSET_1 (PHY_0_WR_CMD_OFFSET_1), .PHY_0_WR_CMD_OFFSET_2 (PHY_0_WR_CMD_OFFSET_2), .PHY_0_WR_CMD_OFFSET_3 (PHY_0_WR_CMD_OFFSET_3), .PHY_0_WR_DURATION_0 (PHY_0_WR_DURATION_0), .PHY_0_WR_DURATION_1 (PHY_0_WR_DURATION_1), .PHY_0_WR_DURATION_2 (PHY_0_WR_DURATION_2), .PHY_0_WR_DURATION_3 (PHY_0_WR_DURATION_3), .PHY_0_AO_TOGGLE ((RANKS == 1) ? 
1 : 5), .PHY_0_A_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_0_B_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_0_C_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_0_D_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_0_A_PO_OCLKDELAY_INV (PO_OCLKDELAY_INV), .PHY_0_A_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_0_B_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_0_C_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_0_D_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_1_GENERATE_IDELAYCTRL ("FALSE"), //.PHY_1_GENERATE_DDR_CK (TMP_PHY_1_GENERATE_DDR_CK), //.PHY_1_NUM_DDR_CK (1), .PHY_1_A_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_1_B_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_1_C_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_1_D_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_1_A_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_1_B_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_1_C_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_1_D_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_2_GENERATE_IDELAYCTRL ("FALSE"), //.PHY_2_GENERATE_DDR_CK (TMP_PHY_2_GENERATE_DDR_CK), //.PHY_2_NUM_DDR_CK (1), .PHY_2_A_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_2_B_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_2_C_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_2_D_PO_OCLK_DELAY (PHY_0_A_PO_OCLK_DELAY), .PHY_2_A_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_2_B_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_2_C_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .PHY_2_D_IDELAYE2_IDELAY_VALUE (PHY_0_A_IDELAYE2_IDELAY_VALUE), .TCK (tCK), .PHY_0_IODELAY_GRP (IODELAY_GRP) ,.PHY_1_IODELAY_GRP (IODELAY_GRP) ,.PHY_2_IODELAY_GRP (IODELAY_GRP) ,.BANK_TYPE (BANK_TYPE) ,.CKE_ODT_AUX (CKE_ODT_AUX) ) u_ddr_mc_phy ( .rst (rst), // Don't use MC_PHY to generate DDR_RESET_N output. 
Instead // generate this output outside of MC_PHY (and synchronous to CLK) .ddr_rst_in_n (1'b1), .phy_clk (clk), .freq_refclk (freq_refclk), .mem_refclk (mem_refclk), // Remove later - always same connection as phy_clk port .mem_refclk_div4 (clk), .pll_lock (pll_lock), .auxout_clk (), .sync_pulse (sync_pulse), // IDELAYCTRL instantiated outside of mc_phy module .idelayctrl_refclk (), .phy_dout (phy_dout), .phy_cmd_wr_en (phy_cmd_wr_en), .phy_data_wr_en (phy_data_wr_en), .phy_rd_en (phy_rd_en), .phy_ctl_wd (phy_ctl_wd_temp), .phy_ctl_wr (phy_ctl_wr_temp), .if_empty_def (phy_if_empty_def), .if_rst (phy_if_reset), .phyGo ('b1), .aux_in_1 (aux_in_1), .aux_in_2 (aux_in_2), // No support yet for different data offsets for different I/O banks // (possible use in supporting wider range of skew among bytes) .data_offset_1 (data_offset_1_temp), .data_offset_2 (data_offset_2_temp), .cke_in (), .if_a_empty (), .if_empty (if_empty), .if_empty_or (), .if_empty_and (), .of_ctl_a_full (), // .of_data_a_full (phy_data_full), .of_ctl_full (phy_cmd_full), .of_data_full (), .pre_data_a_full (phy_pre_data_a_full), .idelay_ld (idelay_ld), .idelay_ce (idelay_ce), .idelay_inc (idelay_inc), .input_sink (), .phy_din (phy_din), .phy_ctl_a_full (), .phy_ctl_full (phy_ctl_full_temp), .mem_dq_out (mem_dq_out), .mem_dq_ts (mem_dq_ts), .mem_dq_in (mem_dq_in), .mem_dqs_out (mem_dqs_out), .mem_dqs_ts (mem_dqs_ts), .mem_dqs_in (mem_dqs_in), .aux_out (aux_out), .phy_ctl_ready (), .rst_out (), .ddr_clk (ddr_clk), //.rclk (), .mcGo (phy_mc_go), .phy_write_calib (phy_write_calib), .phy_read_calib (phy_read_calib), .calib_sel (calib_sel), .calib_in_common (calib_in_common), .calib_zero_inputs (calib_zero_inputs), .calib_zero_ctrl (calib_zero_ctrl), .calib_zero_lanes ('b0), .po_fine_enable (po_fine_enable), .po_coarse_enable (po_coarse_enable), .po_fine_inc (po_fine_inc), .po_coarse_inc (po_coarse_inc), .po_counter_load_en (po_counter_load_en), .po_sel_fine_oclk_delay (po_sel_fine_oclk_delay), .po_counter_load_val (po_counter_load_val), .po_counter_read_en (po_counter_read_en), .po_coarse_overflow (), .po_fine_overflow (), .po_counter_read_val (po_counter_read_val), .pi_rst_dqs_find (pi_rst_dqs_find), .pi_fine_enable (pi_fine_enable), .pi_fine_inc (pi_fine_inc), .pi_counter_load_en (pi_counter_load_en), .pi_counter_read_en (dbg_pi_counter_read_en), .pi_counter_load_val (pi_counter_load_val), .pi_fine_overflow (), .pi_counter_read_val (pi_counter_read_val), .pi_phase_locked (pi_phase_locked), .pi_phase_locked_all (pi_phase_locked_all), .pi_dqs_found (), .pi_dqs_found_any (pi_dqs_found), .pi_dqs_found_all (pi_dqs_found_all), .pi_dqs_found_lanes (dbg_pi_dqs_found_lanes_phy4lanes), // Currently not being used. May be used in future if periodic // reads become a requirement. This output could be used to signal // a catastrophic failure in read capture and the need for // re-calibration. .pi_dqs_out_of_range (pi_dqs_out_of_range) ,.ref_dll_lock (ref_dll_lock) ,.pi_phase_locked_lanes (dbg_pi_phase_locked_phy4lanes) // ,.rst_phaser_ref (rst_phaser_ref) ); endmodule
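// ----------------------------------------------------------------------
// Editorial sketch (not part of the vendor sources above or below): the
// "Read data bit steering" comments in the wrapper above describe how
// phy_din (all 2*nCK_PER_CLK data phases of one DQ bit packed together) is
// transposed into rd_data (all DQ bits of one phase packed together). The
// stand-alone module below keeps only that transposition and assumes the
// per-bit phase groups are already contiguous, i.e. it leaves out the
// bank/byte-lane/bit-lane placement that FULL_DATA_MAP adds in the real
// wrapper. All names here are illustrative.
module rd_data_transpose_example #
  (
   parameter nCK_PER_CLK = 4,   // memory:logic clock ratio
   parameter DQ_WIDTH    = 16   // # of DQ bits
   )
  (
   // {bit N-1 phases, ..., bit 0 phases}, 2*nCK_PER_CLK phases per bit
   input  [DQ_WIDTH*2*nCK_PER_CLK-1:0] per_bit_phases,
   // {last phase bits, ..., phase 0 bits}, DQ_WIDTH bits per phase
   output [DQ_WIDTH*2*nCK_PER_CLK-1:0] rd_data
   );

  localparam PHASE_PER_CLK = 2*nCK_PER_CLK;

  genvar i, j;
  generate
    for (i = 0; i < DQ_WIDTH; i = i + 1) begin: gen_bit
      for (j = 0; j < PHASE_PER_CLK; j = j + 1) begin: gen_phase
        // Same index pattern as the wrapper's gen_loop_rd_data_* loops,
        // minus the 320/80/8 offsets that FULL_DATA_MAP supplies there
        assign rd_data[DQ_WIDTH*j + i] = per_bit_phases[PHASE_PER_CLK*i + j];
      end
    end
  endgenerate

  // For DQ_WIDTH = 16 and nCK_PER_CLK = 4 this reproduces the shape of the
  // example quoted in the wrapper's comments: rd_data[113], rd_data[97], ...,
  // rd_data[1] all come from the 8-bit group per_bit_phases[15:8] (the slice
  // standing in here for DQ[1]'s phy_din group).
endmodule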
// (C) 2001-2016 Altera Corporation. All rights reserved. // Your use of Altera Corporation's design tools, logic functions and other // software and tools, and its AMPP partner logic functions, and any output // files any of the foregoing (including device programming or simulation // files), and any associated documentation or information are expressly subject // to the terms and conditions of the Altera Program License Subscription // Agreement, Altera MegaCore Function License Agreement, or other applicable // license agreement, including, without limitation, that your use is for the // sole purpose of programming logic devices manufactured by Altera and sold by // Altera or its authorized distributors. Please refer to the applicable // agreement for further details. // THIS FILE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THIS FILE OR THE USE OR OTHER DEALINGS // IN THIS FILE. /****************************************************************************** * * * This module reads and writes data to the RS232 connectpr on Altera's * * DE1 and DE2 Development and Education Boards. * * * ******************************************************************************/ module altera_up_rs232_counters ( // Inputs clk, reset, reset_counters, // Bidirectionals // Outputs baud_clock_rising_edge, baud_clock_falling_edge, all_bits_transmitted ); /***************************************************************************** * Parameter Declarations * *****************************************************************************/ parameter CW = 9; // BAUD COUNTER WIDTH parameter BAUD_TICK_COUNT = 433; parameter HALF_BAUD_TICK_COUNT = 216; parameter TDW = 11; // TOTAL DATA WIDTH /***************************************************************************** * Port Declarations * *****************************************************************************/ // Inputs input clk; input reset; input reset_counters; // Bidirectionals // Outputs output reg baud_clock_rising_edge; output reg baud_clock_falling_edge; output reg all_bits_transmitted; /***************************************************************************** * Constant Declarations * *****************************************************************************/ /***************************************************************************** * Internal Wires and Registers Declarations * *****************************************************************************/ // Internal Wires // Internal Registers reg [(CW-1):0] baud_counter; reg [ 3: 0] bit_counter; // State Machine Registers /***************************************************************************** * Finite State Machine(s) * *****************************************************************************/ /***************************************************************************** * Sequential Logic * *****************************************************************************/ always @(posedge clk) begin if (reset) baud_counter <= {CW{1'b0}}; else if (reset_counters) baud_counter <= {CW{1'b0}}; else if (baud_counter == BAUD_TICK_COUNT) baud_counter <= {CW{1'b0}}; else baud_counter <= baud_counter + 1; end always @(posedge clk) 
begin if (reset) baud_clock_rising_edge <= 1'b0; else if (baud_counter == BAUD_TICK_COUNT) baud_clock_rising_edge <= 1'b1; else baud_clock_rising_edge <= 1'b0; end always @(posedge clk) begin if (reset) baud_clock_falling_edge <= 1'b0; else if (baud_counter == HALF_BAUD_TICK_COUNT) baud_clock_falling_edge <= 1'b1; else baud_clock_falling_edge <= 1'b0; end always @(posedge clk) begin if (reset) bit_counter <= 4'h0; else if (reset_counters) bit_counter <= 4'h0; else if (bit_counter == TDW) bit_counter <= 4'h0; else if (baud_counter == BAUD_TICK_COUNT) bit_counter <= bit_counter + 4'h1; end always @(posedge clk) begin if (reset) all_bits_transmitted <= 1'b0; else if (bit_counter == TDW) all_bits_transmitted <= 1'b1; else all_bits_transmitted <= 1'b0; end /***************************************************************************** * Combinational Logic * *****************************************************************************/ /***************************************************************************** * Internal Modules * *****************************************************************************/ endmodule
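// ----------------------------------------------------------------------
// Editorial note (not part of the Altera IP above): the default values
// BAUD_TICK_COUNT = 433 and HALF_BAUD_TICK_COUNT = 216 appear to correspond
// to a 50 MHz clock driving a 115200 baud line: 50_000_000 / 115_200 is
// roughly 434 clock cycles per bit, counted as 0..433, with the half-bit
// point near 216. The stand-alone sketch below shows one way such a tick
// could be derived from a clock rate and baud rate; the module name, the
// parameter names, and the 50 MHz / 115200 figures are assumptions for
// illustration, not taken from the original file.
module baud_tick_calc_example #
  (
   parameter CLOCK_RATE_HZ = 50_000_000, // assumed board clock
   parameter BAUD_RATE     = 115_200,    // assumed line rate
   parameter CW            = 9           // counter width, must fit the count
   )
  (
   input      clk,
   input      reset,
   output reg baud_tick   // 1-cycle pulse once per bit period
   );

  // Cycles per bit, minus one because the counter starts from 0
  // (433 for the assumed 50 MHz / 115200 case, matching the IP's default)
  localparam BAUD_TICK_COUNT = (CLOCK_RATE_HZ / BAUD_RATE) - 1;

  reg [(CW-1):0] baud_counter;

  always @(posedge clk)
  begin
    if (reset)
    begin
      baud_counter <= {CW{1'b0}};
      baud_tick    <= 1'b0;
    end
    else if (baud_counter == BAUD_TICK_COUNT)
    begin
      baud_counter <= {CW{1'b0}};
      baud_tick    <= 1'b1;
    end
    else
    begin
      baud_counter <= baud_counter + 1'b1;
      baud_tick    <= 1'b0;
    end
  end

endmodule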
// ---------------------------------------------------------------------- // Copyright (c) 2016, The Regents of the University of California All // rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents of the University of California // nor the names of its contributors may be used to endorse or // promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE // UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS // OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // ---------------------------------------------------------------------- //---------------------------------------------------------------------------- // Filename: sg_list_reader_32.v // Version: 1.00.a // Verilog Standard: Verilog-2001 // Description: Reads data from the scatter gather list buffer. 
// Author: Matt Jacobsen // History: @mattj: Version 2.0 //----------------------------------------------------------------------------- `define S_SGR32_RD_0 3'b000 `define S_SGR32_RD_1 3'b001 `define S_SGR32_RD_2 3'b010 `define S_SGR32_RD_3 3'b011 `define S_SGR32_RD_WAIT 3'b100 `define S_SGR32_CAP_0 3'b000 `define S_SGR32_CAP_1 3'b001 `define S_SGR32_CAP_2 3'b010 `define S_SGR32_CAP_3 3'b011 `define S_SGR32_CAP_RDY 3'b100 `timescale 1ns/1ns module sg_list_reader_32 #( parameter C_DATA_WIDTH = 9'd32 ) ( input CLK, input RST, input [C_DATA_WIDTH-1:0] BUF_DATA, // Scatter gather buffer data input BUF_DATA_EMPTY, // Scatter gather buffer data empty output BUF_DATA_REN, // Scatter gather buffer data read enable output VALID, // Scatter gather element data is valid output EMPTY, // Scatter gather elements empty input REN, // Scatter gather element data read enable output [63:0] ADDR, // Scatter gather element address output [31:0] LEN // Scatter gather element length (in words) ); (* syn_encoding = "user" *) (* fsm_encoding = "user" *) reg [2:0] rRdState=`S_SGR32_RD_0, _rRdState=`S_SGR32_RD_0; (* syn_encoding = "user" *) (* fsm_encoding = "user" *) reg [2:0] rCapState=`S_SGR32_CAP_0, _rCapState=`S_SGR32_CAP_0; reg [C_DATA_WIDTH-1:0] rData={C_DATA_WIDTH{1'd0}}, _rData={C_DATA_WIDTH{1'd0}}; reg [63:0] rAddr=64'd0, _rAddr=64'd0; reg [31:0] rLen=0, _rLen=0; reg rFifoValid=0, _rFifoValid=0; reg rDataValid=0, _rDataValid=0; assign BUF_DATA_REN = !rRdState[2]; // Not S_SGR32_RD_0 assign VALID = rCapState[2]; // S_SGR32_CAP_RDY assign EMPTY = (BUF_DATA_EMPTY & !rRdState[2]); // Not S_SGR32_RD_0 assign ADDR = rAddr; assign LEN = rLen; // Capture address and length as it comes out of the FIFO always @ (posedge CLK) begin rRdState <= #1 (RST ? `S_SGR32_RD_0 : _rRdState); rCapState <= #1 (RST ? `S_SGR32_CAP_0 : _rCapState); rData <= #1 _rData; rFifoValid <= #1 (RST ? 1'd0 : _rFifoValid); rDataValid <= #1 (RST ? 1'd0 : _rDataValid); rAddr <= #1 _rAddr; rLen <= #1 _rLen; end always @ (*) begin _rRdState = rRdState; _rCapState = rCapState; _rAddr = rAddr; _rLen = rLen; _rData = BUF_DATA; _rFifoValid = (BUF_DATA_REN & !BUF_DATA_EMPTY); _rDataValid = rFifoValid; case (rCapState) `S_SGR32_CAP_0: begin if (rDataValid) begin _rAddr[31:0] = rData; _rCapState = `S_SGR32_CAP_1; end end `S_SGR32_CAP_1: begin if (rDataValid) begin _rAddr[63:32] = rData; _rCapState = `S_SGR32_CAP_2; end end `S_SGR32_CAP_2: begin if (rDataValid) begin _rLen = rData; _rCapState = `S_SGR32_CAP_3; end end `S_SGR32_CAP_3: begin if (rDataValid) _rCapState = `S_SGR32_CAP_RDY; end `S_SGR32_CAP_RDY: begin if (REN) _rCapState = `S_SGR32_CAP_0; end default: begin _rCapState = `S_SGR32_CAP_0; end endcase case (rRdState) `S_SGR32_RD_0: begin // Read from the sg data FIFO if (!BUF_DATA_EMPTY) _rRdState = `S_SGR32_RD_1; end `S_SGR32_RD_1: begin // Read from the sg data FIFO if (!BUF_DATA_EMPTY) _rRdState = `S_SGR32_RD_2; end `S_SGR32_RD_2: begin // Read from the sg data FIFO if (!BUF_DATA_EMPTY) _rRdState = `S_SGR32_RD_3; end `S_SGR32_RD_3: begin // Read from the sg data FIFO if (!BUF_DATA_EMPTY) _rRdState = `S_SGR32_RD_WAIT; end `S_SGR32_RD_WAIT: begin // Wait for the data to be consumed if (REN) _rRdState = `S_SGR32_RD_0; end default: begin _rRdState = `S_SGR32_RD_0; end endcase end endmodule
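// ----------------------------------------------------------------------
// Editorial sketch (not part of the RIFFA sources above or below): the
// reader above consumes exactly four 32-bit words per scatter gather
// element from BUF_DATA, in this order:
//   word 0 -> ADDR[31:0]
//   word 1 -> ADDR[63:32]
//   word 2 -> LEN
//   word 3 -> read to advance the FIFO but not stored (element padding)
// The stand-alone helper below just packs one element into that 4-word
// layout, e.g. for a testbench or list-writer model feeding BUF_DATA; the
// module, function name, and example values are made up for illustration.
module sg_element_pack_example;

  // Pack one scatter gather element into four 32-bit words, word 0 in [31:0]
  function [127:0] pack_sg_element;
    input [63:0] addr;  // element address
    input [31:0] len;   // element length (in words, per the LEN port comment)
    begin
      pack_sg_element = {32'd0,         // word 3: padding, ignored by the reader
                         len,           // word 2: length
                         addr[63:32],   // word 1: upper half of the address
                         addr[31:0]};   // word 0: lower half of the address
    end
  endfunction

  reg [127:0] element;

  initial
  begin
    // Hypothetical element: made-up address and length
    element = pack_sg_element(64'h0000_0001_2340_0000, 32'd1024);
    $display("word0=%h word1=%h word2=%h word3=%h",
             element[31:0], element[63:32], element[95:64], element[127:96]);
  end

endmodule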
// ---------------------------------------------------------------------- // Copyright (c) 2016, The Regents of the University of California All // rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents of the University of California // nor the names of its contributors may be used to endorse or // promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE // UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS // OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // ---------------------------------------------------------------------- //---------------------------------------------------------------------------- // Filename: sg_list_reader_32.v // Version: 1.00.a // Verilog Standard: Verilog-2001 // Description: Reads data from the scatter gather list buffer. 
// Author: Matt Jacobsen // History: @mattj: Version 2.0 //----------------------------------------------------------------------------- `define S_SGR32_RD_0 3'b000 `define S_SGR32_RD_1 3'b001 `define S_SGR32_RD_2 3'b010 `define S_SGR32_RD_3 3'b011 `define S_SGR32_RD_WAIT 3'b100 `define S_SGR32_CAP_0 3'b000 `define S_SGR32_CAP_1 3'b001 `define S_SGR32_CAP_2 3'b010 `define S_SGR32_CAP_3 3'b011 `define S_SGR32_CAP_RDY 3'b100 `timescale 1ns/1ns module sg_list_reader_32 #( parameter C_DATA_WIDTH = 9'd32 ) ( input CLK, input RST, input [C_DATA_WIDTH-1:0] BUF_DATA, // Scatter gather buffer data input BUF_DATA_EMPTY, // Scatter gather buffer data empty output BUF_DATA_REN, // Scatter gather buffer data read enable output VALID, // Scatter gather element data is valid output EMPTY, // Scatter gather elements empty input REN, // Scatter gather element data read enable output [63:0] ADDR, // Scatter gather element address output [31:0] LEN // Scatter gather element length (in words) ); (* syn_encoding = "user" *) (* fsm_encoding = "user" *) reg [2:0] rRdState=`S_SGR32_RD_0, _rRdState=`S_SGR32_RD_0; (* syn_encoding = "user" *) (* fsm_encoding = "user" *) reg [2:0] rCapState=`S_SGR32_CAP_0, _rCapState=`S_SGR32_CAP_0; reg [C_DATA_WIDTH-1:0] rData={C_DATA_WIDTH{1'd0}}, _rData={C_DATA_WIDTH{1'd0}}; reg [63:0] rAddr=64'd0, _rAddr=64'd0; reg [31:0] rLen=0, _rLen=0; reg rFifoValid=0, _rFifoValid=0; reg rDataValid=0, _rDataValid=0; assign BUF_DATA_REN = !rRdState[2]; // Not S_SGR32_RD_0 assign VALID = rCapState[2]; // S_SGR32_CAP_RDY assign EMPTY = (BUF_DATA_EMPTY & !rRdState[2]); // Not S_SGR32_RD_0 assign ADDR = rAddr; assign LEN = rLen; // Capture address and length as it comes out of the FIFO always @ (posedge CLK) begin rRdState <= #1 (RST ? `S_SGR32_RD_0 : _rRdState); rCapState <= #1 (RST ? `S_SGR32_CAP_0 : _rCapState); rData <= #1 _rData; rFifoValid <= #1 (RST ? 1'd0 : _rFifoValid); rDataValid <= #1 (RST ? 1'd0 : _rDataValid); rAddr <= #1 _rAddr; rLen <= #1 _rLen; end always @ (*) begin _rRdState = rRdState; _rCapState = rCapState; _rAddr = rAddr; _rLen = rLen; _rData = BUF_DATA; _rFifoValid = (BUF_DATA_REN & !BUF_DATA_EMPTY); _rDataValid = rFifoValid; case (rCapState) `S_SGR32_CAP_0: begin if (rDataValid) begin _rAddr[31:0] = rData; _rCapState = `S_SGR32_CAP_1; end end `S_SGR32_CAP_1: begin if (rDataValid) begin _rAddr[63:32] = rData; _rCapState = `S_SGR32_CAP_2; end end `S_SGR32_CAP_2: begin if (rDataValid) begin _rLen = rData; _rCapState = `S_SGR32_CAP_3; end end `S_SGR32_CAP_3: begin if (rDataValid) _rCapState = `S_SGR32_CAP_RDY; end `S_SGR32_CAP_RDY: begin if (REN) _rCapState = `S_SGR32_CAP_0; end default: begin _rCapState = `S_SGR32_CAP_0; end endcase case (rRdState) `S_SGR32_RD_0: begin // Read from the sg data FIFO if (!BUF_DATA_EMPTY) _rRdState = `S_SGR32_RD_1; end `S_SGR32_RD_1: begin // Read from the sg data FIFO if (!BUF_DATA_EMPTY) _rRdState = `S_SGR32_RD_2; end `S_SGR32_RD_2: begin // Read from the sg data FIFO if (!BUF_DATA_EMPTY) _rRdState = `S_SGR32_RD_3; end `S_SGR32_RD_3: begin // Read from the sg data FIFO if (!BUF_DATA_EMPTY) _rRdState = `S_SGR32_RD_WAIT; end `S_SGR32_RD_WAIT: begin // Wait for the data to be consumed if (REN) _rRdState = `S_SGR32_RD_0; end default: begin _rRdState = `S_SGR32_RD_0; end endcase end endmodule
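// ----------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original RIFFA sources):
// a minimal testbench showing one way sg_list_reader_32 expects to be
// driven. It assumes the upstream scatter gather buffer behaves like a
// standard (registered-output) FIFO, i.e. BUF_DATA is presented one cycle
// after BUF_DATA_REN is accepted, and it assumes the four buffered words
// per element are {ADDR[31:0], ADDR[63:32], LEN, unused}. Names that do
// not appear in the module above (sg_list_reader_32_tb, rFifo, rRdPtr,
// rBufData, rElemRen) are invented for this example only.
// ----------------------------------------------------------------------
`timescale 1ns/1ns
module sg_list_reader_32_tb;

reg         CLK = 0;
reg         RST = 1;

// Behavioral 4-word FIFO model holding one scatter gather element.
reg  [31:0] rFifo [0:3];
reg  [2:0]  rRdPtr = 3'd0;
reg  [31:0] rBufData = 32'd0;
wire        wEmpty = (rRdPtr == 3'd4);

wire        wRen;
wire        wValid;
wire        wElemEmpty;
reg         rElemRen = 0;
wire [63:0] wAddr;
wire [31:0] wLen;

// Unit under test.
sg_list_reader_32 #(.C_DATA_WIDTH(9'd32)) uut (
    .CLK            (CLK),
    .RST            (RST),
    .BUF_DATA       (rBufData),
    .BUF_DATA_EMPTY (wEmpty),
    .BUF_DATA_REN   (wRen),
    .VALID          (wValid),
    .EMPTY          (wElemEmpty),
    .REN            (rElemRen),
    .ADDR           (wAddr),
    .LEN            (wLen)
);

always #5 CLK = ~CLK; // 100 MHz

// Registered-output FIFO model: data appears one cycle after the read.
always @ (posedge CLK) begin
    if (RST) begin
        rRdPtr <= 3'd0;
    end else if (wRen & !wEmpty) begin
        rBufData <= rFifo[rRdPtr[1:0]];
        rRdPtr   <= rRdPtr + 3'd1;
    end
end

initial begin
    // One element: address 0x0000_0001_8000_0000, length 16 words.
    rFifo[0] = 32'h8000_0000; // ADDR[31:0]
    rFifo[1] = 32'h0000_0001; // ADDR[63:32]
    rFifo[2] = 32'd16;        // LEN (in words)
    rFifo[3] = 32'd0;         // Fourth word is read but not used
    repeat (4) @(posedge CLK);
    #1 RST = 0;
    wait (wValid);
    @(posedge CLK);
    $display("ADDR=%h LEN=%d", wAddr, wLen);
    #1 rElemRen = 1;          // Consume the element for one cycle
    @(posedge CLK);
    #1 rElemRen = 0;
    repeat (2) @(posedge CLK);
    $finish;
end

endmodule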
(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* <O___,, * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2015 *) (* \VV/ **************************************************************) (* // * This file is distributed under the terms of the *) (* * GNU Lesser General Public License Version 2.1 *) (************************************************************************) (** * Int31 numbers defines indeed a cyclic structure : Z/(2^31)Z *) (** Author: Arnaud Spiwack (+ Pierre Letouzey) *) Require Import List. Require Import Min. Require Export Int31. Require Import Znumtheory. Require Import Zgcd_alt. Require Import Zpow_facts. Require Import BigNumPrelude. Require Import CyclicAxioms. Require Import ROmega. Local Open Scope nat_scope. Local Open Scope int31_scope. Section Basics. (** * Basic results about [iszero], [shiftl], [shiftr] *) Lemma iszero_eq0 : forall x, iszero x = true -> x=0. Proof. destruct x; simpl; intros. repeat match goal with H:(if ?d then _ else _) = true |- _ => destruct d; try discriminate end. reflexivity. Qed. Lemma iszero_not_eq0 : forall x, iszero x = false -> x<>0. Proof. intros x H Eq; rewrite Eq in H; simpl in *; discriminate. Qed. Lemma sneakl_shiftr : forall x, x = sneakl (firstr x) (shiftr x). Proof. destruct x; simpl; auto. Qed. Lemma sneakr_shiftl : forall x, x = sneakr (firstl x) (shiftl x). Proof. destruct x; simpl; auto. Qed. Lemma twice_zero : forall x, twice x = 0 <-> twice_plus_one x = 1. Proof. destruct x; simpl in *; split; intro H; injection H; intros; subst; auto. Qed. Lemma twice_or_twice_plus_one : forall x, x = twice (shiftr x) \/ x = twice_plus_one (shiftr x). Proof. intros; case_eq (firstr x); intros. destruct x; simpl in *; rewrite H; auto. destruct x; simpl in *; rewrite H; auto. Qed. (** * Iterated shift to the right *) Definition nshiftr x := nat_rect _ x (fun _ => shiftr). Lemma nshiftr_S : forall n x, nshiftr x (S n) = shiftr (nshiftr x n). Proof. reflexivity. Qed. Lemma nshiftr_S_tail : forall n x, nshiftr x (S n) = nshiftr (shiftr x) n. Proof. intros n; elim n; simpl; auto. intros; now f_equal. Qed. Lemma nshiftr_n_0 : forall n, nshiftr 0 n = 0. Proof. induction n; simpl; auto. rewrite IHn; auto. Qed. Lemma nshiftr_size : forall x, nshiftr x size = 0. Proof. destruct x; simpl; auto. Qed. Lemma nshiftr_above_size : forall k x, size<=k -> nshiftr x k = 0. Proof. intros. replace k with ((k-size)+size)%nat by omega. induction (k-size)%nat; auto. rewrite nshiftr_size; auto. simpl; rewrite IHn; auto. Qed. (** * Iterated shift to the left *) Definition nshiftl x := nat_rect _ x (fun _ => shiftl). Lemma nshiftl_S : forall n x, nshiftl x (S n) = shiftl (nshiftl x n). Proof. reflexivity. Qed. Lemma nshiftl_S_tail : forall n x, nshiftl x (S n) = nshiftl (shiftl x) n. Proof. intros n; elim n; simpl; intros; now f_equal. Qed. Lemma nshiftl_n_0 : forall n, nshiftl 0 n = 0. Proof. induction n; simpl; auto. rewrite IHn; auto. Qed. Lemma nshiftl_size : forall x, nshiftl x size = 0. Proof. destruct x; simpl; auto. Qed. Lemma nshiftl_above_size : forall k x, size<=k -> nshiftl x k = 0. Proof. intros. replace k with ((k-size)+size)%nat by omega. induction (k-size)%nat; auto. rewrite nshiftl_size; auto. simpl; rewrite IHn; auto. Qed. Lemma firstr_firstl : forall x, firstr x = firstl (nshiftl x (pred size)). Proof. destruct x; simpl; auto. Qed. Lemma firstl_firstr : forall x, firstl x = firstr (nshiftr x (pred size)). Proof. destruct x; simpl; auto. Qed. 
(** More advanced results about [nshiftr] *) Lemma nshiftr_predsize_0_firstl : forall x, nshiftr x (pred size) = 0 -> firstl x = D0. Proof. destruct x; compute; intros H; injection H; intros; subst; auto. Qed. Lemma nshiftr_0_propagates : forall n p x, n <= p -> nshiftr x n = 0 -> nshiftr x p = 0. Proof. intros. replace p with ((p-n)+n)%nat by omega. induction (p-n)%nat. simpl; auto. simpl; rewrite IHn0; auto. Qed. Lemma nshiftr_0_firstl : forall n x, n < size -> nshiftr x n = 0 -> firstl x = D0. Proof. intros. apply nshiftr_predsize_0_firstl. apply nshiftr_0_propagates with n; auto; omega. Qed. (** * Some induction principles over [int31] *) (** Not used for the moment. Are they really useful ? *) Lemma int31_ind_sneakl : forall P : int31->Prop, P 0 -> (forall x d, P x -> P (sneakl d x)) -> forall x, P x. Proof. intros. assert (forall n, n<=size -> P (nshiftr x (size - n))). induction n; intros. rewrite nshiftr_size; auto. rewrite sneakl_shiftr. apply H0. change (P (nshiftr x (S (size - S n)))). replace (S (size - S n))%nat with (size - n)%nat by omega. apply IHn; omega. change x with (nshiftr x (size-size)); auto. Qed. Lemma int31_ind_twice : forall P : int31->Prop, P 0 -> (forall x, P x -> P (twice x)) -> (forall x, P x -> P (twice_plus_one x)) -> forall x, P x. Proof. induction x using int31_ind_sneakl; auto. destruct d; auto. Qed. (** * Some generic results about [recr] *) Section Recr. (** [recr] satisfies the fixpoint equation used for its definition. *) Variable (A:Type)(case0:A)(caserec:digits->int31->A->A). Lemma recr_aux_eqn : forall n x, iszero x = false -> recr_aux (S n) A case0 caserec x = caserec (firstr x) (shiftr x) (recr_aux n A case0 caserec (shiftr x)). Proof. intros; simpl; rewrite H; auto. Qed. Lemma recr_aux_converges : forall n p x, n <= size -> n <= p -> recr_aux n A case0 caserec (nshiftr x (size - n)) = recr_aux p A case0 caserec (nshiftr x (size - n)). Proof. induction n. simpl minus; intros. rewrite nshiftr_size; destruct p; simpl; auto. intros. destruct p. inversion H0. unfold recr_aux; fold recr_aux. destruct (iszero (nshiftr x (size - S n))); auto. f_equal. change (shiftr (nshiftr x (size - S n))) with (nshiftr x (S (size - S n))). replace (S (size - S n))%nat with (size - n)%nat by omega. apply IHn; auto with arith. Qed. Lemma recr_eqn : forall x, iszero x = false -> recr A case0 caserec x = caserec (firstr x) (shiftr x) (recr A case0 caserec (shiftr x)). Proof. intros. unfold recr. change x with (nshiftr x (size - size)). rewrite (recr_aux_converges size (S size)); auto with arith. rewrite recr_aux_eqn; auto. Qed. (** [recr] is usually equivalent to a variant [recrbis] written without [iszero] check. *) Fixpoint recrbis_aux (n:nat)(A:Type)(case0:A)(caserec:digits->int31->A->A) (i:int31) : A := match n with | O => case0 | S next => let si := shiftr i in caserec (firstr i) si (recrbis_aux next A case0 caserec si) end. Definition recrbis := recrbis_aux size. Hypothesis case0_caserec : caserec D0 0 case0 = case0. Lemma recrbis_aux_equiv : forall n x, recrbis_aux n A case0 caserec x = recr_aux n A case0 caserec x. Proof. induction n; simpl; auto; intros. case_eq (iszero x); intros; [ | f_equal; auto ]. rewrite (iszero_eq0 _ H); simpl; auto. replace (recrbis_aux n A case0 caserec 0) with case0; auto. clear H IHn; induction n; simpl; congruence. Qed. Lemma recrbis_equiv : forall x, recrbis A case0 caserec x = recr A case0 caserec x. Proof. intros; apply recrbis_aux_equiv; auto. Qed. End Recr. (** * Incrementation *) Section Incr. 
(** Variant of [incr] via [recrbis] *) Let Incr (b : digits) (si rec : int31) := match b with | D0 => sneakl D1 si | D1 => sneakl D0 rec end. Definition incrbis_aux n x := recrbis_aux n _ In Incr x. Lemma incrbis_aux_equiv : forall x, incrbis_aux size x = incr x. Proof. unfold incr, recr, incrbis_aux; fold Incr; intros. apply recrbis_aux_equiv; auto. Qed. (** Recursive equations satisfied by [incr] *) Lemma incr_eqn1 : forall x, firstr x = D0 -> incr x = twice_plus_one (shiftr x). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0); simpl; auto. unfold incr; rewrite recr_eqn; fold incr; auto. rewrite H; auto. Qed. Lemma incr_eqn2 : forall x, firstr x = D1 -> incr x = twice (incr (shiftr x)). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0) in H; simpl in H; discriminate. unfold incr; rewrite recr_eqn; fold incr; auto. rewrite H; auto. Qed. Lemma incr_twice : forall x, incr (twice x) = twice_plus_one x. Proof. intros. rewrite incr_eqn1; destruct x; simpl; auto. Qed. Lemma incr_twice_plus_one_firstl : forall x, firstl x = D0 -> incr (twice_plus_one x) = twice (incr x). Proof. intros. rewrite incr_eqn2; [ | destruct x; simpl; auto ]. f_equal; f_equal. destruct x; simpl in *; rewrite H; auto. Qed. (** The previous result is actually true even without the constraint on [firstl], but this is harder to prove (see later). *) End Incr. (** * Conversion to [Z] : the [phi] function *) Section Phi. (** Variant of [phi] via [recrbis] *) Let Phi := fun b (_:int31) => match b with D0 => Z.double | D1 => Z.succ_double end. Definition phibis_aux n x := recrbis_aux n _ Z0 Phi x. Lemma phibis_aux_equiv : forall x, phibis_aux size x = phi x. Proof. unfold phi, recr, phibis_aux; fold Phi; intros. apply recrbis_aux_equiv; auto. Qed. (** Recursive equations satisfied by [phi] *) Lemma phi_eqn1 : forall x, firstr x = D0 -> phi x = Z.double (phi (shiftr x)). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0); simpl; auto. intros; unfold phi; rewrite recr_eqn; fold phi; auto. rewrite H; auto. Qed. Lemma phi_eqn2 : forall x, firstr x = D1 -> phi x = Z.succ_double (phi (shiftr x)). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0) in H; simpl in H; discriminate. intros; unfold phi; rewrite recr_eqn; fold phi; auto. rewrite H; auto. Qed. Lemma phi_twice_firstl : forall x, firstl x = D0 -> phi (twice x) = Z.double (phi x). Proof. intros. rewrite phi_eqn1; auto; [ | destruct x; auto ]. f_equal; f_equal. destruct x; simpl in *; rewrite H; auto. Qed. Lemma phi_twice_plus_one_firstl : forall x, firstl x = D0 -> phi (twice_plus_one x) = Z.succ_double (phi x). Proof. intros. rewrite phi_eqn2; auto; [ | destruct x; auto ]. f_equal; f_equal. destruct x; simpl in *; rewrite H; auto. Qed. End Phi. (** [phi x] is positive and lower than [2^31] *) Lemma phibis_aux_pos : forall n x, (0 <= phibis_aux n x)%Z. Proof. induction n. simpl; unfold phibis_aux; simpl; auto with zarith. intros. unfold phibis_aux, recrbis_aux; fold recrbis_aux; fold (phibis_aux n (shiftr x)). destruct (firstr x). specialize IHn with (shiftr x); rewrite Z.double_spec; omega. specialize IHn with (shiftr x); rewrite Z.succ_double_spec; omega. Qed. Lemma phibis_aux_bounded : forall n x, n <= size -> (phibis_aux n (nshiftr x (size-n)) < 2 ^ (Z.of_nat n))%Z. Proof. induction n. simpl minus; unfold phibis_aux; simpl; auto with zarith. intros. unfold phibis_aux, recrbis_aux; fold recrbis_aux; fold (phibis_aux n (shiftr (nshiftr x (size - S n)))). 
assert (shiftr (nshiftr x (size - S n)) = nshiftr x (size-n)). replace (size - n)%nat with (S (size - (S n))) by omega. simpl; auto. rewrite H0. assert (H1 : n <= size) by omega. specialize (IHn x H1). set (y:=phibis_aux n (nshiftr x (size - n))) in *. rewrite Nat2Z.inj_succ, Z.pow_succ_r; auto with zarith. case_eq (firstr (nshiftr x (size - S n))); intros. rewrite Z.double_spec; auto with zarith. rewrite Z.succ_double_spec; auto with zarith. Qed. Lemma phi_bounded : forall x, (0 <= phi x < 2 ^ (Z.of_nat size))%Z. Proof. intros. rewrite <- phibis_aux_equiv. split. apply phibis_aux_pos. change x with (nshiftr x (size-size)). apply phibis_aux_bounded; auto. Qed. Lemma phibis_aux_lowerbound : forall n x, firstr (nshiftr x n) = D1 -> (2 ^ Z.of_nat n <= phibis_aux (S n) x)%Z. Proof. induction n. intros. unfold nshiftr in H; simpl in *. unfold phibis_aux, recrbis_aux. rewrite H, Z.succ_double_spec; omega. intros. remember (S n) as m. unfold phibis_aux, recrbis_aux; fold recrbis_aux; fold (phibis_aux m (shiftr x)). subst m. rewrite Nat2Z.inj_succ, Z.pow_succ_r; auto with zarith. assert (2^(Z.of_nat n) <= phibis_aux (S n) (shiftr x))%Z. apply IHn. rewrite <- nshiftr_S_tail; auto. destruct (firstr x). change (Z.double (phibis_aux (S n) (shiftr x))) with (2*(phibis_aux (S n) (shiftr x)))%Z. omega. rewrite Z.succ_double_spec; omega. Qed. Lemma phi_lowerbound : forall x, firstl x = D1 -> (2^(Z.of_nat (pred size)) <= phi x)%Z. Proof. intros. generalize (phibis_aux_lowerbound (pred size) x). rewrite <- firstl_firstr. change (S (pred size)) with size; auto. rewrite phibis_aux_equiv; auto. Qed. (** * Equivalence modulo [2^n] *) Section EqShiftL. (** After killing [n] bits at the left, are the numbers equal ?*) Definition EqShiftL n x y := nshiftl x n = nshiftl y n. Lemma EqShiftL_zero : forall x y, EqShiftL O x y <-> x = y. Proof. unfold EqShiftL; intros; unfold nshiftl; simpl; split; auto. Qed. Lemma EqShiftL_size : forall k x y, size<=k -> EqShiftL k x y. Proof. red; intros; rewrite 2 nshiftl_above_size; auto. Qed. Lemma EqShiftL_le : forall k k' x y, k <= k' -> EqShiftL k x y -> EqShiftL k' x y. Proof. unfold EqShiftL; intros. replace k' with ((k'-k)+k)%nat by omega. remember (k'-k)%nat as n. clear Heqn H k'. induction n; simpl; auto. f_equal; auto. Qed. Lemma EqShiftL_firstr : forall k x y, k < size -> EqShiftL k x y -> firstr x = firstr y. Proof. intros. rewrite 2 firstr_firstl. f_equal. apply EqShiftL_le with k; auto. unfold size. auto with arith. Qed. Lemma EqShiftL_twice : forall k x y, EqShiftL k (twice x) (twice y) <-> EqShiftL (S k) x y. Proof. intros; unfold EqShiftL. rewrite 2 nshiftl_S_tail; split; auto. Qed. (** * From int31 to list of digits. *) (** Lower (=rightmost) bits comes first. *) Definition i2l := recrbis _ nil (fun d _ rec => d::rec). Lemma i2l_length : forall x, length (i2l x) = size. Proof. intros; reflexivity. Qed. Fixpoint lshiftl l x := match l with | nil => x | d::l => sneakl d (lshiftl l x) end. Definition l2i l := lshiftl l On. Lemma l2i_i2l : forall x, l2i (i2l x) = x. Proof. destruct x; compute; auto. Qed. Lemma i2l_sneakr : forall x d, i2l (sneakr d x) = tail (i2l x) ++ d::nil. Proof. destruct x; compute; auto. Qed. Lemma i2l_sneakl : forall x d, i2l (sneakl d x) = d :: removelast (i2l x). Proof. destruct x; compute; auto. Qed. Lemma i2l_l2i : forall l, length l = size -> i2l (l2i l) = l. Proof. repeat (destruct l as [ |? l]; [intros; discriminate | ]). destruct l; [ | intros; discriminate]. intros _; compute; auto. Qed. 
Fixpoint cstlist (A:Type)(a:A) n := match n with | O => nil | S n => a::cstlist _ a n end. Lemma i2l_nshiftl : forall n x, n<=size -> i2l (nshiftl x n) = cstlist _ D0 n ++ firstn (size-n) (i2l x). Proof. induction n. intros. assert (firstn (size-0) (i2l x) = i2l x). rewrite <- minus_n_O, <- (i2l_length x). induction (i2l x); simpl; f_equal; auto. rewrite H0; clear H0. reflexivity. intros. rewrite nshiftl_S. unfold shiftl; rewrite i2l_sneakl. simpl cstlist. rewrite <- app_comm_cons; f_equal. rewrite IHn; [ | omega]. rewrite removelast_app. apply f_equal. replace (size-n)%nat with (S (size - S n))%nat by omega. rewrite removelast_firstn; auto. rewrite i2l_length; omega. generalize (firstn_length (size-n) (i2l x)). rewrite i2l_length. intros H0 H1. rewrite H1 in H0. rewrite min_l in H0 by omega. simpl length in H0. omega. Qed. (** [i2l] can be used to define a relation equivalent to [EqShiftL] *) Lemma EqShiftL_i2l : forall k x y, EqShiftL k x y <-> firstn (size-k) (i2l x) = firstn (size-k) (i2l y). Proof. intros. destruct (le_lt_dec size k) as [Hle|Hlt]. split; intros. replace (size-k)%nat with O by omega. unfold firstn; auto. apply EqShiftL_size; auto. unfold EqShiftL. assert (k <= size) by omega. split; intros. assert (i2l (nshiftl x k) = i2l (nshiftl y k)) by (f_equal; auto). rewrite 2 i2l_nshiftl in H1; auto. eapply app_inv_head; eauto. assert (i2l (nshiftl x k) = i2l (nshiftl y k)). rewrite 2 i2l_nshiftl; auto. f_equal; auto. rewrite <- (l2i_i2l (nshiftl x k)), <- (l2i_i2l (nshiftl y k)). f_equal; auto. Qed. (** This equivalence allows proving easily the following delicate result *) Lemma EqShiftL_twice_plus_one : forall k x y, EqShiftL k (twice_plus_one x) (twice_plus_one y) <-> EqShiftL (S k) x y. Proof. intros. destruct (le_lt_dec size k) as [Hle|Hlt]. split; intros; apply EqShiftL_size; auto. rewrite 2 EqShiftL_i2l. unfold twice_plus_one. rewrite 2 i2l_sneakl. replace (size-k)%nat with (S (size - S k))%nat by omega. remember (size - S k)%nat as n. remember (i2l x) as lx. remember (i2l y) as ly. simpl. rewrite 2 firstn_removelast. split; intros. injection H; auto. f_equal; auto. subst ly n; rewrite i2l_length; omega. subst lx n; rewrite i2l_length; omega. Qed. Lemma EqShiftL_shiftr : forall k x y, EqShiftL k x y -> EqShiftL (S k) (shiftr x) (shiftr y). Proof. intros. destruct (le_lt_dec size (S k)) as [Hle|Hlt]. apply EqShiftL_size; auto. case_eq (firstr x); intros. rewrite <- EqShiftL_twice. unfold twice; rewrite <- H0. rewrite <- sneakl_shiftr. rewrite (EqShiftL_firstr k x y); auto. rewrite <- sneakl_shiftr; auto. omega. rewrite <- EqShiftL_twice_plus_one. unfold twice_plus_one; rewrite <- H0. rewrite <- sneakl_shiftr. rewrite (EqShiftL_firstr k x y); auto. rewrite <- sneakl_shiftr; auto. omega. Qed. Lemma EqShiftL_incrbis : forall n k x y, n<=size -> (n+k=S size)%nat -> EqShiftL k x y -> EqShiftL k (incrbis_aux n x) (incrbis_aux n y). Proof. induction n; simpl; intros. red; auto. destruct (eq_nat_dec k size). subst k; apply EqShiftL_size; auto. unfold incrbis_aux; simpl; fold (incrbis_aux n (shiftr x)); fold (incrbis_aux n (shiftr y)). rewrite (EqShiftL_firstr k x y); auto; try omega. case_eq (firstr y); intros. rewrite EqShiftL_twice_plus_one. apply EqShiftL_shiftr; auto. rewrite EqShiftL_twice. apply IHn; try omega. apply EqShiftL_shiftr; auto. Qed. Lemma EqShiftL_incr : forall x y, EqShiftL 1 x y -> EqShiftL 1 (incr x) (incr y). Proof. intros. rewrite <- 2 incrbis_aux_equiv. apply EqShiftL_incrbis; auto. Qed. End EqShiftL. 
(** * More equations about [incr] *) Lemma incr_twice_plus_one : forall x, incr (twice_plus_one x) = twice (incr x). Proof. intros. rewrite incr_eqn2; [ | destruct x; simpl; auto]. apply EqShiftL_incr. red; destruct x; simpl; auto. Qed. Lemma incr_firstr : forall x, firstr (incr x) <> firstr x. Proof. intros. case_eq (firstr x); intros. rewrite incr_eqn1; auto. destruct (shiftr x); simpl; discriminate. rewrite incr_eqn2; auto. destruct (incr (shiftr x)); simpl; discriminate. Qed. Lemma incr_inv : forall x y, incr x = twice_plus_one y -> x = twice y. Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0) in *; simpl in *. change (incr 0) with 1 in H. symmetry; rewrite twice_zero; auto. case_eq (firstr x); intros. rewrite incr_eqn1 in H; auto. clear H0; destruct x; destruct y; simpl in *. injection H; intros; subst; auto. elim (incr_firstr x). rewrite H1, H; destruct y; simpl; auto. Qed. (** * Conversion from [Z] : the [phi_inv] function *) (** First, recursive equations *) Lemma phi_inv_double_plus_one : forall z, phi_inv (Z.succ_double z) = twice_plus_one (phi_inv z). Proof. destruct z; simpl; auto. induction p; simpl. rewrite 2 incr_twice; auto. rewrite incr_twice, incr_twice_plus_one. f_equal. apply incr_inv; auto. auto. Qed. Lemma phi_inv_double : forall z, phi_inv (Z.double z) = twice (phi_inv z). Proof. destruct z; simpl; auto. rewrite incr_twice_plus_one; auto. Qed. Lemma phi_inv_incr : forall z, phi_inv (Z.succ z) = incr (phi_inv z). Proof. destruct z. simpl; auto. simpl; auto. induction p; simpl; auto. rewrite <- Pos.add_1_r, IHp, incr_twice_plus_one; auto. rewrite incr_twice; auto. simpl; auto. destruct p; simpl; auto. rewrite incr_twice; auto. f_equal. rewrite incr_twice_plus_one; auto. induction p; simpl; auto. rewrite incr_twice; auto. f_equal. rewrite incr_twice_plus_one; auto. Qed. (** [phi_inv o inv], the always-exact and easy-to-prove trip : from int31 to Z and then back to int31. *) Lemma phi_inv_phi_aux : forall n x, n <= size -> phi_inv (phibis_aux n (nshiftr x (size-n))) = nshiftr x (size-n). Proof. induction n. intros; simpl minus. rewrite nshiftr_size; auto. intros. unfold phibis_aux, recrbis_aux; fold recrbis_aux; fold (phibis_aux n (shiftr (nshiftr x (size-S n)))). assert (shiftr (nshiftr x (size - S n)) = nshiftr x (size-n)). replace (size - n)%nat with (S (size - (S n))); auto; omega. rewrite H0. case_eq (firstr (nshiftr x (size - S n))); intros. rewrite phi_inv_double. rewrite IHn by omega. rewrite <- H0. remember (nshiftr x (size - S n)) as y. destruct y; simpl in H1; rewrite H1; auto. rewrite phi_inv_double_plus_one. rewrite IHn by omega. rewrite <- H0. remember (nshiftr x (size - S n)) as y. destruct y; simpl in H1; rewrite H1; auto. Qed. Lemma phi_inv_phi : forall x, phi_inv (phi x) = x. Proof. intros. rewrite <- phibis_aux_equiv. replace x with (nshiftr x (size - size)) by auto. apply phi_inv_phi_aux; auto. Qed. (** The other composition [phi o phi_inv] is harder to prove correct. In particular, an overflow can happen, so a modulo is needed. For the moment, we proceed via several steps, the first one being a detour to [positive_to_in31]. *) (** * [positive_to_int31] *) (** A variant of [p2i] with [twice] and [twice_plus_one] instead of [2*i] and [2*i+1] *) Fixpoint p2ibis n p : (N*int31)%type := match n with | O => (Npos p, On) | S n => match p with | xO p => let (r,i) := p2ibis n p in (r, twice i) | xI p => let (r,i) := p2ibis n p in (r, twice_plus_one i) | xH => (N0, In) end end. 
Lemma p2ibis_bounded : forall n p, nshiftr (snd (p2ibis n p)) n = 0. Proof. induction n. simpl; intros; auto. simpl p2ibis; intros. destruct p; simpl snd. specialize IHn with p. destruct (p2ibis n p). simpl @snd in *. rewrite nshiftr_S_tail. destruct (le_lt_dec size n) as [Hle|Hlt]. rewrite nshiftr_above_size; auto. assert (H:=nshiftr_0_firstl _ _ Hlt IHn). replace (shiftr (twice_plus_one i)) with i; auto. destruct i; simpl in *. rewrite H; auto. specialize IHn with p. destruct (p2ibis n p); simpl @snd in *. rewrite nshiftr_S_tail. destruct (le_lt_dec size n) as [Hle|Hlt]. rewrite nshiftr_above_size; auto. assert (H:=nshiftr_0_firstl _ _ Hlt IHn). replace (shiftr (twice i)) with i; auto. destruct i; simpl in *; rewrite H; auto. rewrite nshiftr_S_tail; auto. replace (shiftr In) with 0; auto. apply nshiftr_n_0. Qed. Local Open Scope Z_scope. Lemma p2ibis_spec : forall n p, (n<=size)%nat -> Zpos p = (Z.of_N (fst (p2ibis n p)))*2^(Z.of_nat n) + phi (snd (p2ibis n p)). Proof. induction n; intros. simpl; rewrite Pos.mul_1_r; auto. replace (2^(Z.of_nat (S n)))%Z with (2*2^(Z.of_nat n))%Z by (rewrite <- Z.pow_succ_r, <- Zpos_P_of_succ_nat; auto with zarith). rewrite (Z.mul_comm 2). assert (n<=size)%nat by omega. destruct p; simpl; [ | | auto]; specialize (IHn p H0); generalize (p2ibis_bounded n p); destruct (p2ibis n p) as (r,i); simpl in *; intros. change (Zpos p~1) with (2*Zpos p + 1)%Z. rewrite phi_twice_plus_one_firstl, Z.succ_double_spec. rewrite IHn; ring. apply (nshiftr_0_firstl n); auto; try omega. change (Zpos p~0) with (2*Zpos p)%Z. rewrite phi_twice_firstl. change (Z.double (phi i)) with (2*(phi i))%Z. rewrite IHn; ring. apply (nshiftr_0_firstl n); auto; try omega. Qed. (** We now prove that this [p2ibis] is related to [phi_inv_positive] *) Lemma phi_inv_positive_p2ibis : forall n p, (n<=size)%nat -> EqShiftL (size-n) (phi_inv_positive p) (snd (p2ibis n p)). Proof. induction n. intros. apply EqShiftL_size; auto. intros. simpl p2ibis; destruct p; [ | | red; auto]; specialize IHn with p; destruct (p2ibis n p); simpl @snd in *; simpl phi_inv_positive; rewrite ?EqShiftL_twice_plus_one, ?EqShiftL_twice; replace (S (size - S n))%nat with (size - n)%nat by omega; apply IHn; omega. Qed. (** This gives the expected result about [phi o phi_inv], at least for the positive case. *) Lemma phi_phi_inv_positive : forall p, phi (phi_inv_positive p) = (Zpos p) mod (2^(Z.of_nat size)). Proof. intros. replace (phi_inv_positive p) with (snd (p2ibis size p)). rewrite (p2ibis_spec size p) by auto. rewrite Z.add_comm, Z_mod_plus. symmetry; apply Zmod_small. apply phi_bounded. auto with zarith. symmetry. rewrite <- EqShiftL_zero. apply (phi_inv_positive_p2ibis size p); auto. Qed. (** Moreover, [p2ibis] is also related with [p2i] and hence with [positive_to_int31]. *) Lemma double_twice_firstl : forall x, firstl x = D0 -> (Twon*x = twice x)%int31. Proof. intros. unfold mul31. rewrite <- Z.double_spec, <- phi_twice_firstl, phi_inv_phi; auto. Qed. Lemma double_twice_plus_one_firstl : forall x, firstl x = D0 -> (Twon*x+In = twice_plus_one x)%int31. Proof. intros. rewrite double_twice_firstl; auto. unfold add31. rewrite phi_twice_firstl, <- Z.succ_double_spec, <- phi_twice_plus_one_firstl, phi_inv_phi; auto. Qed. Lemma p2i_p2ibis : forall n p, (n<=size)%nat -> p2i n p = p2ibis n p. Proof. induction n; simpl; auto; intros. destruct p; auto; specialize IHn with p; generalize (p2ibis_bounded n p); rewrite IHn; try omega; destruct (p2ibis n p); simpl; intros; f_equal; auto. apply double_twice_plus_one_firstl. 
apply (nshiftr_0_firstl n); auto; omega. apply double_twice_firstl. apply (nshiftr_0_firstl n); auto; omega. Qed. Lemma positive_to_int31_phi_inv_positive : forall p, snd (positive_to_int31 p) = phi_inv_positive p. Proof. intros; unfold positive_to_int31. rewrite p2i_p2ibis; auto. symmetry. rewrite <- EqShiftL_zero. apply (phi_inv_positive_p2ibis size); auto. Qed. Lemma positive_to_int31_spec : forall p, Zpos p = (Z.of_N (fst (positive_to_int31 p)))*2^(Z.of_nat size) + phi (snd (positive_to_int31 p)). Proof. unfold positive_to_int31. intros; rewrite p2i_p2ibis; auto. apply p2ibis_spec; auto. Qed. (** Thanks to the result about [phi o phi_inv_positive], we can now establish easily the most general results about [phi o twice] and so one. *) Lemma phi_twice : forall x, phi (twice x) = (Z.double (phi x)) mod 2^(Z.of_nat size). Proof. intros. pattern x at 1; rewrite <- (phi_inv_phi x). rewrite <- phi_inv_double. assert (0 <= Z.double (phi x)). rewrite Z.double_spec; generalize (phi_bounded x); omega. destruct (Z.double (phi x)). simpl; auto. apply phi_phi_inv_positive. compute in H; elim H; auto. Qed. Lemma phi_twice_plus_one : forall x, phi (twice_plus_one x) = (Z.succ_double (phi x)) mod 2^(Z.of_nat size). Proof. intros. pattern x at 1; rewrite <- (phi_inv_phi x). rewrite <- phi_inv_double_plus_one. assert (0 <= Z.succ_double (phi x)). rewrite Z.succ_double_spec; generalize (phi_bounded x); omega. destruct (Z.succ_double (phi x)). simpl; auto. apply phi_phi_inv_positive. compute in H; elim H; auto. Qed. Lemma phi_incr : forall x, phi (incr x) = (Z.succ (phi x)) mod 2^(Z.of_nat size). Proof. intros. pattern x at 1; rewrite <- (phi_inv_phi x). rewrite <- phi_inv_incr. assert (0 <= Z.succ (phi x)). change (Z.succ (phi x)) with ((phi x)+1)%Z; generalize (phi_bounded x); omega. destruct (Z.succ (phi x)). simpl; auto. apply phi_phi_inv_positive. compute in H; elim H; auto. Qed. (** With the previous results, we can deal with [phi o phi_inv] even in the negative case *) Lemma phi_phi_inv_negative : forall p, phi (incr (complement_negative p)) = (Zneg p) mod 2^(Z.of_nat size). Proof. induction p. simpl complement_negative. rewrite phi_incr in IHp. rewrite incr_twice, phi_twice_plus_one. remember (phi (complement_negative p)) as q. rewrite Z.succ_double_spec. replace (2*q+1) with (2*(Z.succ q)-1) by omega. rewrite <- Zminus_mod_idemp_l, <- Zmult_mod_idemp_r, IHp. rewrite Zmult_mod_idemp_r, Zminus_mod_idemp_l; auto with zarith. simpl complement_negative. rewrite incr_twice_plus_one, phi_twice. remember (phi (incr (complement_negative p))) as q. rewrite Z.double_spec, IHp, Zmult_mod_idemp_r; auto with zarith. simpl; auto. Qed. Lemma phi_phi_inv : forall z, phi (phi_inv z) = z mod 2 ^ (Z.of_nat size). Proof. destruct z. simpl; auto. apply phi_phi_inv_positive. apply phi_phi_inv_negative. Qed. End Basics. 
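(* Editorial illustration (not part of the original file): a concrete
   instance of [phi_phi_inv] showing why the modulo is needed. For any
   0 <= z < 2^31 the round trip is exact, e.g. phi (phi_inv 5) = 5.
   Once z leaves the 31-bit range the injection wraps around: taking
   z = 2^31 + 5 gives
       phi (phi_inv (2^31 + 5)) = 5 = (2^31 + 5) mod 2^31,
   which is exactly the statement phi (phi_inv z) = z mod 2^(Z.of_nat size)
   with size = 31. *)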
Instance int31_ops : ZnZ.Ops int31 := { digits := 31%positive; (* number of digits *) zdigits := 31; (* number of digits *) to_Z := phi; (* conversion to Z *) of_pos := positive_to_int31; (* positive -> N*int31 : p => N,i where p = N*2^31+phi i *) head0 := head031; (* number of head 0 *) tail0 := tail031; (* number of tail 0 *) zero := 0; one := 1; minus_one := Tn; (* 2^31 - 1 *) compare := compare31; eq0 := fun i => match i ?= 0 with Eq => true | _ => false end; opp_c := fun i => 0 -c i; opp := opp31; opp_carry := fun i => 0-i-1; succ_c := fun i => i +c 1; add_c := add31c; add_carry_c := add31carryc; succ := fun i => i + 1; add := add31; add_carry := fun i j => i + j + 1; pred_c := fun i => i -c 1; sub_c := sub31c; sub_carry_c := sub31carryc; pred := fun i => i - 1; sub := sub31; sub_carry := fun i j => i - j - 1; mul_c := mul31c; mul := mul31; square_c := fun x => x *c x; div21 := div3121; div_gt := div31; (* this is supposed to be the special case of division a/b where a > b *) div := div31; modulo_gt := fun i j => let (_,r) := i/j in r; modulo := fun i j => let (_,r) := i/j in r; gcd_gt := gcd31; gcd := gcd31; add_mul_div := addmuldiv31; pos_mod := (* modulo 2^p *) fun p i => match p ?= 31 with | Lt => addmuldiv31 p 0 (addmuldiv31 (31-p) i 0) | _ => i end; is_even := fun i => let (_,r) := i/2 in match r ?= 0 with Eq => true | _ => false end; sqrt2 := sqrt312; sqrt := sqrt31; lor := lor31; land := land31; lxor := lxor31 }. Section Int31_Specs. Local Open Scope Z_scope. Notation "[| x |]" := (phi x) (at level 0, x at level 99). Local Notation wB := (2 ^ (Z.of_nat size)). Lemma wB_pos : wB > 0. Proof. auto with zarith. Qed. Notation "[+| c |]" := (interp_carry 1 wB phi c) (at level 0, c at level 99). Notation "[-| c |]" := (interp_carry (-1) wB phi c) (at level 0, c at level 99). Notation "[|| x ||]" := (zn2z_to_Z wB phi x) (at level 0, x at level 99). Lemma spec_zdigits : [| 31 |] = 31. Proof. reflexivity. Qed. Lemma spec_more_than_1_digit: 1 < 31. Proof. auto with zarith. Qed. Lemma spec_0 : [| 0 |] = 0. Proof. reflexivity. Qed. Lemma spec_1 : [| 1 |] = 1. Proof. reflexivity. Qed. Lemma spec_m1 : [| Tn |] = wB - 1. Proof. reflexivity. Qed. Lemma spec_compare : forall x y, (x ?= y)%int31 = ([|x|] ?= [|y|]). Proof. reflexivity. Qed. (** Addition *) Lemma spec_add_c : forall x y, [+|add31c x y|] = [|x|] + [|y|]. Proof. intros; unfold add31c, add31, interp_carry; rewrite phi_phi_inv. generalize (phi_bounded x)(phi_bounded y); intros. set (X:=[|x|]) in *; set (Y:=[|y|]) in *; clearbody X Y. assert ((X+Y) mod wB ?= X+Y <> Eq -> [+|C1 (phi_inv (X+Y))|] = X+Y). unfold interp_carry; rewrite phi_phi_inv, Z.compare_eq_iff; intros. destruct (Z_lt_le_dec (X+Y) wB). contradict H1; auto using Zmod_small with zarith. rewrite <- (Z_mod_plus_full (X+Y) (-1) wB). rewrite Zmod_small; romega. generalize (Z.compare_eq ((X+Y) mod wB) (X+Y)); intros Heq. destruct Z.compare; intros; [ rewrite phi_phi_inv; auto | now apply H1 | now apply H1]. Qed. Lemma spec_succ_c : forall x, [+|add31c x 1|] = [|x|] + 1. Proof. intros; apply spec_add_c. Qed. Lemma spec_add_carry_c : forall x y, [+|add31carryc x y|] = [|x|] + [|y|] + 1. Proof. intros. unfold add31carryc, interp_carry; rewrite phi_phi_inv. generalize (phi_bounded x)(phi_bounded y); intros. set (X:=[|x|]) in *; set (Y:=[|y|]) in *; clearbody X Y. assert ((X+Y+1) mod wB ?= X+Y+1 <> Eq -> [+|C1 (phi_inv (X+Y+1))|] = X+Y+1). unfold interp_carry; rewrite phi_phi_inv, Z.compare_eq_iff; intros. destruct (Z_lt_le_dec (X+Y+1) wB). 
contradict H1; auto using Zmod_small with zarith. rewrite <- (Z_mod_plus_full (X+Y+1) (-1) wB). rewrite Zmod_small; romega. generalize (Z.compare_eq ((X+Y+1) mod wB) (X+Y+1)); intros Heq. destruct Z.compare; intros; [ rewrite phi_phi_inv; auto | now apply H1 | now apply H1]. Qed. Lemma spec_add : forall x y, [|x+y|] = ([|x|] + [|y|]) mod wB. Proof. intros; apply phi_phi_inv. Qed. Lemma spec_add_carry : forall x y, [|x+y+1|] = ([|x|] + [|y|] + 1) mod wB. Proof. unfold add31; intros. repeat rewrite phi_phi_inv. apply Zplus_mod_idemp_l. Qed. Lemma spec_succ : forall x, [|x+1|] = ([|x|] + 1) mod wB. Proof. intros; rewrite <- spec_1; apply spec_add. Qed. (** Substraction *) Lemma spec_sub_c : forall x y, [-|sub31c x y|] = [|x|] - [|y|]. Proof. unfold sub31c, sub31, interp_carry; intros. rewrite phi_phi_inv. generalize (phi_bounded x)(phi_bounded y); intros. set (X:=[|x|]) in *; set (Y:=[|y|]) in *; clearbody X Y. assert ((X-Y) mod wB ?= X-Y <> Eq -> [-|C1 (phi_inv (X-Y))|] = X-Y). unfold interp_carry; rewrite phi_phi_inv, Z.compare_eq_iff; intros. destruct (Z_lt_le_dec (X-Y) 0). rewrite <- (Z_mod_plus_full (X-Y) 1 wB). rewrite Zmod_small; romega. contradict H1; apply Zmod_small; romega. generalize (Z.compare_eq ((X-Y) mod wB) (X-Y)); intros Heq. destruct Z.compare; intros; [ rewrite phi_phi_inv; auto | now apply H1 | now apply H1]. Qed. Lemma spec_sub_carry_c : forall x y, [-|sub31carryc x y|] = [|x|] - [|y|] - 1. Proof. unfold sub31carryc, sub31, interp_carry; intros. rewrite phi_phi_inv. generalize (phi_bounded x)(phi_bounded y); intros. set (X:=[|x|]) in *; set (Y:=[|y|]) in *; clearbody X Y. assert ((X-Y-1) mod wB ?= X-Y-1 <> Eq -> [-|C1 (phi_inv (X-Y-1))|] = X-Y-1). unfold interp_carry; rewrite phi_phi_inv, Z.compare_eq_iff; intros. destruct (Z_lt_le_dec (X-Y-1) 0). rewrite <- (Z_mod_plus_full (X-Y-1) 1 wB). rewrite Zmod_small; romega. contradict H1; apply Zmod_small; romega. generalize (Z.compare_eq ((X-Y-1) mod wB) (X-Y-1)); intros Heq. destruct Z.compare; intros; [ rewrite phi_phi_inv; auto | now apply H1 | now apply H1]. Qed. Lemma spec_sub : forall x y, [|x-y|] = ([|x|] - [|y|]) mod wB. Proof. intros; apply phi_phi_inv. Qed. Lemma spec_sub_carry : forall x y, [|x-y-1|] = ([|x|] - [|y|] - 1) mod wB. Proof. unfold sub31; intros. repeat rewrite phi_phi_inv. apply Zminus_mod_idemp_l. Qed. Lemma spec_opp_c : forall x, [-|sub31c 0 x|] = -[|x|]. Proof. intros; apply spec_sub_c. Qed. Lemma spec_opp : forall x, [|0 - x|] = (-[|x|]) mod wB. Proof. intros; apply phi_phi_inv. Qed. Lemma spec_opp_carry : forall x, [|0 - x - 1|] = wB - [|x|] - 1. Proof. unfold sub31; intros. repeat rewrite phi_phi_inv. change [|1|] with 1; change [|0|] with 0. rewrite <- (Z_mod_plus_full (0-[|x|]) 1 wB). rewrite Zminus_mod_idemp_l. rewrite Zmod_small; generalize (phi_bounded x); romega. Qed. Lemma spec_pred_c : forall x, [-|sub31c x 1|] = [|x|] - 1. Proof. intros; apply spec_sub_c. Qed. Lemma spec_pred : forall x, [|x-1|] = ([|x|] - 1) mod wB. Proof. intros; apply spec_sub. Qed. (** Multiplication *) Lemma phi2_phi_inv2 : forall x, [||phi_inv2 x||] = x mod (wB^2). Proof. assert (forall z, (z / wB) mod wB * wB + z mod wB = z mod wB ^ 2). intros. assert ((z/wB) mod wB = z/wB - (z/wB/wB)*wB). rewrite (Z_div_mod_eq (z/wB) wB wB_pos) at 2; ring. assert (z mod wB = z - (z/wB)*wB). rewrite (Z_div_mod_eq z wB wB_pos) at 2; ring. rewrite H. rewrite H0 at 1. ring_simplify. rewrite Zdiv_Zdiv; auto with zarith. rewrite (Z_div_mod_eq z (wB*wB)) at 2; auto with zarith. change (wB*wB) with (wB^2); ring. unfold phi_inv2. 
destruct x; unfold zn2z_to_Z; rewrite ?phi_phi_inv; change base with wB; auto. Qed. Lemma spec_mul_c : forall x y, [|| mul31c x y ||] = [|x|] * [|y|]. Proof. unfold mul31c; intros. rewrite phi2_phi_inv2. apply Zmod_small. generalize (phi_bounded x)(phi_bounded y); intros. change (wB^2) with (wB * wB). auto using Z.mul_lt_mono_nonneg with zarith. Qed. Lemma spec_mul : forall x y, [|x*y|] = ([|x|] * [|y|]) mod wB. Proof. intros; apply phi_phi_inv. Qed. Lemma spec_square_c : forall x, [|| mul31c x x ||] = [|x|] * [|x|]. Proof. intros; apply spec_mul_c. Qed. (** Division *) Lemma spec_div21 : forall a1 a2 b, wB/2 <= [|b|] -> [|a1|] < [|b|] -> let (q,r) := div3121 a1 a2 b in [|a1|] *wB+ [|a2|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Proof. unfold div3121; intros. generalize (phi_bounded a1)(phi_bounded a2)(phi_bounded b); intros. assert ([|b|]>0) by (auto with zarith). generalize (Z_div_mod (phi2 a1 a2) [|b|] H4) (Z_div_pos (phi2 a1 a2) [|b|] H4). unfold Z.div; destruct (Z.div_eucl (phi2 a1 a2) [|b|]). rewrite ?phi_phi_inv. destruct 1; intros. unfold phi2 in *. change base with wB; change base with wB in H5. change (Z.pow_pos 2 31) with wB; change (Z.pow_pos 2 31) with wB in H. rewrite H5, Z.mul_comm. replace (z0 mod wB) with z0 by (symmetry; apply Zmod_small; omega). replace (z mod wB) with z; auto with zarith. symmetry; apply Zmod_small. split. apply H7; change base with wB; auto with zarith. apply Z.mul_lt_mono_pos_r with [|b|]; [omega| ]. rewrite Z.mul_comm. apply Z.le_lt_trans with ([|b|]*z+z0); [omega| ]. rewrite <- H5. apply Z.le_lt_trans with ([|a1|]*wB+(wB-1)); [omega | ]. replace ([|a1|]*wB+(wB-1)) with (wB*([|a1|]+1)-1) by ring. assert (wB*([|a1|]+1) <= wB*[|b|]); try omega. apply Z.mul_le_mono_nonneg; omega. Qed. Lemma spec_div : forall a b, 0 < [|b|] -> let (q,r) := div31 a b in [|a|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Proof. unfold div31; intros. assert ([|b|]>0) by (auto with zarith). generalize (Z_div_mod [|a|] [|b|] H0) (Z_div_pos [|a|] [|b|] H0). unfold Z.div; destruct (Z.div_eucl [|a|] [|b|]). rewrite ?phi_phi_inv. destruct 1; intros. rewrite H1, Z.mul_comm. generalize (phi_bounded a)(phi_bounded b); intros. replace (z0 mod wB) with z0 by (symmetry; apply Zmod_small; omega). replace (z mod wB) with z; auto with zarith. symmetry; apply Zmod_small. split; auto with zarith. apply Z.le_lt_trans with [|a|]; auto with zarith. rewrite H1. apply Z.le_trans with ([|b|]*z); try omega. rewrite <- (Z.mul_1_l z) at 1. apply Z.mul_le_mono_nonneg; auto with zarith. Qed. Lemma spec_mod : forall a b, 0 < [|b|] -> [|let (_,r) := (a/b)%int31 in r|] = [|a|] mod [|b|]. Proof. unfold div31; intros. assert ([|b|]>0) by (auto with zarith). unfold Z.modulo. generalize (Z_div_mod [|a|] [|b|] H0). destruct (Z.div_eucl [|a|] [|b|]). rewrite ?phi_phi_inv. destruct 1; intros. generalize (phi_bounded b); intros. apply Zmod_small; omega. Qed. Lemma phi_gcd : forall i j, [|gcd31 i j|] = Zgcdn (2*size) [|j|] [|i|]. Proof. unfold gcd31. induction (2*size)%nat; intros. reflexivity. simpl euler. unfold compare31. change [|On|] with 0. generalize (phi_bounded j)(phi_bounded i); intros. case_eq [|j|]; intros. simpl; intros. generalize (Zabs_spec [|i|]); omega. simpl. rewrite IHn, H1; f_equal. rewrite spec_mod, H1; auto. rewrite H1; compute; auto. rewrite H1 in H; destruct H as [H _]; compute in H; elim H; auto. Qed. Lemma spec_gcd : forall a b, Zis_gcd [|a|] [|b|] [|gcd31 a b|]. Proof. intros. rewrite phi_gcd. apply Zis_gcd_sym. apply Zgcdn_is_gcd. unfold Zgcd_bound. generalize (phi_bounded b). 
destruct [|b|]. unfold size; auto with zarith. intros (_,H). cut (Pos.size_nat p <= size)%nat; [ omega | rewrite <- Zpower2_Psize; auto]. intros (H,_); compute in H; elim H; auto. Qed. Lemma iter_int31_iter_nat : forall A f i a, iter_int31 i A f a = iter_nat (Z.abs_nat [|i|]) A f a. Proof. intros. unfold iter_int31. rewrite <- recrbis_equiv; auto; unfold recrbis. rewrite <- phibis_aux_equiv. revert i a; induction size. simpl; auto. simpl; intros. case_eq (firstr i); intros H; rewrite 2 IHn; unfold phibis_aux; simpl; rewrite ?H; fold (phibis_aux n (shiftr i)); generalize (phibis_aux_pos n (shiftr i)); intros; set (z := phibis_aux n (shiftr i)) in *; clearbody z; rewrite <- nat_rect_plus. f_equal. rewrite Z.double_spec, <- Z.add_diag. symmetry; apply Zabs2Nat.inj_add; auto with zarith. change (iter_nat (S (Z.abs_nat z) + (Z.abs_nat z))%nat A f a = iter_nat (Z.abs_nat (Z.succ_double z)) A f a); f_equal. rewrite Z.succ_double_spec, <- Z.add_diag. rewrite Zabs2Nat.inj_add; auto with zarith. rewrite Zabs2Nat.inj_add; auto with zarith. change (Z.abs_nat 1) with 1%nat; omega. Qed. Fixpoint addmuldiv31_alt n i j := match n with | O => i | S n => addmuldiv31_alt n (sneakl (firstl j) i) (shiftl j) end. Lemma addmuldiv31_equiv : forall p x y, addmuldiv31 p x y = addmuldiv31_alt (Z.abs_nat [|p|]) x y. Proof. intros. unfold addmuldiv31. rewrite iter_int31_iter_nat. set (n:=Z.abs_nat [|p|]); clearbody n; clear p. revert x y; induction n. simpl; auto. intros. simpl addmuldiv31_alt. replace (S n) with (n+1)%nat by (rewrite plus_comm; auto). rewrite nat_rect_plus; simpl; auto. Qed. Lemma spec_add_mul_div : forall x y p, [|p|] <= Zpos 31 -> [| addmuldiv31 p x y |] = ([|x|] * (2 ^ [|p|]) + [|y|] / (2 ^ ((Zpos 31) - [|p|]))) mod wB. Proof. intros. rewrite addmuldiv31_equiv. assert ([|p|] = Z.of_nat (Z.abs_nat [|p|])). rewrite Zabs2Nat.id_abs; symmetry; apply Z.abs_eq. destruct (phi_bounded p); auto. rewrite H0; rewrite H0 in H; clear H0; rewrite Zabs2Nat.id. set (n := Z.abs_nat [|p|]) in *; clearbody n. assert (n <= 31)%nat. rewrite Nat2Z.inj_le; auto with zarith. clear p H; revert x y. induction n. simpl Z.of_nat; intros. rewrite Z.mul_1_r. replace ([|y|] / 2^(31-0)) with 0. rewrite Z.add_0_r. symmetry; apply Zmod_small; apply phi_bounded. symmetry; apply Zdiv_small; apply phi_bounded. simpl addmuldiv31_alt; intros. rewrite IHn; [ | omega ]. case_eq (firstl y); intros. rewrite phi_twice, Z.double_spec. rewrite phi_twice_firstl; auto. change (Z.double [|y|]) with (2*[|y|]). rewrite Nat2Z.inj_succ, Z.pow_succ_r; auto with zarith. rewrite Zplus_mod; rewrite Zmult_mod_idemp_l; rewrite <- Zplus_mod. f_equal. f_equal. ring. replace (31-Z.of_nat n) with (Z.succ(31-Z.succ(Z.of_nat n))) by ring. rewrite Z.pow_succ_r, <- Zdiv_Zdiv; auto with zarith. rewrite Z.mul_comm, Z_div_mult; auto with zarith. rewrite phi_twice_plus_one, Z.succ_double_spec. rewrite phi_twice; auto. change (Z.double [|y|]) with (2*[|y|]). rewrite Nat2Z.inj_succ, Z.pow_succ_r; auto with zarith. rewrite Zplus_mod; rewrite Zmult_mod_idemp_l; rewrite <- Zplus_mod. rewrite Z.mul_add_distr_r, Z.mul_1_l, <- Z.add_assoc. f_equal. f_equal. ring. assert ((2*[|y|]) mod wB = 2*[|y|] - wB). clear - H. symmetry. apply Zmod_unique with 1; [ | ring ]. generalize (phi_lowerbound _ H) (phi_bounded y). set (wB' := 2^Z.of_nat (pred size)). replace wB with (2*wB'); [ omega | ]. unfold wB'. rewrite <- Z.pow_succ_r, <- Nat2Z.inj_succ by (auto with zarith). f_equal. rewrite H1. 
replace wB with (2^(Z.of_nat n)*2^(31-Z.of_nat n)) by (rewrite <- Zpower_exp; auto with zarith; f_equal; unfold size; ring). unfold Z.sub; rewrite <- Z.mul_opp_l. rewrite Z_div_plus; auto with zarith. ring_simplify. replace (31+-Z.of_nat n) with (Z.succ(31-Z.succ(Z.of_nat n))) by ring. rewrite Z.pow_succ_r, <- Zdiv_Zdiv; auto with zarith. rewrite Z.mul_comm, Z_div_mult; auto with zarith. Qed. Lemma spec_pos_mod : forall w p, [|ZnZ.pos_mod p w|] = [|w|] mod (2 ^ [|p|]). Proof. unfold int31_ops, ZnZ.pos_mod, compare31. change [|31|] with 31%Z. assert (forall w p, 31<=p -> [|w|] = [|w|] mod 2^p). intros. generalize (phi_bounded w). symmetry; apply Zmod_small. split; auto with zarith. apply Z.lt_le_trans with wB; auto with zarith. apply Zpower_le_monotone; auto with zarith. intros. case_eq ([|p|] ?= 31); intros; [ apply H; rewrite (Z.compare_eq _ _ H0); auto with zarith | | apply H; change ([|p|]>31)%Z in H0; auto with zarith ]. change ([|p|]<31) in H0. rewrite spec_add_mul_div by auto with zarith. change [|0|] with 0%Z; rewrite Z.mul_0_l, Z.add_0_l. generalize (phi_bounded p)(phi_bounded w); intros. assert (31-[|p|]<wB). apply Z.le_lt_trans with 31%Z; auto with zarith. compute; auto. assert ([|31-p|]=31-[|p|]). unfold sub31; rewrite phi_phi_inv. change [|31|] with 31%Z. apply Zmod_small; auto with zarith. rewrite spec_add_mul_div by (rewrite H4; auto with zarith). change [|0|] with 0%Z; rewrite Zdiv_0_l, Z.add_0_r. rewrite H4. apply shift_unshift_mod_2; auto with zarith. Qed. (** Shift operations *) Lemma spec_head00: forall x, [|x|] = 0 -> [|head031 x|] = Zpos 31. Proof. intros. generalize (phi_inv_phi x). rewrite H; simpl phi_inv. intros H'; rewrite <- H'. simpl; auto. Qed. Fixpoint head031_alt n x := match n with | O => 0%nat | S n => match firstl x with | D0 => S (head031_alt n (shiftl x)) | D1 => 0%nat end end. Lemma head031_equiv : forall x, [|head031 x|] = Z.of_nat (head031_alt size x). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H). simpl; auto. unfold head031, recl. change On with (phi_inv (Z.of_nat (31-size))). replace (head031_alt size x) with (head031_alt size x + (31 - size))%nat by auto. assert (size <= 31)%nat by auto with arith. revert x H; induction size; intros. simpl; auto. unfold recl_aux; fold recl_aux. unfold head031_alt; fold head031_alt. rewrite H. assert ([|phi_inv (Z.of_nat (31-S n))|] = Z.of_nat (31 - S n)). rewrite phi_phi_inv. apply Zmod_small. split. change 0 with (Z.of_nat O); apply inj_le; omega. apply Z.le_lt_trans with (Z.of_nat 31). apply inj_le; omega. compute; auto. case_eq (firstl x); intros; auto. rewrite plus_Sn_m, plus_n_Sm. replace (S (31 - S n)) with (31 - n)%nat by omega. rewrite <- IHn; [ | omega | ]. f_equal; f_equal. unfold add31. rewrite H1. f_equal. change [|In|] with 1. replace (31-n)%nat with (S (31 - S n))%nat by omega. rewrite Nat2Z.inj_succ; ring. clear - H H2. rewrite (sneakr_shiftl x) in H. rewrite H2 in H. case_eq (iszero (shiftl x)); intros; auto. rewrite (iszero_eq0 _ H0) in H; discriminate. Qed. Lemma phi_nz : forall x, 0 < [|x|] <-> x <> 0%int31. Proof. split; intros. red; intro; subst x; discriminate. assert ([|x|]<>0%Z). contradict H. rewrite <- (phi_inv_phi x); rewrite H; auto. generalize (phi_bounded x); auto with zarith. Qed. Lemma spec_head0 : forall x, 0 < [|x|] -> wB/ 2 <= 2 ^ ([|head031 x|]) * [|x|] < wB. Proof. intros. rewrite head031_equiv. assert (nshiftl x size = 0%int31). apply nshiftl_size. revert x H H0. unfold size at 2 5. induction size. simpl Z.of_nat. intros. 
compute in H0; rewrite H0 in H; discriminate. intros. simpl head031_alt. case_eq (firstl x); intros. rewrite (Nat2Z.inj_succ (head031_alt n (shiftl x))), Z.pow_succ_r; auto with zarith. rewrite <- Z.mul_assoc, Z.mul_comm, <- Z.mul_assoc, <-(Z.mul_comm 2). rewrite <- Z.double_spec, <- (phi_twice_firstl _ H1). apply IHn. rewrite phi_nz; rewrite phi_nz in H; contradict H. change twice with shiftl in H. rewrite (sneakr_shiftl x), H1, H; auto. rewrite <- nshiftl_S_tail; auto. change (2^(Z.of_nat 0)) with 1; rewrite Z.mul_1_l. generalize (phi_bounded x); unfold size; split; auto with zarith. change (2^(Z.of_nat 31)/2) with (2^(Z.of_nat (pred size))). apply phi_lowerbound; auto. Qed. Lemma spec_tail00: forall x, [|x|] = 0 -> [|tail031 x|] = Zpos 31. Proof. intros. generalize (phi_inv_phi x). rewrite H; simpl phi_inv. intros H'; rewrite <- H'. simpl; auto. Qed. Fixpoint tail031_alt n x := match n with | O => 0%nat | S n => match firstr x with | D0 => S (tail031_alt n (shiftr x)) | D1 => 0%nat end end. Lemma tail031_equiv : forall x, [|tail031 x|] = Z.of_nat (tail031_alt size x). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H). simpl; auto. unfold tail031, recr. change On with (phi_inv (Z.of_nat (31-size))). replace (tail031_alt size x) with (tail031_alt size x + (31 - size))%nat by auto. assert (size <= 31)%nat by auto with arith. revert x H; induction size; intros. simpl; auto. unfold recr_aux; fold recr_aux. unfold tail031_alt; fold tail031_alt. rewrite H. assert ([|phi_inv (Z.of_nat (31-S n))|] = Z.of_nat (31 - S n)). rewrite phi_phi_inv. apply Zmod_small. split. change 0 with (Z.of_nat O); apply inj_le; omega. apply Z.le_lt_trans with (Z.of_nat 31). apply inj_le; omega. compute; auto. case_eq (firstr x); intros; auto. rewrite plus_Sn_m, plus_n_Sm. replace (S (31 - S n)) with (31 - n)%nat by omega. rewrite <- IHn; [ | omega | ]. f_equal; f_equal. unfold add31. rewrite H1. f_equal. change [|In|] with 1. replace (31-n)%nat with (S (31 - S n))%nat by omega. rewrite Nat2Z.inj_succ; ring. clear - H H2. rewrite (sneakl_shiftr x) in H. rewrite H2 in H. case_eq (iszero (shiftr x)); intros; auto. rewrite (iszero_eq0 _ H0) in H; discriminate. Qed. Lemma spec_tail0 : forall x, 0 < [|x|] -> exists y, 0 <= y /\ [|x|] = (2 * y + 1) * (2 ^ [|tail031 x|]). Proof. intros. rewrite tail031_equiv. assert (nshiftr x size = 0%int31). apply nshiftr_size. revert x H H0. induction size. simpl Z.of_nat. intros. compute in H0; rewrite H0 in H; discriminate. intros. simpl tail031_alt. case_eq (firstr x); intros. rewrite (Nat2Z.inj_succ (tail031_alt n (shiftr x))), Z.pow_succ_r; auto with zarith. destruct (IHn (shiftr x)) as (y & Hy1 & Hy2). rewrite phi_nz; rewrite phi_nz in H; contradict H. rewrite (sneakl_shiftr x), H1, H; auto. rewrite <- nshiftr_S_tail; auto. exists y; split; auto. rewrite phi_eqn1; auto. rewrite Z.double_spec, Hy2; ring. exists [|shiftr x|]. split. generalize (phi_bounded (shiftr x)); auto with zarith. rewrite phi_eqn2; auto. rewrite Z.succ_double_spec; simpl; ring. Qed. (* Sqrt *) (* Direct transcription of an old proof of a fortran program in boyer-moore *) Lemma quotient_by_2 a: a - 1 <= (a/2) + (a/2). Proof. case (Z_mod_lt a 2); auto with zarith. intros H1; rewrite Zmod_eq_full; auto with zarith. Qed. Lemma sqrt_main_trick j k: 0 <= j -> 0 <= k -> (j * k) + j <= ((j + k)/2 + 1) ^ 2. Proof. intros Hj; generalize Hj k; pattern j; apply natlike_ind; auto; clear k j Hj. intros _ k Hk; repeat rewrite Z.add_0_l. apply Z.mul_nonneg_nonneg; generalize (Z_div_pos k 2); auto with zarith. 
intros j Hj Hrec _ k Hk; pattern k; apply natlike_ind; auto; clear k Hk. rewrite Z.mul_0_r, Z.add_0_r, Z.add_0_l. generalize (sqr_pos (Z.succ j / 2)) (quotient_by_2 (Z.succ j)); unfold Z.succ. rewrite Z.pow_2_r, Z.mul_add_distr_r; repeat rewrite Z.mul_add_distr_l. auto with zarith. intros k Hk _. replace ((Z.succ j + Z.succ k) / 2) with ((j + k)/2 + 1). generalize (Hrec Hj k Hk) (quotient_by_2 (j + k)). unfold Z.succ; repeat rewrite Z.pow_2_r; repeat rewrite Z.mul_add_distr_r; repeat rewrite Z.mul_add_distr_l. repeat rewrite Z.mul_1_l; repeat rewrite Z.mul_1_r. auto with zarith. rewrite Z.add_comm, <- Z_div_plus_full_l; auto with zarith. apply f_equal2 with (f := Z.div); auto with zarith. Qed. Lemma sqrt_main i j: 0 <= i -> 0 < j -> i < ((j + (i/j))/2 + 1) ^ 2. Proof. intros Hi Hj. assert (Hij: 0 <= i/j) by (apply Z_div_pos; auto with zarith). apply Z.lt_le_trans with (2 := sqrt_main_trick _ _ (Z.lt_le_incl _ _ Hj) Hij). pattern i at 1; rewrite (Z_div_mod_eq i j); case (Z_mod_lt i j); auto with zarith. Qed. Lemma sqrt_init i: 1 < i -> i < (i/2 + 1) ^ 2. Proof. intros Hi. assert (H1: 0 <= i - 2) by auto with zarith. assert (H2: 1 <= (i / 2) ^ 2); auto with zarith. replace i with (1* 2 + (i - 2)); auto with zarith. rewrite Z.pow_2_r, Z_div_plus_full_l; auto with zarith. generalize (sqr_pos ((i - 2)/ 2)) (Z_div_pos (i - 2) 2). rewrite Z.mul_add_distr_r; repeat rewrite Z.mul_add_distr_l. auto with zarith. generalize (quotient_by_2 i). rewrite Z.pow_2_r in H2 |- *; repeat (rewrite Z.mul_add_distr_r || rewrite Z.mul_add_distr_l || rewrite Z.mul_1_l || rewrite Z.mul_1_r). auto with zarith. Qed. Lemma sqrt_test_true i j: 0 <= i -> 0 < j -> i/j >= j -> j ^ 2 <= i. Proof. intros Hi Hj Hd; rewrite Z.pow_2_r. apply Z.le_trans with (j * (i/j)); auto with zarith. apply Z_mult_div_ge; auto with zarith. Qed. Lemma sqrt_test_false i j: 0 <= i -> 0 < j -> i/j < j -> (j + (i/j))/2 < j. Proof. intros Hi Hj H; case (Z.le_gt_cases j ((j + (i/j))/2)); auto. intros H1; contradict H; apply Z.le_ngt. assert (2 * j <= j + (i/j)); auto with zarith. apply Z.le_trans with (2 * ((j + (i/j))/2)); auto with zarith. apply Z_mult_div_ge; auto with zarith. Qed. Lemma sqrt31_step_def rec i j: sqrt31_step rec i j = match (fst (i/j) ?= j)%int31 with Lt => rec i (fst ((j + fst(i/j))/2))%int31 | _ => j end. Proof. unfold sqrt31_step; case div31; intros. simpl; case compare31; auto. Qed. Lemma div31_phi i j: 0 < [|j|] -> [|fst (i/j)%int31|] = [|i|]/[|j|]. intros Hj; generalize (spec_div i j Hj). case div31; intros q r; simpl @fst. intros (H1,H2); apply Zdiv_unique with [|r|]; auto with zarith. rewrite H1; ring. Qed. Lemma sqrt31_step_correct rec i j: 0 < [|i|] -> 0 < [|j|] -> [|i|] < ([|j|] + 1) ^ 2 -> 2 * [|j|] < wB -> (forall j1 : int31, 0 < [|j1|] < [|j|] -> [|i|] < ([|j1|] + 1) ^ 2 -> [|rec i j1|] ^ 2 <= [|i|] < ([|rec i j1|] + 1) ^ 2) -> [|sqrt31_step rec i j|] ^ 2 <= [|i|] < ([|sqrt31_step rec i j|] + 1) ^ 2. Proof. assert (Hp2: 0 < [|2|]) by exact (eq_refl Lt). intros Hi Hj Hij H31 Hrec; rewrite sqrt31_step_def. rewrite spec_compare, div31_phi; auto. case Z.compare_spec; auto; intros Hc; try (split; auto; apply sqrt_test_true; auto with zarith; fail). apply Hrec; repeat rewrite div31_phi; auto with zarith. replace [|(j + fst (i / j)%int31)|] with ([|j|] + [|i|] / [|j|]). split. apply Z.le_succ_l in Hj. change (1 <= [|j|]) in Hj. Z.le_elim Hj. replace ([|j|] + [|i|]/[|j|]) with (1 * 2 + (([|j|] - 2) + [|i|] / [|j|])); try ring. rewrite Z_div_plus_full_l; auto with zarith. 
assert (0 <= [|i|]/ [|j|]) by (apply Z_div_pos; auto with zarith). assert (0 <= ([|j|] - 2 + [|i|] / [|j|]) / [|2|]) ; auto with zarith. rewrite <- Hj, Zdiv_1_r. replace (1 + [|i|])%Z with (1 * 2 + ([|i|] - 1))%Z; try ring. rewrite Z_div_plus_full_l; auto with zarith. assert (0 <= ([|i|] - 1) /2)%Z by (apply Z_div_pos; auto with zarith). change ([|2|]) with 2%Z; auto with zarith. apply sqrt_test_false; auto with zarith. rewrite spec_add, div31_phi; auto. symmetry; apply Zmod_small. split; auto with zarith. replace [|j + fst (i / j)%int31|] with ([|j|] + [|i|] / [|j|]). apply sqrt_main; auto with zarith. rewrite spec_add, div31_phi; auto. symmetry; apply Zmod_small. split; auto with zarith. Qed. Lemma iter31_sqrt_correct n rec i j: 0 < [|i|] -> 0 < [|j|] -> [|i|] < ([|j|] + 1) ^ 2 -> 2 * [|j|] < 2 ^ (Z.of_nat size) -> (forall j1, 0 < [|j1|] -> 2^(Z.of_nat n) + [|j1|] <= [|j|] -> [|i|] < ([|j1|] + 1) ^ 2 -> 2 * [|j1|] < 2 ^ (Z.of_nat size) -> [|rec i j1|] ^ 2 <= [|i|] < ([|rec i j1|] + 1) ^ 2) -> [|iter31_sqrt n rec i j|] ^ 2 <= [|i|] < ([|iter31_sqrt n rec i j|] + 1) ^ 2. Proof. revert rec i j; elim n; unfold iter31_sqrt; fold iter31_sqrt; clear n. intros rec i j Hi Hj Hij H31 Hrec; apply sqrt31_step_correct; auto with zarith. intros; apply Hrec; auto with zarith. rewrite Z.pow_0_r; auto with zarith. intros n Hrec rec i j Hi Hj Hij H31 HHrec. apply sqrt31_step_correct; auto. intros j1 Hj1 Hjp1; apply Hrec; auto with zarith. intros j2 Hj2 H2j2 Hjp2 Hj31; apply Hrec; auto with zarith. intros j3 Hj3 Hpj3. apply HHrec; auto. rewrite Nat2Z.inj_succ, Z.pow_succ_r. apply Z.le_trans with (2 ^Z.of_nat n + [|j2|]); auto with zarith. apply Nat2Z.is_nonneg. Qed. Lemma spec_sqrt : forall x, [|sqrt31 x|] ^ 2 <= [|x|] < ([|sqrt31 x|] + 1) ^ 2. Proof. intros i; unfold sqrt31. rewrite spec_compare. case Z.compare_spec; change [|1|] with 1; intros Hi; auto with zarith. repeat rewrite Z.pow_2_r; auto with zarith. apply iter31_sqrt_correct; auto with zarith. rewrite div31_phi; change ([|2|]) with 2; auto with zarith. replace ([|i|]) with (1 * 2 + ([|i|] - 2))%Z; try ring. assert (0 <= ([|i|] - 2)/2)%Z by (apply Z_div_pos; auto with zarith). rewrite Z_div_plus_full_l; auto with zarith. rewrite div31_phi; change ([|2|]) with 2; auto with zarith. apply sqrt_init; auto. rewrite div31_phi; change ([|2|]) with 2; auto with zarith. apply Z.le_lt_trans with ([|i|]). apply Z_mult_div_ge; auto with zarith. case (phi_bounded i); auto. intros j2 H1 H2; contradict H2; apply Z.lt_nge. rewrite div31_phi; change ([|2|]) with 2; auto with zarith. apply Z.le_lt_trans with ([|i|]); auto with zarith. assert (0 <= [|i|]/2)%Z by (apply Z_div_pos; auto with zarith). apply Z.le_trans with (2 * ([|i|]/2)); auto with zarith. apply Z_mult_div_ge; auto with zarith. case (phi_bounded i); unfold size; auto with zarith. change [|0|] with 0; auto with zarith. case (phi_bounded i); repeat rewrite Z.pow_2_r; auto with zarith. Qed. Lemma sqrt312_step_def rec ih il j: sqrt312_step rec ih il j = match (ih ?= j)%int31 with Eq => j | Gt => j | _ => match (fst (div3121 ih il j) ?= j)%int31 with Lt => let m := match j +c fst (div3121 ih il j) with C0 m1 => fst (m1/2)%int31 | C1 m1 => (fst (m1/2) + v30)%int31 end in rec ih il m | _ => j end end. Proof. unfold sqrt312_step; case div3121; intros. simpl; case compare31; auto. Qed. Lemma sqrt312_lower_bound ih il j: phi2 ih il < ([|j|] + 1) ^ 2 -> [|ih|] <= [|j|]. Proof. intros H1. case (phi_bounded j); intros Hbj _. case (phi_bounded il); intros Hbil _. case (phi_bounded ih); intros Hbih Hbih1. 
assert (([|ih|] < [|j|] + 1)%Z); auto with zarith. apply Z.square_lt_simpl_nonneg; auto with zarith. repeat rewrite <-Z.pow_2_r; apply Z.le_lt_trans with (2 := H1). apply Z.le_trans with ([|ih|] * base)%Z; unfold phi2, base; try rewrite Z.pow_2_r; auto with zarith. Qed. Lemma div312_phi ih il j: (2^30 <= [|j|] -> [|ih|] < [|j|] -> [|fst (div3121 ih il j)|] = phi2 ih il/[|j|])%Z. Proof. intros Hj Hj1. generalize (spec_div21 ih il j Hj Hj1). case div3121; intros q r (Hq, Hr). apply Zdiv_unique with (phi r); auto with zarith. simpl @fst; apply eq_trans with (1 := Hq); ring. Qed. Lemma sqrt312_step_correct rec ih il j: 2 ^ 29 <= [|ih|] -> 0 < [|j|] -> phi2 ih il < ([|j|] + 1) ^ 2 -> (forall j1, 0 < [|j1|] < [|j|] -> phi2 ih il < ([|j1|] + 1) ^ 2 -> [|rec ih il j1|] ^ 2 <= phi2 ih il < ([|rec ih il j1|] + 1) ^ 2) -> [|sqrt312_step rec ih il j|] ^ 2 <= phi2 ih il < ([|sqrt312_step rec ih il j|] + 1) ^ 2. Proof. assert (Hp2: (0 < [|2|])%Z) by exact (eq_refl Lt). intros Hih Hj Hij Hrec; rewrite sqrt312_step_def. assert (H1: ([|ih|] <= [|j|])%Z) by (apply sqrt312_lower_bound with il; auto). case (phi_bounded ih); intros Hih1 _. case (phi_bounded il); intros Hil1 _. case (phi_bounded j); intros _ Hj1. assert (Hp3: (0 < phi2 ih il)). unfold phi2; apply Z.lt_le_trans with ([|ih|] * base)%Z; auto with zarith. apply Z.mul_pos_pos; auto with zarith. apply Z.lt_le_trans with (2:= Hih); auto with zarith. rewrite spec_compare. case Z.compare_spec; intros Hc1. split; auto. apply sqrt_test_true; auto. unfold phi2, base; auto with zarith. unfold phi2; rewrite Hc1. assert (0 <= [|il|]/[|j|]) by (apply Z_div_pos; auto with zarith). rewrite Z.mul_comm, Z_div_plus_full_l; unfold base; auto with zarith. simpl wB in Hj1. unfold Z.pow_pos in Hj1. simpl in Hj1. auto with zarith. case (Z.le_gt_cases (2 ^ 30) [|j|]); intros Hjj. rewrite spec_compare; case Z.compare_spec; rewrite div312_phi; auto; intros Hc; try (split; auto; apply sqrt_test_true; auto with zarith; fail). apply Hrec. assert (Hf1: 0 <= phi2 ih il/ [|j|]) by (apply Z_div_pos; auto with zarith). apply Z.le_succ_l in Hj. change (1 <= [|j|]) in Hj. Z.le_elim Hj. 2: contradict Hc; apply Z.le_ngt; rewrite <- Hj, Zdiv_1_r; auto with zarith. assert (Hf3: 0 < ([|j|] + phi2 ih il / [|j|]) / 2). replace ([|j|] + phi2 ih il/ [|j|])%Z with (1 * 2 + (([|j|] - 2) + phi2 ih il / [|j|])); try ring. rewrite Z_div_plus_full_l; auto with zarith. assert (0 <= ([|j|] - 2 + phi2 ih il / [|j|]) / 2) ; auto with zarith. assert (Hf4: ([|j|] + phi2 ih il / [|j|]) / 2 < [|j|]). apply sqrt_test_false; auto with zarith. generalize (spec_add_c j (fst (div3121 ih il j))). unfold interp_carry; case add31c; intros r; rewrite div312_phi; auto with zarith. rewrite div31_phi; change [|2|] with 2%Z; auto with zarith. intros HH; rewrite HH; clear HH; auto with zarith. rewrite spec_add, div31_phi; change [|2|] with 2%Z; auto. rewrite Z.mul_1_l; intros HH. rewrite Z.add_comm, <- Z_div_plus_full_l; auto with zarith. change (phi v30 * 2) with (2 ^ Z.of_nat size). rewrite HH, Zmod_small; auto with zarith. replace (phi match j +c fst (div3121 ih il j) with | C0 m1 => fst (m1 / 2)%int31 | C1 m1 => fst (m1 / 2)%int31 + v30 end) with ((([|j|] + (phi2 ih il)/([|j|]))/2)). apply sqrt_main; auto with zarith. generalize (spec_add_c j (fst (div3121 ih il j))). unfold interp_carry; case add31c; intros r; rewrite div312_phi; auto with zarith. rewrite div31_phi; auto with zarith. intros HH; rewrite HH; auto with zarith. intros HH; rewrite <- HH. change (1 * 2 ^ Z.of_nat size) with (phi (v30) * 2). 
rewrite Z_div_plus_full_l; auto with zarith. rewrite Z.add_comm. rewrite spec_add, Zmod_small. rewrite div31_phi; auto. split; auto with zarith. case (phi_bounded (fst (r/2)%int31)); case (phi_bounded v30); auto with zarith. rewrite div31_phi; change (phi 2) with 2%Z; auto. change (2 ^Z.of_nat size) with (base/2 + phi v30). assert (phi r / 2 < base/2); auto with zarith. apply Z.mul_lt_mono_pos_r with 2; auto with zarith. change (base/2 * 2) with base. apply Z.le_lt_trans with (phi r). rewrite Z.mul_comm; apply Z_mult_div_ge; auto with zarith. case (phi_bounded r); auto with zarith. contradict Hij; apply Z.le_ngt. assert ((1 + [|j|]) <= 2 ^ 30); auto with zarith. apply Z.le_trans with ((2 ^ 30) * (2 ^ 30)); auto with zarith. assert (0 <= 1 + [|j|]); auto with zarith. apply Z.mul_le_mono_nonneg; auto with zarith. change ((2 ^ 30) * (2 ^ 30)) with ((2 ^ 29) * base). apply Z.le_trans with ([|ih|] * base); auto with zarith. unfold phi2, base; auto with zarith. split; auto. apply sqrt_test_true; auto. unfold phi2, base; auto with zarith. apply Z.le_ge; apply Z.le_trans with (([|j|] * base)/[|j|]). rewrite Z.mul_comm, Z_div_mult; auto with zarith. apply Z.ge_le; apply Z_div_ge; auto with zarith. Qed. Lemma iter312_sqrt_correct n rec ih il j: 2^29 <= [|ih|] -> 0 < [|j|] -> phi2 ih il < ([|j|] + 1) ^ 2 -> (forall j1, 0 < [|j1|] -> 2^(Z.of_nat n) + [|j1|] <= [|j|] -> phi2 ih il < ([|j1|] + 1) ^ 2 -> [|rec ih il j1|] ^ 2 <= phi2 ih il < ([|rec ih il j1|] + 1) ^ 2) -> [|iter312_sqrt n rec ih il j|] ^ 2 <= phi2 ih il < ([|iter312_sqrt n rec ih il j|] + 1) ^ 2. Proof. revert rec ih il j; elim n; unfold iter312_sqrt; fold iter312_sqrt; clear n. intros rec ih il j Hi Hj Hij Hrec; apply sqrt312_step_correct; auto with zarith. intros; apply Hrec; auto with zarith. rewrite Z.pow_0_r; auto with zarith. intros n Hrec rec ih il j Hi Hj Hij HHrec. apply sqrt312_step_correct; auto. intros j1 Hj1 Hjp1; apply Hrec; auto with zarith. intros j2 Hj2 H2j2 Hjp2; apply Hrec; auto with zarith. intros j3 Hj3 Hpj3. apply HHrec; auto. rewrite Nat2Z.inj_succ, Z.pow_succ_r. apply Z.le_trans with (2 ^Z.of_nat n + [|j2|])%Z; auto with zarith. apply Nat2Z.is_nonneg. Qed. (* Avoid expanding [iter312_sqrt] before variables in the context. *) Strategy 1 [iter312_sqrt]. Lemma spec_sqrt2 : forall x y, wB/ 4 <= [|x|] -> let (s,r) := sqrt312 x y in [||WW x y||] = [|s|] ^ 2 + [+|r|] /\ [+|r|] <= 2 * [|s|]. Proof. intros ih il Hih; unfold sqrt312. change [||WW ih il||] with (phi2 ih il). assert (Hbin: forall s, s * s + 2* s + 1 = (s + 1) ^ 2) by (intros s; ring). assert (Hb: 0 <= base) by (red; intros HH; discriminate). assert (Hi2: phi2 ih il < (phi Tn + 1) ^ 2). { change ((phi Tn + 1) ^ 2) with (2^62). apply Z.le_lt_trans with ((2^31 -1) * base + (2^31 - 1)); auto with zarith. 2: simpl; unfold Z.pow_pos; simpl; auto with zarith. case (phi_bounded ih); case (phi_bounded il); intros H1 H2 H3 H4. unfold base, Z.pow, Z.pow_pos in H2,H4; simpl in H2,H4. unfold phi2. cbn [Z.pow Z.pow_pos Pos.iter]. auto with zarith. } case (iter312_sqrt_correct 31 (fun _ _ j => j) ih il Tn); auto with zarith. change [|Tn|] with 2147483647; auto with zarith. intros j1 _ HH; contradict HH. apply Z.lt_nge. change [|Tn|] with 2147483647; auto with zarith. change (2 ^ Z.of_nat 31) with 2147483648; auto with zarith. case (phi_bounded j1); auto with zarith. set (s := iter312_sqrt 31 (fun _ _ j : int31 => j) ih il Tn). intros Hs1 Hs2. generalize (spec_mul_c s s); case mul31c. simpl zn2z_to_Z; intros HH. assert ([|s|] = 0). { symmetry in HH. rewrite Z.mul_eq_0 in HH. 
destruct HH; auto. } contradict Hs2; apply Z.le_ngt; rewrite H. change ((0 + 1) ^ 2) with 1. apply Z.le_trans with (2 ^ Z.of_nat size / 4 * base). simpl; auto with zarith. apply Z.le_trans with ([|ih|] * base); auto with zarith. unfold phi2; case (phi_bounded il); auto with zarith. intros ih1 il1. change [||WW ih1 il1||] with (phi2 ih1 il1). intros Hihl1. generalize (spec_sub_c il il1). case sub31c; intros il2 Hil2. rewrite spec_compare; case Z.compare_spec. unfold interp_carry in *. intros H1; split. rewrite Z.pow_2_r, <- Hihl1. unfold phi2; ring[Hil2 H1]. replace [|il2|] with (phi2 ih il - phi2 ih1 il1). rewrite Hihl1. rewrite <-Hbin in Hs2; auto with zarith. unfold phi2; rewrite H1, Hil2; ring. unfold interp_carry. intros H1; contradict Hs1. apply Z.lt_nge; rewrite Z.pow_2_r, <-Hihl1. unfold phi2. case (phi_bounded il); intros _ H2. apply Z.lt_le_trans with (([|ih|] + 1) * base + 0). rewrite Z.mul_add_distr_r, Z.add_0_r; auto with zarith. case (phi_bounded il1); intros H3 _. apply Z.add_le_mono; auto with zarith. unfold interp_carry in *; change (1 * 2 ^ Z.of_nat size) with base. rewrite Z.pow_2_r, <- Hihl1, Hil2. intros H1. rewrite <- Z.le_succ_l, <- Z.add_1_r in H1. Z.le_elim H1. contradict Hs2; apply Z.le_ngt. replace (([|s|] + 1) ^ 2) with (phi2 ih1 il1 + 2 * [|s|] + 1). unfold phi2. case (phi_bounded il); intros Hpil _. assert (Hl1l: [|il1|] <= [|il|]). { case (phi_bounded il2); rewrite Hil2; auto with zarith. } assert ([|ih1|] * base + 2 * [|s|] + 1 <= [|ih|] * base); auto with zarith. case (phi_bounded s); change (2 ^ Z.of_nat size) with base; intros _ Hps. case (phi_bounded ih1); intros Hpih1 _; auto with zarith. apply Z.le_trans with (([|ih1|] + 2) * base); auto with zarith. rewrite Z.mul_add_distr_r. assert (2 * [|s|] + 1 <= 2 * base); auto with zarith. rewrite Hihl1, Hbin; auto. split. unfold phi2; rewrite <- H1; ring. replace (base + ([|il|] - [|il1|])) with (phi2 ih il - ([|s|] * [|s|])). rewrite <-Hbin in Hs2; auto with zarith. rewrite <- Hihl1; unfold phi2; rewrite <- H1; ring. unfold interp_carry in Hil2 |- *. unfold interp_carry; change (1 * 2 ^ Z.of_nat size) with base. assert (Hsih: [|ih - 1|] = [|ih|] - 1). { rewrite spec_sub, Zmod_small; auto; change [|1|] with 1. case (phi_bounded ih); intros H1 H2. generalize Hih; change (2 ^ Z.of_nat size / 4) with 536870912. split; auto with zarith. } rewrite spec_compare; case Z.compare_spec. rewrite Hsih. intros H1; split. rewrite Z.pow_2_r, <- Hihl1. unfold phi2; rewrite <-H1. transitivity ([|ih|] * base + [|il1|] + ([|il|] - [|il1|])). ring. rewrite <-Hil2. change (2 ^ Z.of_nat size) with base; ring. replace [|il2|] with (phi2 ih il - phi2 ih1 il1). rewrite Hihl1. rewrite <-Hbin in Hs2; auto with zarith. unfold phi2. rewrite <-H1. ring_simplify. transitivity (base + ([|il|] - [|il1|])). ring. rewrite <-Hil2. change (2 ^ Z.of_nat size) with base; ring. rewrite Hsih; intros H1. assert (He: [|ih|] = [|ih1|]). { apply Z.le_antisymm; auto with zarith. case (Z.le_gt_cases [|ih1|] [|ih|]); auto; intros H2. contradict Hs1; apply Z.lt_nge; rewrite Z.pow_2_r, <-Hihl1. unfold phi2. case (phi_bounded il); change (2 ^ Z.of_nat size) with base; intros _ Hpil1. apply Z.lt_le_trans with (([|ih|] + 1) * base). rewrite Z.mul_add_distr_r, Z.mul_1_l; auto with zarith. case (phi_bounded il1); intros Hpil2 _. apply Z.le_trans with (([|ih1|]) * base); auto with zarith. } rewrite Z.pow_2_r, <-Hihl1; unfold phi2; rewrite <-He. contradict Hs1; apply Z.lt_nge; rewrite Z.pow_2_r, <-Hihl1. unfold phi2; rewrite He. 
assert (phi il - phi il1 < 0); auto with zarith. rewrite <-Hil2. case (phi_bounded il2); auto with zarith. intros H1. rewrite Z.pow_2_r, <-Hihl1. assert (H2 : [|ih1|]+2 <= [|ih|]); auto with zarith. Z.le_elim H2. contradict Hs2; apply Z.le_ngt. replace (([|s|] + 1) ^ 2) with (phi2 ih1 il1 + 2 * [|s|] + 1). unfold phi2. assert ([|ih1|] * base + 2 * phi s + 1 <= [|ih|] * base + ([|il|] - [|il1|])); auto with zarith. rewrite <-Hil2. change (-1 * 2 ^ Z.of_nat size) with (-base). case (phi_bounded il2); intros Hpil2 _. apply Z.le_trans with ([|ih|] * base + - base); auto with zarith. case (phi_bounded s); change (2 ^ Z.of_nat size) with base; intros _ Hps. assert (2 * [|s|] + 1 <= 2 * base); auto with zarith. apply Z.le_trans with ([|ih1|] * base + 2 * base); auto with zarith. assert (Hi: ([|ih1|] + 3) * base <= [|ih|] * base); auto with zarith. rewrite Z.mul_add_distr_r in Hi; auto with zarith. rewrite Hihl1, Hbin; auto. unfold phi2; rewrite <-H2. split. replace [|il|] with (([|il|] - [|il1|]) + [|il1|]); try ring. rewrite <-Hil2. change (-1 * 2 ^ Z.of_nat size) with (-base); ring. replace (base + [|il2|]) with (phi2 ih il - phi2 ih1 il1). rewrite Hihl1. rewrite <-Hbin in Hs2; auto with zarith. unfold phi2; rewrite <-H2. replace [|il|] with (([|il|] - [|il1|]) + [|il1|]); try ring. rewrite <-Hil2. change (-1 * 2 ^ Z.of_nat size) with (-base); ring. Qed. (** [iszero] *) Lemma spec_eq0 : forall x, ZnZ.eq0 x = true -> [|x|] = 0. Proof. clear; unfold ZnZ.eq0, int31_ops. unfold compare31; intros. change [|0|] with 0 in H. apply Z.compare_eq. now destruct ([|x|] ?= 0). Qed. (* Even *) Lemma spec_is_even : forall x, if ZnZ.is_even x then [|x|] mod 2 = 0 else [|x|] mod 2 = 1. Proof. unfold ZnZ.is_even, int31_ops; intros. generalize (spec_div x 2). destruct (x/2)%int31 as (q,r); intros. unfold compare31. change [|2|] with 2 in H. change [|0|] with 0. destruct H; auto with zarith. replace ([|x|] mod 2) with [|r|]. destruct H; auto with zarith. case Z.compare_spec; auto with zarith. apply Zmod_unique with [|q|]; auto with zarith. Qed. (* Bitwise *) Lemma log2_phi_bounded x : Z.log2 [|x|] < Z.of_nat size. Proof. destruct (phi_bounded x) as (H,H'). Z.le_elim H. - now apply Z.log2_lt_pow2. - now rewrite <- H. Qed. Lemma spec_lor x y : [| ZnZ.lor x y |] = Z.lor [|x|] [|y|]. Proof. unfold ZnZ.lor,int31_ops. unfold lor31. rewrite phi_phi_inv. apply Z.mod_small; split; trivial. - apply Z.lor_nonneg; split; apply phi_bounded. - apply Z.log2_lt_cancel. rewrite Z.log2_pow2 by easy. rewrite Z.log2_lor; try apply phi_bounded. apply Z.max_lub_lt; apply log2_phi_bounded. Qed. Lemma spec_land x y : [| ZnZ.land x y |] = Z.land [|x|] [|y|]. Proof. unfold ZnZ.land, int31_ops. unfold land31. rewrite phi_phi_inv. apply Z.mod_small; split; trivial. - apply Z.land_nonneg; left; apply phi_bounded. - apply Z.log2_lt_cancel. rewrite Z.log2_pow2 by easy. eapply Z.le_lt_trans. apply Z.log2_land; try apply phi_bounded. apply Z.min_lt_iff; left; apply log2_phi_bounded. Qed. Lemma spec_lxor x y : [| ZnZ.lxor x y |] = Z.lxor [|x|] [|y|]. Proof. unfold ZnZ.lxor, int31_ops. unfold lxor31. rewrite phi_phi_inv. apply Z.mod_small; split; trivial. - apply Z.lxor_nonneg; split; intros; apply phi_bounded. - apply Z.log2_lt_cancel. rewrite Z.log2_pow2 by easy. eapply Z.le_lt_trans. apply Z.log2_lxor; try apply phi_bounded. apply Z.max_lub_lt; apply log2_phi_bounded. Qed. 
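(* Added illustration (hedged, not part of the original development): a
   concrete run of the Newton-style step proved correct in
   [sqrt31_step_correct] and [sqrt312_step_correct] above.  Starting from
   i = 10 with the over-approximation j = 5 (so that i < (j + 1) ^ 2):
     10 / 5 = 2 < 5,   so the next candidate is (5 + 2) / 2 = 3;
     10 / 3 = 3 >= 3,  so the iteration stops and returns 3;
   and indeed 3 ^ 2 <= 10 < (3 + 1) ^ 2, which is exactly the
   postcondition stated by [spec_sqrt]. *)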
Global Instance int31_specs : ZnZ.Specs int31_ops := { spec_to_Z := phi_bounded; spec_of_pos := positive_to_int31_spec; spec_zdigits := spec_zdigits; spec_more_than_1_digit := spec_more_than_1_digit; spec_0 := spec_0; spec_1 := spec_1; spec_m1 := spec_m1; spec_compare := spec_compare; spec_eq0 := spec_eq0; spec_opp_c := spec_opp_c; spec_opp := spec_opp; spec_opp_carry := spec_opp_carry; spec_succ_c := spec_succ_c; spec_add_c := spec_add_c; spec_add_carry_c := spec_add_carry_c; spec_succ := spec_succ; spec_add := spec_add; spec_add_carry := spec_add_carry; spec_pred_c := spec_pred_c; spec_sub_c := spec_sub_c; spec_sub_carry_c := spec_sub_carry_c; spec_pred := spec_pred; spec_sub := spec_sub; spec_sub_carry := spec_sub_carry; spec_mul_c := spec_mul_c; spec_mul := spec_mul; spec_square_c := spec_square_c; spec_div21 := spec_div21; spec_div_gt := fun a b _ => spec_div a b; spec_div := spec_div; spec_modulo_gt := fun a b _ => spec_mod a b; spec_modulo := spec_mod; spec_gcd_gt := fun a b _ => spec_gcd a b; spec_gcd := spec_gcd; spec_head00 := spec_head00; spec_head0 := spec_head0; spec_tail00 := spec_tail00; spec_tail0 := spec_tail0; spec_add_mul_div := spec_add_mul_div; spec_pos_mod := spec_pos_mod; spec_is_even := spec_is_even; spec_sqrt2 := spec_sqrt2; spec_sqrt := spec_sqrt; spec_lor := spec_lor; spec_land := spec_land; spec_lxor := spec_lxor }. End Int31_Specs. Module Int31Cyclic <: CyclicType. Definition t := int31. Definition ops := int31_ops. Definition specs := int31_specs. End Int31Cyclic.
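(* Added note (hedged, not part of the original file): [Int31Cyclic] packages
   the type, the operations and their specifications so that any functor
   expecting a [CyclicType] can be instantiated with it.  A toy sketch, using
   a made-up functor [F] written only for illustration:
     Module F (C : CyclicType). Definition ops := C.ops. End F.
     Module FInt31 := F Int31Cyclic. *)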
(************************************************************************) (* v * The Coq Proof Assistant / The Coq Development Team *) (* <O___,, * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2015 *) (* \VV/ **************************************************************) (* // * This file is distributed under the terms of the *) (* * GNU Lesser General Public License Version 2.1 *) (************************************************************************) (** * Int31 numbers defines indeed a cyclic structure : Z/(2^31)Z *) (** Author: Arnaud Spiwack (+ Pierre Letouzey) *) Require Import List. Require Import Min. Require Export Int31. Require Import Znumtheory. Require Import Zgcd_alt. Require Import Zpow_facts. Require Import BigNumPrelude. Require Import CyclicAxioms. Require Import ROmega. Local Open Scope nat_scope. Local Open Scope int31_scope. Section Basics. (** * Basic results about [iszero], [shiftl], [shiftr] *) Lemma iszero_eq0 : forall x, iszero x = true -> x=0. Proof. destruct x; simpl; intros. repeat match goal with H:(if ?d then _ else _) = true |- _ => destruct d; try discriminate end. reflexivity. Qed. Lemma iszero_not_eq0 : forall x, iszero x = false -> x<>0. Proof. intros x H Eq; rewrite Eq in H; simpl in *; discriminate. Qed. Lemma sneakl_shiftr : forall x, x = sneakl (firstr x) (shiftr x). Proof. destruct x; simpl; auto. Qed. Lemma sneakr_shiftl : forall x, x = sneakr (firstl x) (shiftl x). Proof. destruct x; simpl; auto. Qed. Lemma twice_zero : forall x, twice x = 0 <-> twice_plus_one x = 1. Proof. destruct x; simpl in *; split; intro H; injection H; intros; subst; auto. Qed. Lemma twice_or_twice_plus_one : forall x, x = twice (shiftr x) \/ x = twice_plus_one (shiftr x). Proof. intros; case_eq (firstr x); intros. destruct x; simpl in *; rewrite H; auto. destruct x; simpl in *; rewrite H; auto. Qed. (** * Iterated shift to the right *) Definition nshiftr x := nat_rect _ x (fun _ => shiftr). Lemma nshiftr_S : forall n x, nshiftr x (S n) = shiftr (nshiftr x n). Proof. reflexivity. Qed. Lemma nshiftr_S_tail : forall n x, nshiftr x (S n) = nshiftr (shiftr x) n. Proof. intros n; elim n; simpl; auto. intros; now f_equal. Qed. Lemma nshiftr_n_0 : forall n, nshiftr 0 n = 0. Proof. induction n; simpl; auto. rewrite IHn; auto. Qed. Lemma nshiftr_size : forall x, nshiftr x size = 0. Proof. destruct x; simpl; auto. Qed. Lemma nshiftr_above_size : forall k x, size<=k -> nshiftr x k = 0. Proof. intros. replace k with ((k-size)+size)%nat by omega. induction (k-size)%nat; auto. rewrite nshiftr_size; auto. simpl; rewrite IHn; auto. Qed. (** * Iterated shift to the left *) Definition nshiftl x := nat_rect _ x (fun _ => shiftl). Lemma nshiftl_S : forall n x, nshiftl x (S n) = shiftl (nshiftl x n). Proof. reflexivity. Qed. Lemma nshiftl_S_tail : forall n x, nshiftl x (S n) = nshiftl (shiftl x) n. Proof. intros n; elim n; simpl; intros; now f_equal. Qed. Lemma nshiftl_n_0 : forall n, nshiftl 0 n = 0. Proof. induction n; simpl; auto. rewrite IHn; auto. Qed. Lemma nshiftl_size : forall x, nshiftl x size = 0. Proof. destruct x; simpl; auto. Qed. Lemma nshiftl_above_size : forall k x, size<=k -> nshiftl x k = 0. Proof. intros. replace k with ((k-size)+size)%nat by omega. induction (k-size)%nat; auto. rewrite nshiftl_size; auto. simpl; rewrite IHn; auto. Qed. Lemma firstr_firstl : forall x, firstr x = firstl (nshiftl x (pred size)). Proof. destruct x; simpl; auto. Qed. Lemma firstl_firstr : forall x, firstl x = firstr (nshiftr x (pred size)). Proof. destruct x; simpl; auto. Qed. 
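(* Added illustration (hedged, not in the original file): in terms of the
   [phi] interpretation defined below, [firstr] is the least significant
   digit and [shiftr] drops it, so on the number whose value is 6
   (binary ...0110) we have firstr = D0 and the value of its [shiftr] is 3.
   Dually, [firstl]/[shiftl] act on the most significant end, which is what
   [firstr_firstl] and [firstl_firstr] above make precise after
   [pred size] = 30 iterated shifts. *)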
(** More advanced results about [nshiftr] *) Lemma nshiftr_predsize_0_firstl : forall x, nshiftr x (pred size) = 0 -> firstl x = D0. Proof. destruct x; compute; intros H; injection H; intros; subst; auto. Qed. Lemma nshiftr_0_propagates : forall n p x, n <= p -> nshiftr x n = 0 -> nshiftr x p = 0. Proof. intros. replace p with ((p-n)+n)%nat by omega. induction (p-n)%nat. simpl; auto. simpl; rewrite IHn0; auto. Qed. Lemma nshiftr_0_firstl : forall n x, n < size -> nshiftr x n = 0 -> firstl x = D0. Proof. intros. apply nshiftr_predsize_0_firstl. apply nshiftr_0_propagates with n; auto; omega. Qed. (** * Some induction principles over [int31] *) (** Not used for the moment. Are they really useful ? *) Lemma int31_ind_sneakl : forall P : int31->Prop, P 0 -> (forall x d, P x -> P (sneakl d x)) -> forall x, P x. Proof. intros. assert (forall n, n<=size -> P (nshiftr x (size - n))). induction n; intros. rewrite nshiftr_size; auto. rewrite sneakl_shiftr. apply H0. change (P (nshiftr x (S (size - S n)))). replace (S (size - S n))%nat with (size - n)%nat by omega. apply IHn; omega. change x with (nshiftr x (size-size)); auto. Qed. Lemma int31_ind_twice : forall P : int31->Prop, P 0 -> (forall x, P x -> P (twice x)) -> (forall x, P x -> P (twice_plus_one x)) -> forall x, P x. Proof. induction x using int31_ind_sneakl; auto. destruct d; auto. Qed. (** * Some generic results about [recr] *) Section Recr. (** [recr] satisfies the fixpoint equation used for its definition. *) Variable (A:Type)(case0:A)(caserec:digits->int31->A->A). Lemma recr_aux_eqn : forall n x, iszero x = false -> recr_aux (S n) A case0 caserec x = caserec (firstr x) (shiftr x) (recr_aux n A case0 caserec (shiftr x)). Proof. intros; simpl; rewrite H; auto. Qed. Lemma recr_aux_converges : forall n p x, n <= size -> n <= p -> recr_aux n A case0 caserec (nshiftr x (size - n)) = recr_aux p A case0 caserec (nshiftr x (size - n)). Proof. induction n. simpl minus; intros. rewrite nshiftr_size; destruct p; simpl; auto. intros. destruct p. inversion H0. unfold recr_aux; fold recr_aux. destruct (iszero (nshiftr x (size - S n))); auto. f_equal. change (shiftr (nshiftr x (size - S n))) with (nshiftr x (S (size - S n))). replace (S (size - S n))%nat with (size - n)%nat by omega. apply IHn; auto with arith. Qed. Lemma recr_eqn : forall x, iszero x = false -> recr A case0 caserec x = caserec (firstr x) (shiftr x) (recr A case0 caserec (shiftr x)). Proof. intros. unfold recr. change x with (nshiftr x (size - size)). rewrite (recr_aux_converges size (S size)); auto with arith. rewrite recr_aux_eqn; auto. Qed. (** [recr] is usually equivalent to a variant [recrbis] written without [iszero] check. *) Fixpoint recrbis_aux (n:nat)(A:Type)(case0:A)(caserec:digits->int31->A->A) (i:int31) : A := match n with | O => case0 | S next => let si := shiftr i in caserec (firstr i) si (recrbis_aux next A case0 caserec si) end. Definition recrbis := recrbis_aux size. Hypothesis case0_caserec : caserec D0 0 case0 = case0. Lemma recrbis_aux_equiv : forall n x, recrbis_aux n A case0 caserec x = recr_aux n A case0 caserec x. Proof. induction n; simpl; auto; intros. case_eq (iszero x); intros; [ | f_equal; auto ]. rewrite (iszero_eq0 _ H); simpl; auto. replace (recrbis_aux n A case0 caserec 0) with case0; auto. clear H IHn; induction n; simpl; congruence. Qed. Lemma recrbis_equiv : forall x, recrbis A case0 caserec x = recr A case0 caserec x. Proof. intros; apply recrbis_aux_equiv; auto. Qed. End Recr. (** * Incrementation *) Section Incr. 
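(* Added note (hedged, not in the original file): [recr_eqn] is the unfolding
   law used throughout this file: on a nonzero argument, [recr] consumes the
   low digit ([firstr]) and recurses on [shiftr].  [recrbis] performs exactly
   [size] such steps without the [iszero] shortcut, and [recrbis_equiv] lets
   the two be interchanged whenever [caserec D0 0 case0 = case0], which is the
   case for the incrementation and [phi] variants defined below. *)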
(** Variant of [incr] via [recrbis] *) Let Incr (b : digits) (si rec : int31) := match b with | D0 => sneakl D1 si | D1 => sneakl D0 rec end. Definition incrbis_aux n x := recrbis_aux n _ In Incr x. Lemma incrbis_aux_equiv : forall x, incrbis_aux size x = incr x. Proof. unfold incr, recr, incrbis_aux; fold Incr; intros. apply recrbis_aux_equiv; auto. Qed. (** Recursive equations satisfied by [incr] *) Lemma incr_eqn1 : forall x, firstr x = D0 -> incr x = twice_plus_one (shiftr x). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0); simpl; auto. unfold incr; rewrite recr_eqn; fold incr; auto. rewrite H; auto. Qed. Lemma incr_eqn2 : forall x, firstr x = D1 -> incr x = twice (incr (shiftr x)). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0) in H; simpl in H; discriminate. unfold incr; rewrite recr_eqn; fold incr; auto. rewrite H; auto. Qed. Lemma incr_twice : forall x, incr (twice x) = twice_plus_one x. Proof. intros. rewrite incr_eqn1; destruct x; simpl; auto. Qed. Lemma incr_twice_plus_one_firstl : forall x, firstl x = D0 -> incr (twice_plus_one x) = twice (incr x). Proof. intros. rewrite incr_eqn2; [ | destruct x; simpl; auto ]. f_equal; f_equal. destruct x; simpl in *; rewrite H; auto. Qed. (** The previous result is actually true even without the constraint on [firstl], but this is harder to prove (see later). *) End Incr. (** * Conversion to [Z] : the [phi] function *) Section Phi. (** Variant of [phi] via [recrbis] *) Let Phi := fun b (_:int31) => match b with D0 => Z.double | D1 => Z.succ_double end. Definition phibis_aux n x := recrbis_aux n _ Z0 Phi x. Lemma phibis_aux_equiv : forall x, phibis_aux size x = phi x. Proof. unfold phi, recr, phibis_aux; fold Phi; intros. apply recrbis_aux_equiv; auto. Qed. (** Recursive equations satisfied by [phi] *) Lemma phi_eqn1 : forall x, firstr x = D0 -> phi x = Z.double (phi (shiftr x)). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0); simpl; auto. intros; unfold phi; rewrite recr_eqn; fold phi; auto. rewrite H; auto. Qed. Lemma phi_eqn2 : forall x, firstr x = D1 -> phi x = Z.succ_double (phi (shiftr x)). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0) in H; simpl in H; discriminate. intros; unfold phi; rewrite recr_eqn; fold phi; auto. rewrite H; auto. Qed. Lemma phi_twice_firstl : forall x, firstl x = D0 -> phi (twice x) = Z.double (phi x). Proof. intros. rewrite phi_eqn1; auto; [ | destruct x; auto ]. f_equal; f_equal. destruct x; simpl in *; rewrite H; auto. Qed. Lemma phi_twice_plus_one_firstl : forall x, firstl x = D0 -> phi (twice_plus_one x) = Z.succ_double (phi x). Proof. intros. rewrite phi_eqn2; auto; [ | destruct x; auto ]. f_equal; f_equal. destruct x; simpl in *; rewrite H; auto. Qed. End Phi. (** [phi x] is positive and lower than [2^31] *) Lemma phibis_aux_pos : forall n x, (0 <= phibis_aux n x)%Z. Proof. induction n. simpl; unfold phibis_aux; simpl; auto with zarith. intros. unfold phibis_aux, recrbis_aux; fold recrbis_aux; fold (phibis_aux n (shiftr x)). destruct (firstr x). specialize IHn with (shiftr x); rewrite Z.double_spec; omega. specialize IHn with (shiftr x); rewrite Z.succ_double_spec; omega. Qed. Lemma phibis_aux_bounded : forall n x, n <= size -> (phibis_aux n (nshiftr x (size-n)) < 2 ^ (Z.of_nat n))%Z. Proof. induction n. simpl minus; unfold phibis_aux; simpl; auto with zarith. intros. unfold phibis_aux, recrbis_aux; fold recrbis_aux; fold (phibis_aux n (shiftr (nshiftr x (size - S n)))). 
assert (shiftr (nshiftr x (size - S n)) = nshiftr x (size-n)). replace (size - n)%nat with (S (size - (S n))) by omega. simpl; auto. rewrite H0. assert (H1 : n <= size) by omega. specialize (IHn x H1). set (y:=phibis_aux n (nshiftr x (size - n))) in *. rewrite Nat2Z.inj_succ, Z.pow_succ_r; auto with zarith. case_eq (firstr (nshiftr x (size - S n))); intros. rewrite Z.double_spec; auto with zarith. rewrite Z.succ_double_spec; auto with zarith. Qed. Lemma phi_bounded : forall x, (0 <= phi x < 2 ^ (Z.of_nat size))%Z. Proof. intros. rewrite <- phibis_aux_equiv. split. apply phibis_aux_pos. change x with (nshiftr x (size-size)). apply phibis_aux_bounded; auto. Qed. Lemma phibis_aux_lowerbound : forall n x, firstr (nshiftr x n) = D1 -> (2 ^ Z.of_nat n <= phibis_aux (S n) x)%Z. Proof. induction n. intros. unfold nshiftr in H; simpl in *. unfold phibis_aux, recrbis_aux. rewrite H, Z.succ_double_spec; omega. intros. remember (S n) as m. unfold phibis_aux, recrbis_aux; fold recrbis_aux; fold (phibis_aux m (shiftr x)). subst m. rewrite Nat2Z.inj_succ, Z.pow_succ_r; auto with zarith. assert (2^(Z.of_nat n) <= phibis_aux (S n) (shiftr x))%Z. apply IHn. rewrite <- nshiftr_S_tail; auto. destruct (firstr x). change (Z.double (phibis_aux (S n) (shiftr x))) with (2*(phibis_aux (S n) (shiftr x)))%Z. omega. rewrite Z.succ_double_spec; omega. Qed. Lemma phi_lowerbound : forall x, firstl x = D1 -> (2^(Z.of_nat (pred size)) <= phi x)%Z. Proof. intros. generalize (phibis_aux_lowerbound (pred size) x). rewrite <- firstl_firstr. change (S (pred size)) with size; auto. rewrite phibis_aux_equiv; auto. Qed. (** * Equivalence modulo [2^n] *) Section EqShiftL. (** After killing [n] bits at the left, are the numbers equal ?*) Definition EqShiftL n x y := nshiftl x n = nshiftl y n. Lemma EqShiftL_zero : forall x y, EqShiftL O x y <-> x = y. Proof. unfold EqShiftL; intros; unfold nshiftl; simpl; split; auto. Qed. Lemma EqShiftL_size : forall k x y, size<=k -> EqShiftL k x y. Proof. red; intros; rewrite 2 nshiftl_above_size; auto. Qed. Lemma EqShiftL_le : forall k k' x y, k <= k' -> EqShiftL k x y -> EqShiftL k' x y. Proof. unfold EqShiftL; intros. replace k' with ((k'-k)+k)%nat by omega. remember (k'-k)%nat as n. clear Heqn H k'. induction n; simpl; auto. f_equal; auto. Qed. Lemma EqShiftL_firstr : forall k x y, k < size -> EqShiftL k x y -> firstr x = firstr y. Proof. intros. rewrite 2 firstr_firstl. f_equal. apply EqShiftL_le with k; auto. unfold size. auto with arith. Qed. Lemma EqShiftL_twice : forall k x y, EqShiftL k (twice x) (twice y) <-> EqShiftL (S k) x y. Proof. intros; unfold EqShiftL. rewrite 2 nshiftl_S_tail; split; auto. Qed. (** * From int31 to list of digits. *) (** Lower (=rightmost) bits comes first. *) Definition i2l := recrbis _ nil (fun d _ rec => d::rec). Lemma i2l_length : forall x, length (i2l x) = size. Proof. intros; reflexivity. Qed. Fixpoint lshiftl l x := match l with | nil => x | d::l => sneakl d (lshiftl l x) end. Definition l2i l := lshiftl l On. Lemma l2i_i2l : forall x, l2i (i2l x) = x. Proof. destruct x; compute; auto. Qed. Lemma i2l_sneakr : forall x d, i2l (sneakr d x) = tail (i2l x) ++ d::nil. Proof. destruct x; compute; auto. Qed. Lemma i2l_sneakl : forall x d, i2l (sneakl d x) = d :: removelast (i2l x). Proof. destruct x; compute; auto. Qed. Lemma i2l_l2i : forall l, length l = size -> i2l (l2i l) = l. Proof. repeat (destruct l as [ |? l]; [intros; discriminate | ]). destruct l; [ | intros; discriminate]. intros _; compute; auto. Qed. 
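(* Added illustration (hedged, not in the original file): by [phi_eqn1] and
   [phi_eqn2], [phi] reads the digits from [firstr] upwards, i.e.
   phi x = 2 * phi (shiftr x) + (1 if firstr x = D1 else 0).
   Accordingly [i2l] lists the 31 digits least-significant first: for the
   integer whose value is 6, the list starts with D0 :: D1 :: D1 and is then
   padded with D0 up to length [size] = 31, as [i2l_length] states. *)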
Fixpoint cstlist (A:Type)(a:A) n := match n with | O => nil | S n => a::cstlist _ a n end. Lemma i2l_nshiftl : forall n x, n<=size -> i2l (nshiftl x n) = cstlist _ D0 n ++ firstn (size-n) (i2l x). Proof. induction n. intros. assert (firstn (size-0) (i2l x) = i2l x). rewrite <- minus_n_O, <- (i2l_length x). induction (i2l x); simpl; f_equal; auto. rewrite H0; clear H0. reflexivity. intros. rewrite nshiftl_S. unfold shiftl; rewrite i2l_sneakl. simpl cstlist. rewrite <- app_comm_cons; f_equal. rewrite IHn; [ | omega]. rewrite removelast_app. apply f_equal. replace (size-n)%nat with (S (size - S n))%nat by omega. rewrite removelast_firstn; auto. rewrite i2l_length; omega. generalize (firstn_length (size-n) (i2l x)). rewrite i2l_length. intros H0 H1. rewrite H1 in H0. rewrite min_l in H0 by omega. simpl length in H0. omega. Qed. (** [i2l] can be used to define a relation equivalent to [EqShiftL] *) Lemma EqShiftL_i2l : forall k x y, EqShiftL k x y <-> firstn (size-k) (i2l x) = firstn (size-k) (i2l y). Proof. intros. destruct (le_lt_dec size k) as [Hle|Hlt]. split; intros. replace (size-k)%nat with O by omega. unfold firstn; auto. apply EqShiftL_size; auto. unfold EqShiftL. assert (k <= size) by omega. split; intros. assert (i2l (nshiftl x k) = i2l (nshiftl y k)) by (f_equal; auto). rewrite 2 i2l_nshiftl in H1; auto. eapply app_inv_head; eauto. assert (i2l (nshiftl x k) = i2l (nshiftl y k)). rewrite 2 i2l_nshiftl; auto. f_equal; auto. rewrite <- (l2i_i2l (nshiftl x k)), <- (l2i_i2l (nshiftl y k)). f_equal; auto. Qed. (** This equivalence allows proving easily the following delicate result *) Lemma EqShiftL_twice_plus_one : forall k x y, EqShiftL k (twice_plus_one x) (twice_plus_one y) <-> EqShiftL (S k) x y. Proof. intros. destruct (le_lt_dec size k) as [Hle|Hlt]. split; intros; apply EqShiftL_size; auto. rewrite 2 EqShiftL_i2l. unfold twice_plus_one. rewrite 2 i2l_sneakl. replace (size-k)%nat with (S (size - S k))%nat by omega. remember (size - S k)%nat as n. remember (i2l x) as lx. remember (i2l y) as ly. simpl. rewrite 2 firstn_removelast. split; intros. injection H; auto. f_equal; auto. subst ly n; rewrite i2l_length; omega. subst lx n; rewrite i2l_length; omega. Qed. Lemma EqShiftL_shiftr : forall k x y, EqShiftL k x y -> EqShiftL (S k) (shiftr x) (shiftr y). Proof. intros. destruct (le_lt_dec size (S k)) as [Hle|Hlt]. apply EqShiftL_size; auto. case_eq (firstr x); intros. rewrite <- EqShiftL_twice. unfold twice; rewrite <- H0. rewrite <- sneakl_shiftr. rewrite (EqShiftL_firstr k x y); auto. rewrite <- sneakl_shiftr; auto. omega. rewrite <- EqShiftL_twice_plus_one. unfold twice_plus_one; rewrite <- H0. rewrite <- sneakl_shiftr. rewrite (EqShiftL_firstr k x y); auto. rewrite <- sneakl_shiftr; auto. omega. Qed. Lemma EqShiftL_incrbis : forall n k x y, n<=size -> (n+k=S size)%nat -> EqShiftL k x y -> EqShiftL k (incrbis_aux n x) (incrbis_aux n y). Proof. induction n; simpl; intros. red; auto. destruct (eq_nat_dec k size). subst k; apply EqShiftL_size; auto. unfold incrbis_aux; simpl; fold (incrbis_aux n (shiftr x)); fold (incrbis_aux n (shiftr y)). rewrite (EqShiftL_firstr k x y); auto; try omega. case_eq (firstr y); intros. rewrite EqShiftL_twice_plus_one. apply EqShiftL_shiftr; auto. rewrite EqShiftL_twice. apply IHn; try omega. apply EqShiftL_shiftr; auto. Qed. Lemma EqShiftL_incr : forall x y, EqShiftL 1 x y -> EqShiftL 1 (incr x) (incr y). Proof. intros. rewrite <- 2 incrbis_aux_equiv. apply EqShiftL_incrbis; auto. Qed. End EqShiftL. 
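(* Added illustration (hedged, not in the original file): by [EqShiftL_i2l],
   [EqShiftL k x y] says that x and y agree on their (size - k) low digits,
   i.e. that their values are congruent modulo 2 ^ (size - k).  For instance
   [EqShiftL 1 x y], used just below through [EqShiftL_incr] to prove
   [incr_twice_plus_one], only constrains the 30 low bits, which is enough
   because doubling discards the top bit of its argument anyway. *)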
(** * More equations about [incr] *) Lemma incr_twice_plus_one : forall x, incr (twice_plus_one x) = twice (incr x). Proof. intros. rewrite incr_eqn2; [ | destruct x; simpl; auto]. apply EqShiftL_incr. red; destruct x; simpl; auto. Qed. Lemma incr_firstr : forall x, firstr (incr x) <> firstr x. Proof. intros. case_eq (firstr x); intros. rewrite incr_eqn1; auto. destruct (shiftr x); simpl; discriminate. rewrite incr_eqn2; auto. destruct (incr (shiftr x)); simpl; discriminate. Qed. Lemma incr_inv : forall x y, incr x = twice_plus_one y -> x = twice y. Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H0) in *; simpl in *. change (incr 0) with 1 in H. symmetry; rewrite twice_zero; auto. case_eq (firstr x); intros. rewrite incr_eqn1 in H; auto. clear H0; destruct x; destruct y; simpl in *. injection H; intros; subst; auto. elim (incr_firstr x). rewrite H1, H; destruct y; simpl; auto. Qed. (** * Conversion from [Z] : the [phi_inv] function *) (** First, recursive equations *) Lemma phi_inv_double_plus_one : forall z, phi_inv (Z.succ_double z) = twice_plus_one (phi_inv z). Proof. destruct z; simpl; auto. induction p; simpl. rewrite 2 incr_twice; auto. rewrite incr_twice, incr_twice_plus_one. f_equal. apply incr_inv; auto. auto. Qed. Lemma phi_inv_double : forall z, phi_inv (Z.double z) = twice (phi_inv z). Proof. destruct z; simpl; auto. rewrite incr_twice_plus_one; auto. Qed. Lemma phi_inv_incr : forall z, phi_inv (Z.succ z) = incr (phi_inv z). Proof. destruct z. simpl; auto. simpl; auto. induction p; simpl; auto. rewrite <- Pos.add_1_r, IHp, incr_twice_plus_one; auto. rewrite incr_twice; auto. simpl; auto. destruct p; simpl; auto. rewrite incr_twice; auto. f_equal. rewrite incr_twice_plus_one; auto. induction p; simpl; auto. rewrite incr_twice; auto. f_equal. rewrite incr_twice_plus_one; auto. Qed. (** [phi_inv o inv], the always-exact and easy-to-prove trip : from int31 to Z and then back to int31. *) Lemma phi_inv_phi_aux : forall n x, n <= size -> phi_inv (phibis_aux n (nshiftr x (size-n))) = nshiftr x (size-n). Proof. induction n. intros; simpl minus. rewrite nshiftr_size; auto. intros. unfold phibis_aux, recrbis_aux; fold recrbis_aux; fold (phibis_aux n (shiftr (nshiftr x (size-S n)))). assert (shiftr (nshiftr x (size - S n)) = nshiftr x (size-n)). replace (size - n)%nat with (S (size - (S n))); auto; omega. rewrite H0. case_eq (firstr (nshiftr x (size - S n))); intros. rewrite phi_inv_double. rewrite IHn by omega. rewrite <- H0. remember (nshiftr x (size - S n)) as y. destruct y; simpl in H1; rewrite H1; auto. rewrite phi_inv_double_plus_one. rewrite IHn by omega. rewrite <- H0. remember (nshiftr x (size - S n)) as y. destruct y; simpl in H1; rewrite H1; auto. Qed. Lemma phi_inv_phi : forall x, phi_inv (phi x) = x. Proof. intros. rewrite <- phibis_aux_equiv. replace x with (nshiftr x (size - size)) by auto. apply phi_inv_phi_aux; auto. Qed. (** The other composition [phi o phi_inv] is harder to prove correct. In particular, an overflow can happen, so a modulo is needed. For the moment, we proceed via several steps, the first one being a detour to [positive_to_in31]. *) (** * [positive_to_int31] *) (** A variant of [p2i] with [twice] and [twice_plus_one] instead of [2*i] and [2*i+1] *) Fixpoint p2ibis n p : (N*int31)%type := match n with | O => (Npos p, On) | S n => match p with | xO p => let (r,i) := p2ibis n p in (r, twice i) | xI p => let (r,i) := p2ibis n p in (r, twice_plus_one i) | xH => (N0, In) end end. 
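(* Added illustration (hedged, not in the original file): [p2ibis n p]
   splits the positive p into its n low bits (returned as an int31) and the
   remaining high part (returned as an N), which is what [p2ibis_spec] below
   states: Zpos p = high * 2 ^ n + phi low.  For n = 2 and p = 13
   (binary 1101) the result is (3%N, i) with phi i = 1, since
   13 = 3 * 2 ^ 2 + 1. *)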
Lemma p2ibis_bounded : forall n p, nshiftr (snd (p2ibis n p)) n = 0. Proof. induction n. simpl; intros; auto. simpl p2ibis; intros. destruct p; simpl snd. specialize IHn with p. destruct (p2ibis n p). simpl @snd in *. rewrite nshiftr_S_tail. destruct (le_lt_dec size n) as [Hle|Hlt]. rewrite nshiftr_above_size; auto. assert (H:=nshiftr_0_firstl _ _ Hlt IHn). replace (shiftr (twice_plus_one i)) with i; auto. destruct i; simpl in *. rewrite H; auto. specialize IHn with p. destruct (p2ibis n p); simpl @snd in *. rewrite nshiftr_S_tail. destruct (le_lt_dec size n) as [Hle|Hlt]. rewrite nshiftr_above_size; auto. assert (H:=nshiftr_0_firstl _ _ Hlt IHn). replace (shiftr (twice i)) with i; auto. destruct i; simpl in *; rewrite H; auto. rewrite nshiftr_S_tail; auto. replace (shiftr In) with 0; auto. apply nshiftr_n_0. Qed. Local Open Scope Z_scope. Lemma p2ibis_spec : forall n p, (n<=size)%nat -> Zpos p = (Z.of_N (fst (p2ibis n p)))*2^(Z.of_nat n) + phi (snd (p2ibis n p)). Proof. induction n; intros. simpl; rewrite Pos.mul_1_r; auto. replace (2^(Z.of_nat (S n)))%Z with (2*2^(Z.of_nat n))%Z by (rewrite <- Z.pow_succ_r, <- Zpos_P_of_succ_nat; auto with zarith). rewrite (Z.mul_comm 2). assert (n<=size)%nat by omega. destruct p; simpl; [ | | auto]; specialize (IHn p H0); generalize (p2ibis_bounded n p); destruct (p2ibis n p) as (r,i); simpl in *; intros. change (Zpos p~1) with (2*Zpos p + 1)%Z. rewrite phi_twice_plus_one_firstl, Z.succ_double_spec. rewrite IHn; ring. apply (nshiftr_0_firstl n); auto; try omega. change (Zpos p~0) with (2*Zpos p)%Z. rewrite phi_twice_firstl. change (Z.double (phi i)) with (2*(phi i))%Z. rewrite IHn; ring. apply (nshiftr_0_firstl n); auto; try omega. Qed. (** We now prove that this [p2ibis] is related to [phi_inv_positive] *) Lemma phi_inv_positive_p2ibis : forall n p, (n<=size)%nat -> EqShiftL (size-n) (phi_inv_positive p) (snd (p2ibis n p)). Proof. induction n. intros. apply EqShiftL_size; auto. intros. simpl p2ibis; destruct p; [ | | red; auto]; specialize IHn with p; destruct (p2ibis n p); simpl @snd in *; simpl phi_inv_positive; rewrite ?EqShiftL_twice_plus_one, ?EqShiftL_twice; replace (S (size - S n))%nat with (size - n)%nat by omega; apply IHn; omega. Qed. (** This gives the expected result about [phi o phi_inv], at least for the positive case. *) Lemma phi_phi_inv_positive : forall p, phi (phi_inv_positive p) = (Zpos p) mod (2^(Z.of_nat size)). Proof. intros. replace (phi_inv_positive p) with (snd (p2ibis size p)). rewrite (p2ibis_spec size p) by auto. rewrite Z.add_comm, Z_mod_plus. symmetry; apply Zmod_small. apply phi_bounded. auto with zarith. symmetry. rewrite <- EqShiftL_zero. apply (phi_inv_positive_p2ibis size p); auto. Qed. (** Moreover, [p2ibis] is also related with [p2i] and hence with [positive_to_int31]. *) Lemma double_twice_firstl : forall x, firstl x = D0 -> (Twon*x = twice x)%int31. Proof. intros. unfold mul31. rewrite <- Z.double_spec, <- phi_twice_firstl, phi_inv_phi; auto. Qed. Lemma double_twice_plus_one_firstl : forall x, firstl x = D0 -> (Twon*x+In = twice_plus_one x)%int31. Proof. intros. rewrite double_twice_firstl; auto. unfold add31. rewrite phi_twice_firstl, <- Z.succ_double_spec, <- phi_twice_plus_one_firstl, phi_inv_phi; auto. Qed. Lemma p2i_p2ibis : forall n p, (n<=size)%nat -> p2i n p = p2ibis n p. Proof. induction n; simpl; auto; intros. destruct p; auto; specialize IHn with p; generalize (p2ibis_bounded n p); rewrite IHn; try omega; destruct (p2ibis n p); simpl; intros; f_equal; auto. apply double_twice_plus_one_firstl. 
apply (nshiftr_0_firstl n); auto; omega. apply double_twice_firstl. apply (nshiftr_0_firstl n); auto; omega. Qed. Lemma positive_to_int31_phi_inv_positive : forall p, snd (positive_to_int31 p) = phi_inv_positive p. Proof. intros; unfold positive_to_int31. rewrite p2i_p2ibis; auto. symmetry. rewrite <- EqShiftL_zero. apply (phi_inv_positive_p2ibis size); auto. Qed. Lemma positive_to_int31_spec : forall p, Zpos p = (Z.of_N (fst (positive_to_int31 p)))*2^(Z.of_nat size) + phi (snd (positive_to_int31 p)). Proof. unfold positive_to_int31. intros; rewrite p2i_p2ibis; auto. apply p2ibis_spec; auto. Qed. (** Thanks to the result about [phi o phi_inv_positive], we can now establish easily the most general results about [phi o twice] and so one. *) Lemma phi_twice : forall x, phi (twice x) = (Z.double (phi x)) mod 2^(Z.of_nat size). Proof. intros. pattern x at 1; rewrite <- (phi_inv_phi x). rewrite <- phi_inv_double. assert (0 <= Z.double (phi x)). rewrite Z.double_spec; generalize (phi_bounded x); omega. destruct (Z.double (phi x)). simpl; auto. apply phi_phi_inv_positive. compute in H; elim H; auto. Qed. Lemma phi_twice_plus_one : forall x, phi (twice_plus_one x) = (Z.succ_double (phi x)) mod 2^(Z.of_nat size). Proof. intros. pattern x at 1; rewrite <- (phi_inv_phi x). rewrite <- phi_inv_double_plus_one. assert (0 <= Z.succ_double (phi x)). rewrite Z.succ_double_spec; generalize (phi_bounded x); omega. destruct (Z.succ_double (phi x)). simpl; auto. apply phi_phi_inv_positive. compute in H; elim H; auto. Qed. Lemma phi_incr : forall x, phi (incr x) = (Z.succ (phi x)) mod 2^(Z.of_nat size). Proof. intros. pattern x at 1; rewrite <- (phi_inv_phi x). rewrite <- phi_inv_incr. assert (0 <= Z.succ (phi x)). change (Z.succ (phi x)) with ((phi x)+1)%Z; generalize (phi_bounded x); omega. destruct (Z.succ (phi x)). simpl; auto. apply phi_phi_inv_positive. compute in H; elim H; auto. Qed. (** With the previous results, we can deal with [phi o phi_inv] even in the negative case *) Lemma phi_phi_inv_negative : forall p, phi (incr (complement_negative p)) = (Zneg p) mod 2^(Z.of_nat size). Proof. induction p. simpl complement_negative. rewrite phi_incr in IHp. rewrite incr_twice, phi_twice_plus_one. remember (phi (complement_negative p)) as q. rewrite Z.succ_double_spec. replace (2*q+1) with (2*(Z.succ q)-1) by omega. rewrite <- Zminus_mod_idemp_l, <- Zmult_mod_idemp_r, IHp. rewrite Zmult_mod_idemp_r, Zminus_mod_idemp_l; auto with zarith. simpl complement_negative. rewrite incr_twice_plus_one, phi_twice. remember (phi (incr (complement_negative p))) as q. rewrite Z.double_spec, IHp, Zmult_mod_idemp_r; auto with zarith. simpl; auto. Qed. Lemma phi_phi_inv : forall z, phi (phi_inv z) = z mod 2 ^ (Z.of_nat size). Proof. destruct z. simpl; auto. apply phi_phi_inv_positive. apply phi_phi_inv_negative. Qed. End Basics. 
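(* Added illustration (hedged, not in the original file): [phi_inv_phi] and
   [phi_phi_inv] together say that int31 faithfully represents Z modulo 2^31:
   going int31 -> Z -> int31 is the identity, while Z -> int31 -> Z reduces
   modulo 2^31.  Concretely, phi (phi_inv (2^31 + 5)) = 5, because
   (2^31 + 5) mod 2^31 = 5. *)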
Instance int31_ops : ZnZ.Ops int31 := { digits := 31%positive; (* number of digits *) zdigits := 31; (* number of digits *) to_Z := phi; (* conversion to Z *) of_pos := positive_to_int31; (* positive -> N*int31 : p => N,i where p = N*2^31+phi i *) head0 := head031; (* number of head 0 *) tail0 := tail031; (* number of tail 0 *) zero := 0; one := 1; minus_one := Tn; (* 2^31 - 1 *) compare := compare31; eq0 := fun i => match i ?= 0 with Eq => true | _ => false end; opp_c := fun i => 0 -c i; opp := opp31; opp_carry := fun i => 0-i-1; succ_c := fun i => i +c 1; add_c := add31c; add_carry_c := add31carryc; succ := fun i => i + 1; add := add31; add_carry := fun i j => i + j + 1; pred_c := fun i => i -c 1; sub_c := sub31c; sub_carry_c := sub31carryc; pred := fun i => i - 1; sub := sub31; sub_carry := fun i j => i - j - 1; mul_c := mul31c; mul := mul31; square_c := fun x => x *c x; div21 := div3121; div_gt := div31; (* this is supposed to be the special case of division a/b where a > b *) div := div31; modulo_gt := fun i j => let (_,r) := i/j in r; modulo := fun i j => let (_,r) := i/j in r; gcd_gt := gcd31; gcd := gcd31; add_mul_div := addmuldiv31; pos_mod := (* modulo 2^p *) fun p i => match p ?= 31 with | Lt => addmuldiv31 p 0 (addmuldiv31 (31-p) i 0) | _ => i end; is_even := fun i => let (_,r) := i/2 in match r ?= 0 with Eq => true | _ => false end; sqrt2 := sqrt312; sqrt := sqrt31; lor := lor31; land := land31; lxor := lxor31 }. Section Int31_Specs. Local Open Scope Z_scope. Notation "[| x |]" := (phi x) (at level 0, x at level 99). Local Notation wB := (2 ^ (Z.of_nat size)). Lemma wB_pos : wB > 0. Proof. auto with zarith. Qed. Notation "[+| c |]" := (interp_carry 1 wB phi c) (at level 0, c at level 99). Notation "[-| c |]" := (interp_carry (-1) wB phi c) (at level 0, c at level 99). Notation "[|| x ||]" := (zn2z_to_Z wB phi x) (at level 0, x at level 99). Lemma spec_zdigits : [| 31 |] = 31. Proof. reflexivity. Qed. Lemma spec_more_than_1_digit: 1 < 31. Proof. auto with zarith. Qed. Lemma spec_0 : [| 0 |] = 0. Proof. reflexivity. Qed. Lemma spec_1 : [| 1 |] = 1. Proof. reflexivity. Qed. Lemma spec_m1 : [| Tn |] = wB - 1. Proof. reflexivity. Qed. Lemma spec_compare : forall x y, (x ?= y)%int31 = ([|x|] ?= [|y|]). Proof. reflexivity. Qed. (** Addition *) Lemma spec_add_c : forall x y, [+|add31c x y|] = [|x|] + [|y|]. Proof. intros; unfold add31c, add31, interp_carry; rewrite phi_phi_inv. generalize (phi_bounded x)(phi_bounded y); intros. set (X:=[|x|]) in *; set (Y:=[|y|]) in *; clearbody X Y. assert ((X+Y) mod wB ?= X+Y <> Eq -> [+|C1 (phi_inv (X+Y))|] = X+Y). unfold interp_carry; rewrite phi_phi_inv, Z.compare_eq_iff; intros. destruct (Z_lt_le_dec (X+Y) wB). contradict H1; auto using Zmod_small with zarith. rewrite <- (Z_mod_plus_full (X+Y) (-1) wB). rewrite Zmod_small; romega. generalize (Z.compare_eq ((X+Y) mod wB) (X+Y)); intros Heq. destruct Z.compare; intros; [ rewrite phi_phi_inv; auto | now apply H1 | now apply H1]. Qed. Lemma spec_succ_c : forall x, [+|add31c x 1|] = [|x|] + 1. Proof. intros; apply spec_add_c. Qed. Lemma spec_add_carry_c : forall x y, [+|add31carryc x y|] = [|x|] + [|y|] + 1. Proof. intros. unfold add31carryc, interp_carry; rewrite phi_phi_inv. generalize (phi_bounded x)(phi_bounded y); intros. set (X:=[|x|]) in *; set (Y:=[|y|]) in *; clearbody X Y. assert ((X+Y+1) mod wB ?= X+Y+1 <> Eq -> [+|C1 (phi_inv (X+Y+1))|] = X+Y+1). unfold interp_carry; rewrite phi_phi_inv, Z.compare_eq_iff; intros. destruct (Z_lt_le_dec (X+Y+1) wB). 
contradict H1; auto using Zmod_small with zarith. rewrite <- (Z_mod_plus_full (X+Y+1) (-1) wB). rewrite Zmod_small; romega. generalize (Z.compare_eq ((X+Y+1) mod wB) (X+Y+1)); intros Heq. destruct Z.compare; intros; [ rewrite phi_phi_inv; auto | now apply H1 | now apply H1]. Qed. Lemma spec_add : forall x y, [|x+y|] = ([|x|] + [|y|]) mod wB. Proof. intros; apply phi_phi_inv. Qed. Lemma spec_add_carry : forall x y, [|x+y+1|] = ([|x|] + [|y|] + 1) mod wB. Proof. unfold add31; intros. repeat rewrite phi_phi_inv. apply Zplus_mod_idemp_l. Qed. Lemma spec_succ : forall x, [|x+1|] = ([|x|] + 1) mod wB. Proof. intros; rewrite <- spec_1; apply spec_add. Qed. (** Substraction *) Lemma spec_sub_c : forall x y, [-|sub31c x y|] = [|x|] - [|y|]. Proof. unfold sub31c, sub31, interp_carry; intros. rewrite phi_phi_inv. generalize (phi_bounded x)(phi_bounded y); intros. set (X:=[|x|]) in *; set (Y:=[|y|]) in *; clearbody X Y. assert ((X-Y) mod wB ?= X-Y <> Eq -> [-|C1 (phi_inv (X-Y))|] = X-Y). unfold interp_carry; rewrite phi_phi_inv, Z.compare_eq_iff; intros. destruct (Z_lt_le_dec (X-Y) 0). rewrite <- (Z_mod_plus_full (X-Y) 1 wB). rewrite Zmod_small; romega. contradict H1; apply Zmod_small; romega. generalize (Z.compare_eq ((X-Y) mod wB) (X-Y)); intros Heq. destruct Z.compare; intros; [ rewrite phi_phi_inv; auto | now apply H1 | now apply H1]. Qed. Lemma spec_sub_carry_c : forall x y, [-|sub31carryc x y|] = [|x|] - [|y|] - 1. Proof. unfold sub31carryc, sub31, interp_carry; intros. rewrite phi_phi_inv. generalize (phi_bounded x)(phi_bounded y); intros. set (X:=[|x|]) in *; set (Y:=[|y|]) in *; clearbody X Y. assert ((X-Y-1) mod wB ?= X-Y-1 <> Eq -> [-|C1 (phi_inv (X-Y-1))|] = X-Y-1). unfold interp_carry; rewrite phi_phi_inv, Z.compare_eq_iff; intros. destruct (Z_lt_le_dec (X-Y-1) 0). rewrite <- (Z_mod_plus_full (X-Y-1) 1 wB). rewrite Zmod_small; romega. contradict H1; apply Zmod_small; romega. generalize (Z.compare_eq ((X-Y-1) mod wB) (X-Y-1)); intros Heq. destruct Z.compare; intros; [ rewrite phi_phi_inv; auto | now apply H1 | now apply H1]. Qed. Lemma spec_sub : forall x y, [|x-y|] = ([|x|] - [|y|]) mod wB. Proof. intros; apply phi_phi_inv. Qed. Lemma spec_sub_carry : forall x y, [|x-y-1|] = ([|x|] - [|y|] - 1) mod wB. Proof. unfold sub31; intros. repeat rewrite phi_phi_inv. apply Zminus_mod_idemp_l. Qed. Lemma spec_opp_c : forall x, [-|sub31c 0 x|] = -[|x|]. Proof. intros; apply spec_sub_c. Qed. Lemma spec_opp : forall x, [|0 - x|] = (-[|x|]) mod wB. Proof. intros; apply phi_phi_inv. Qed. Lemma spec_opp_carry : forall x, [|0 - x - 1|] = wB - [|x|] - 1. Proof. unfold sub31; intros. repeat rewrite phi_phi_inv. change [|1|] with 1; change [|0|] with 0. rewrite <- (Z_mod_plus_full (0-[|x|]) 1 wB). rewrite Zminus_mod_idemp_l. rewrite Zmod_small; generalize (phi_bounded x); romega. Qed. Lemma spec_pred_c : forall x, [-|sub31c x 1|] = [|x|] - 1. Proof. intros; apply spec_sub_c. Qed. Lemma spec_pred : forall x, [|x-1|] = ([|x|] - 1) mod wB. Proof. intros; apply spec_sub. Qed. (** Multiplication *) Lemma phi2_phi_inv2 : forall x, [||phi_inv2 x||] = x mod (wB^2). Proof. assert (forall z, (z / wB) mod wB * wB + z mod wB = z mod wB ^ 2). intros. assert ((z/wB) mod wB = z/wB - (z/wB/wB)*wB). rewrite (Z_div_mod_eq (z/wB) wB wB_pos) at 2; ring. assert (z mod wB = z - (z/wB)*wB). rewrite (Z_div_mod_eq z wB wB_pos) at 2; ring. rewrite H. rewrite H0 at 1. ring_simplify. rewrite Zdiv_Zdiv; auto with zarith. rewrite (Z_div_mod_eq z (wB*wB)) at 2; auto with zarith. change (wB*wB) with (wB^2); ring. unfold phi_inv2. 
destruct x; unfold zn2z_to_Z; rewrite ?phi_phi_inv; change base with wB; auto. Qed. Lemma spec_mul_c : forall x y, [|| mul31c x y ||] = [|x|] * [|y|]. Proof. unfold mul31c; intros. rewrite phi2_phi_inv2. apply Zmod_small. generalize (phi_bounded x)(phi_bounded y); intros. change (wB^2) with (wB * wB). auto using Z.mul_lt_mono_nonneg with zarith. Qed. Lemma spec_mul : forall x y, [|x*y|] = ([|x|] * [|y|]) mod wB. Proof. intros; apply phi_phi_inv. Qed. Lemma spec_square_c : forall x, [|| mul31c x x ||] = [|x|] * [|x|]. Proof. intros; apply spec_mul_c. Qed. (** Division *) Lemma spec_div21 : forall a1 a2 b, wB/2 <= [|b|] -> [|a1|] < [|b|] -> let (q,r) := div3121 a1 a2 b in [|a1|] *wB+ [|a2|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Proof. unfold div3121; intros. generalize (phi_bounded a1)(phi_bounded a2)(phi_bounded b); intros. assert ([|b|]>0) by (auto with zarith). generalize (Z_div_mod (phi2 a1 a2) [|b|] H4) (Z_div_pos (phi2 a1 a2) [|b|] H4). unfold Z.div; destruct (Z.div_eucl (phi2 a1 a2) [|b|]). rewrite ?phi_phi_inv. destruct 1; intros. unfold phi2 in *. change base with wB; change base with wB in H5. change (Z.pow_pos 2 31) with wB; change (Z.pow_pos 2 31) with wB in H. rewrite H5, Z.mul_comm. replace (z0 mod wB) with z0 by (symmetry; apply Zmod_small; omega). replace (z mod wB) with z; auto with zarith. symmetry; apply Zmod_small. split. apply H7; change base with wB; auto with zarith. apply Z.mul_lt_mono_pos_r with [|b|]; [omega| ]. rewrite Z.mul_comm. apply Z.le_lt_trans with ([|b|]*z+z0); [omega| ]. rewrite <- H5. apply Z.le_lt_trans with ([|a1|]*wB+(wB-1)); [omega | ]. replace ([|a1|]*wB+(wB-1)) with (wB*([|a1|]+1)-1) by ring. assert (wB*([|a1|]+1) <= wB*[|b|]); try omega. apply Z.mul_le_mono_nonneg; omega. Qed. Lemma spec_div : forall a b, 0 < [|b|] -> let (q,r) := div31 a b in [|a|] = [|q|] * [|b|] + [|r|] /\ 0 <= [|r|] < [|b|]. Proof. unfold div31; intros. assert ([|b|]>0) by (auto with zarith). generalize (Z_div_mod [|a|] [|b|] H0) (Z_div_pos [|a|] [|b|] H0). unfold Z.div; destruct (Z.div_eucl [|a|] [|b|]). rewrite ?phi_phi_inv. destruct 1; intros. rewrite H1, Z.mul_comm. generalize (phi_bounded a)(phi_bounded b); intros. replace (z0 mod wB) with z0 by (symmetry; apply Zmod_small; omega). replace (z mod wB) with z; auto with zarith. symmetry; apply Zmod_small. split; auto with zarith. apply Z.le_lt_trans with [|a|]; auto with zarith. rewrite H1. apply Z.le_trans with ([|b|]*z); try omega. rewrite <- (Z.mul_1_l z) at 1. apply Z.mul_le_mono_nonneg; auto with zarith. Qed. Lemma spec_mod : forall a b, 0 < [|b|] -> [|let (_,r) := (a/b)%int31 in r|] = [|a|] mod [|b|]. Proof. unfold div31; intros. assert ([|b|]>0) by (auto with zarith). unfold Z.modulo. generalize (Z_div_mod [|a|] [|b|] H0). destruct (Z.div_eucl [|a|] [|b|]). rewrite ?phi_phi_inv. destruct 1; intros. generalize (phi_bounded b); intros. apply Zmod_small; omega. Qed. Lemma phi_gcd : forall i j, [|gcd31 i j|] = Zgcdn (2*size) [|j|] [|i|]. Proof. unfold gcd31. induction (2*size)%nat; intros. reflexivity. simpl euler. unfold compare31. change [|On|] with 0. generalize (phi_bounded j)(phi_bounded i); intros. case_eq [|j|]; intros. simpl; intros. generalize (Zabs_spec [|i|]); omega. simpl. rewrite IHn, H1; f_equal. rewrite spec_mod, H1; auto. rewrite H1; compute; auto. rewrite H1 in H; destruct H as [H _]; compute in H; elim H; auto. Qed. Lemma spec_gcd : forall a b, Zis_gcd [|a|] [|b|] [|gcd31 a b|]. Proof. intros. rewrite phi_gcd. apply Zis_gcd_sym. apply Zgcdn_is_gcd. unfold Zgcd_bound. generalize (phi_bounded b). 
destruct [|b|]. unfold size; auto with zarith. intros (_,H). cut (Pos.size_nat p <= size)%nat; [ omega | rewrite <- Zpower2_Psize; auto]. intros (H,_); compute in H; elim H; auto. Qed. Lemma iter_int31_iter_nat : forall A f i a, iter_int31 i A f a = iter_nat (Z.abs_nat [|i|]) A f a. Proof. intros. unfold iter_int31. rewrite <- recrbis_equiv; auto; unfold recrbis. rewrite <- phibis_aux_equiv. revert i a; induction size. simpl; auto. simpl; intros. case_eq (firstr i); intros H; rewrite 2 IHn; unfold phibis_aux; simpl; rewrite ?H; fold (phibis_aux n (shiftr i)); generalize (phibis_aux_pos n (shiftr i)); intros; set (z := phibis_aux n (shiftr i)) in *; clearbody z; rewrite <- nat_rect_plus. f_equal. rewrite Z.double_spec, <- Z.add_diag. symmetry; apply Zabs2Nat.inj_add; auto with zarith. change (iter_nat (S (Z.abs_nat z) + (Z.abs_nat z))%nat A f a = iter_nat (Z.abs_nat (Z.succ_double z)) A f a); f_equal. rewrite Z.succ_double_spec, <- Z.add_diag. rewrite Zabs2Nat.inj_add; auto with zarith. rewrite Zabs2Nat.inj_add; auto with zarith. change (Z.abs_nat 1) with 1%nat; omega. Qed. Fixpoint addmuldiv31_alt n i j := match n with | O => i | S n => addmuldiv31_alt n (sneakl (firstl j) i) (shiftl j) end. Lemma addmuldiv31_equiv : forall p x y, addmuldiv31 p x y = addmuldiv31_alt (Z.abs_nat [|p|]) x y. Proof. intros. unfold addmuldiv31. rewrite iter_int31_iter_nat. set (n:=Z.abs_nat [|p|]); clearbody n; clear p. revert x y; induction n. simpl; auto. intros. simpl addmuldiv31_alt. replace (S n) with (n+1)%nat by (rewrite plus_comm; auto). rewrite nat_rect_plus; simpl; auto. Qed. Lemma spec_add_mul_div : forall x y p, [|p|] <= Zpos 31 -> [| addmuldiv31 p x y |] = ([|x|] * (2 ^ [|p|]) + [|y|] / (2 ^ ((Zpos 31) - [|p|]))) mod wB. Proof. intros. rewrite addmuldiv31_equiv. assert ([|p|] = Z.of_nat (Z.abs_nat [|p|])). rewrite Zabs2Nat.id_abs; symmetry; apply Z.abs_eq. destruct (phi_bounded p); auto. rewrite H0; rewrite H0 in H; clear H0; rewrite Zabs2Nat.id. set (n := Z.abs_nat [|p|]) in *; clearbody n. assert (n <= 31)%nat. rewrite Nat2Z.inj_le; auto with zarith. clear p H; revert x y. induction n. simpl Z.of_nat; intros. rewrite Z.mul_1_r. replace ([|y|] / 2^(31-0)) with 0. rewrite Z.add_0_r. symmetry; apply Zmod_small; apply phi_bounded. symmetry; apply Zdiv_small; apply phi_bounded. simpl addmuldiv31_alt; intros. rewrite IHn; [ | omega ]. case_eq (firstl y); intros. rewrite phi_twice, Z.double_spec. rewrite phi_twice_firstl; auto. change (Z.double [|y|]) with (2*[|y|]). rewrite Nat2Z.inj_succ, Z.pow_succ_r; auto with zarith. rewrite Zplus_mod; rewrite Zmult_mod_idemp_l; rewrite <- Zplus_mod. f_equal. f_equal. ring. replace (31-Z.of_nat n) with (Z.succ(31-Z.succ(Z.of_nat n))) by ring. rewrite Z.pow_succ_r, <- Zdiv_Zdiv; auto with zarith. rewrite Z.mul_comm, Z_div_mult; auto with zarith. rewrite phi_twice_plus_one, Z.succ_double_spec. rewrite phi_twice; auto. change (Z.double [|y|]) with (2*[|y|]). rewrite Nat2Z.inj_succ, Z.pow_succ_r; auto with zarith. rewrite Zplus_mod; rewrite Zmult_mod_idemp_l; rewrite <- Zplus_mod. rewrite Z.mul_add_distr_r, Z.mul_1_l, <- Z.add_assoc. f_equal. f_equal. ring. assert ((2*[|y|]) mod wB = 2*[|y|] - wB). clear - H. symmetry. apply Zmod_unique with 1; [ | ring ]. generalize (phi_lowerbound _ H) (phi_bounded y). set (wB' := 2^Z.of_nat (pred size)). replace wB with (2*wB'); [ omega | ]. unfold wB'. rewrite <- Z.pow_succ_r, <- Nat2Z.inj_succ by (auto with zarith). f_equal. rewrite H1. 
replace wB with (2^(Z.of_nat n)*2^(31-Z.of_nat n)) by (rewrite <- Zpower_exp; auto with zarith; f_equal; unfold size; ring). unfold Z.sub; rewrite <- Z.mul_opp_l. rewrite Z_div_plus; auto with zarith. ring_simplify. replace (31+-Z.of_nat n) with (Z.succ(31-Z.succ(Z.of_nat n))) by ring. rewrite Z.pow_succ_r, <- Zdiv_Zdiv; auto with zarith. rewrite Z.mul_comm, Z_div_mult; auto with zarith. Qed. Lemma spec_pos_mod : forall w p, [|ZnZ.pos_mod p w|] = [|w|] mod (2 ^ [|p|]). Proof. unfold int31_ops, ZnZ.pos_mod, compare31. change [|31|] with 31%Z. assert (forall w p, 31<=p -> [|w|] = [|w|] mod 2^p). intros. generalize (phi_bounded w). symmetry; apply Zmod_small. split; auto with zarith. apply Z.lt_le_trans with wB; auto with zarith. apply Zpower_le_monotone; auto with zarith. intros. case_eq ([|p|] ?= 31); intros; [ apply H; rewrite (Z.compare_eq _ _ H0); auto with zarith | | apply H; change ([|p|]>31)%Z in H0; auto with zarith ]. change ([|p|]<31) in H0. rewrite spec_add_mul_div by auto with zarith. change [|0|] with 0%Z; rewrite Z.mul_0_l, Z.add_0_l. generalize (phi_bounded p)(phi_bounded w); intros. assert (31-[|p|]<wB). apply Z.le_lt_trans with 31%Z; auto with zarith. compute; auto. assert ([|31-p|]=31-[|p|]). unfold sub31; rewrite phi_phi_inv. change [|31|] with 31%Z. apply Zmod_small; auto with zarith. rewrite spec_add_mul_div by (rewrite H4; auto with zarith). change [|0|] with 0%Z; rewrite Zdiv_0_l, Z.add_0_r. rewrite H4. apply shift_unshift_mod_2; auto with zarith. Qed. (** Shift operations *) Lemma spec_head00: forall x, [|x|] = 0 -> [|head031 x|] = Zpos 31. Proof. intros. generalize (phi_inv_phi x). rewrite H; simpl phi_inv. intros H'; rewrite <- H'. simpl; auto. Qed. Fixpoint head031_alt n x := match n with | O => 0%nat | S n => match firstl x with | D0 => S (head031_alt n (shiftl x)) | D1 => 0%nat end end. Lemma head031_equiv : forall x, [|head031 x|] = Z.of_nat (head031_alt size x). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H). simpl; auto. unfold head031, recl. change On with (phi_inv (Z.of_nat (31-size))). replace (head031_alt size x) with (head031_alt size x + (31 - size))%nat by auto. assert (size <= 31)%nat by auto with arith. revert x H; induction size; intros. simpl; auto. unfold recl_aux; fold recl_aux. unfold head031_alt; fold head031_alt. rewrite H. assert ([|phi_inv (Z.of_nat (31-S n))|] = Z.of_nat (31 - S n)). rewrite phi_phi_inv. apply Zmod_small. split. change 0 with (Z.of_nat O); apply inj_le; omega. apply Z.le_lt_trans with (Z.of_nat 31). apply inj_le; omega. compute; auto. case_eq (firstl x); intros; auto. rewrite plus_Sn_m, plus_n_Sm. replace (S (31 - S n)) with (31 - n)%nat by omega. rewrite <- IHn; [ | omega | ]. f_equal; f_equal. unfold add31. rewrite H1. f_equal. change [|In|] with 1. replace (31-n)%nat with (S (31 - S n))%nat by omega. rewrite Nat2Z.inj_succ; ring. clear - H H2. rewrite (sneakr_shiftl x) in H. rewrite H2 in H. case_eq (iszero (shiftl x)); intros; auto. rewrite (iszero_eq0 _ H0) in H; discriminate. Qed. Lemma phi_nz : forall x, 0 < [|x|] <-> x <> 0%int31. Proof. split; intros. red; intro; subst x; discriminate. assert ([|x|]<>0%Z). contradict H. rewrite <- (phi_inv_phi x); rewrite H; auto. generalize (phi_bounded x); auto with zarith. Qed. Lemma spec_head0 : forall x, 0 < [|x|] -> wB/ 2 <= 2 ^ ([|head031 x|]) * [|x|] < wB. Proof. intros. rewrite head031_equiv. assert (nshiftl x size = 0%int31). apply nshiftl_size. revert x H H0. unfold size at 2 5. induction size. simpl Z.of_nat. intros. 
compute in H0; rewrite H0 in H; discriminate. intros. simpl head031_alt. case_eq (firstl x); intros. rewrite (Nat2Z.inj_succ (head031_alt n (shiftl x))), Z.pow_succ_r; auto with zarith. rewrite <- Z.mul_assoc, Z.mul_comm, <- Z.mul_assoc, <-(Z.mul_comm 2). rewrite <- Z.double_spec, <- (phi_twice_firstl _ H1). apply IHn. rewrite phi_nz; rewrite phi_nz in H; contradict H. change twice with shiftl in H. rewrite (sneakr_shiftl x), H1, H; auto. rewrite <- nshiftl_S_tail; auto. change (2^(Z.of_nat 0)) with 1; rewrite Z.mul_1_l. generalize (phi_bounded x); unfold size; split; auto with zarith. change (2^(Z.of_nat 31)/2) with (2^(Z.of_nat (pred size))). apply phi_lowerbound; auto. Qed. Lemma spec_tail00: forall x, [|x|] = 0 -> [|tail031 x|] = Zpos 31. Proof. intros. generalize (phi_inv_phi x). rewrite H; simpl phi_inv. intros H'; rewrite <- H'. simpl; auto. Qed. Fixpoint tail031_alt n x := match n with | O => 0%nat | S n => match firstr x with | D0 => S (tail031_alt n (shiftr x)) | D1 => 0%nat end end. Lemma tail031_equiv : forall x, [|tail031 x|] = Z.of_nat (tail031_alt size x). Proof. intros. case_eq (iszero x); intros. rewrite (iszero_eq0 _ H). simpl; auto. unfold tail031, recr. change On with (phi_inv (Z.of_nat (31-size))). replace (tail031_alt size x) with (tail031_alt size x + (31 - size))%nat by auto. assert (size <= 31)%nat by auto with arith. revert x H; induction size; intros. simpl; auto. unfold recr_aux; fold recr_aux. unfold tail031_alt; fold tail031_alt. rewrite H. assert ([|phi_inv (Z.of_nat (31-S n))|] = Z.of_nat (31 - S n)). rewrite phi_phi_inv. apply Zmod_small. split. change 0 with (Z.of_nat O); apply inj_le; omega. apply Z.le_lt_trans with (Z.of_nat 31). apply inj_le; omega. compute; auto. case_eq (firstr x); intros; auto. rewrite plus_Sn_m, plus_n_Sm. replace (S (31 - S n)) with (31 - n)%nat by omega. rewrite <- IHn; [ | omega | ]. f_equal; f_equal. unfold add31. rewrite H1. f_equal. change [|In|] with 1. replace (31-n)%nat with (S (31 - S n))%nat by omega. rewrite Nat2Z.inj_succ; ring. clear - H H2. rewrite (sneakl_shiftr x) in H. rewrite H2 in H. case_eq (iszero (shiftr x)); intros; auto. rewrite (iszero_eq0 _ H0) in H; discriminate. Qed. Lemma spec_tail0 : forall x, 0 < [|x|] -> exists y, 0 <= y /\ [|x|] = (2 * y + 1) * (2 ^ [|tail031 x|]). Proof. intros. rewrite tail031_equiv. assert (nshiftr x size = 0%int31). apply nshiftr_size. revert x H H0. induction size. simpl Z.of_nat. intros. compute in H0; rewrite H0 in H; discriminate. intros. simpl tail031_alt. case_eq (firstr x); intros. rewrite (Nat2Z.inj_succ (tail031_alt n (shiftr x))), Z.pow_succ_r; auto with zarith. destruct (IHn (shiftr x)) as (y & Hy1 & Hy2). rewrite phi_nz; rewrite phi_nz in H; contradict H. rewrite (sneakl_shiftr x), H1, H; auto. rewrite <- nshiftr_S_tail; auto. exists y; split; auto. rewrite phi_eqn1; auto. rewrite Z.double_spec, Hy2; ring. exists [|shiftr x|]. split. generalize (phi_bounded (shiftr x)); auto with zarith. rewrite phi_eqn2; auto. rewrite Z.succ_double_spec; simpl; ring. Qed. (* Sqrt *) (* Direct transcription of an old proof of a fortran program in boyer-moore *) Lemma quotient_by_2 a: a - 1 <= (a/2) + (a/2). Proof. case (Z_mod_lt a 2); auto with zarith. intros H1; rewrite Zmod_eq_full; auto with zarith. Qed. Lemma sqrt_main_trick j k: 0 <= j -> 0 <= k -> (j * k) + j <= ((j + k)/2 + 1) ^ 2. Proof. intros Hj; generalize Hj k; pattern j; apply natlike_ind; auto; clear k j Hj. intros _ k Hk; repeat rewrite Z.add_0_l. apply Z.mul_nonneg_nonneg; generalize (Z_div_pos k 2); auto with zarith. 
intros j Hj Hrec _ k Hk; pattern k; apply natlike_ind; auto; clear k Hk. rewrite Z.mul_0_r, Z.add_0_r, Z.add_0_l. generalize (sqr_pos (Z.succ j / 2)) (quotient_by_2 (Z.succ j)); unfold Z.succ. rewrite Z.pow_2_r, Z.mul_add_distr_r; repeat rewrite Z.mul_add_distr_l. auto with zarith. intros k Hk _. replace ((Z.succ j + Z.succ k) / 2) with ((j + k)/2 + 1). generalize (Hrec Hj k Hk) (quotient_by_2 (j + k)). unfold Z.succ; repeat rewrite Z.pow_2_r; repeat rewrite Z.mul_add_distr_r; repeat rewrite Z.mul_add_distr_l. repeat rewrite Z.mul_1_l; repeat rewrite Z.mul_1_r. auto with zarith. rewrite Z.add_comm, <- Z_div_plus_full_l; auto with zarith. apply f_equal2 with (f := Z.div); auto with zarith. Qed. Lemma sqrt_main i j: 0 <= i -> 0 < j -> i < ((j + (i/j))/2 + 1) ^ 2. Proof. intros Hi Hj. assert (Hij: 0 <= i/j) by (apply Z_div_pos; auto with zarith). apply Z.lt_le_trans with (2 := sqrt_main_trick _ _ (Z.lt_le_incl _ _ Hj) Hij). pattern i at 1; rewrite (Z_div_mod_eq i j); case (Z_mod_lt i j); auto with zarith. Qed. Lemma sqrt_init i: 1 < i -> i < (i/2 + 1) ^ 2. Proof. intros Hi. assert (H1: 0 <= i - 2) by auto with zarith. assert (H2: 1 <= (i / 2) ^ 2); auto with zarith. replace i with (1* 2 + (i - 2)); auto with zarith. rewrite Z.pow_2_r, Z_div_plus_full_l; auto with zarith. generalize (sqr_pos ((i - 2)/ 2)) (Z_div_pos (i - 2) 2). rewrite Z.mul_add_distr_r; repeat rewrite Z.mul_add_distr_l. auto with zarith. generalize (quotient_by_2 i). rewrite Z.pow_2_r in H2 |- *; repeat (rewrite Z.mul_add_distr_r || rewrite Z.mul_add_distr_l || rewrite Z.mul_1_l || rewrite Z.mul_1_r). auto with zarith. Qed. Lemma sqrt_test_true i j: 0 <= i -> 0 < j -> i/j >= j -> j ^ 2 <= i. Proof. intros Hi Hj Hd; rewrite Z.pow_2_r. apply Z.le_trans with (j * (i/j)); auto with zarith. apply Z_mult_div_ge; auto with zarith. Qed. Lemma sqrt_test_false i j: 0 <= i -> 0 < j -> i/j < j -> (j + (i/j))/2 < j. Proof. intros Hi Hj H; case (Z.le_gt_cases j ((j + (i/j))/2)); auto. intros H1; contradict H; apply Z.le_ngt. assert (2 * j <= j + (i/j)); auto with zarith. apply Z.le_trans with (2 * ((j + (i/j))/2)); auto with zarith. apply Z_mult_div_ge; auto with zarith. Qed. Lemma sqrt31_step_def rec i j: sqrt31_step rec i j = match (fst (i/j) ?= j)%int31 with Lt => rec i (fst ((j + fst(i/j))/2))%int31 | _ => j end. Proof. unfold sqrt31_step; case div31; intros. simpl; case compare31; auto. Qed. Lemma div31_phi i j: 0 < [|j|] -> [|fst (i/j)%int31|] = [|i|]/[|j|]. intros Hj; generalize (spec_div i j Hj). case div31; intros q r; simpl @fst. intros (H1,H2); apply Zdiv_unique with [|r|]; auto with zarith. rewrite H1; ring. Qed. Lemma sqrt31_step_correct rec i j: 0 < [|i|] -> 0 < [|j|] -> [|i|] < ([|j|] + 1) ^ 2 -> 2 * [|j|] < wB -> (forall j1 : int31, 0 < [|j1|] < [|j|] -> [|i|] < ([|j1|] + 1) ^ 2 -> [|rec i j1|] ^ 2 <= [|i|] < ([|rec i j1|] + 1) ^ 2) -> [|sqrt31_step rec i j|] ^ 2 <= [|i|] < ([|sqrt31_step rec i j|] + 1) ^ 2. Proof. assert (Hp2: 0 < [|2|]) by exact (eq_refl Lt). intros Hi Hj Hij H31 Hrec; rewrite sqrt31_step_def. rewrite spec_compare, div31_phi; auto. case Z.compare_spec; auto; intros Hc; try (split; auto; apply sqrt_test_true; auto with zarith; fail). apply Hrec; repeat rewrite div31_phi; auto with zarith. replace [|(j + fst (i / j)%int31)|] with ([|j|] + [|i|] / [|j|]). split. apply Z.le_succ_l in Hj. change (1 <= [|j|]) in Hj. Z.le_elim Hj. replace ([|j|] + [|i|]/[|j|]) with (1 * 2 + (([|j|] - 2) + [|i|] / [|j|])); try ring. rewrite Z_div_plus_full_l; auto with zarith. 
assert (0 <= [|i|]/ [|j|]) by (apply Z_div_pos; auto with zarith). assert (0 <= ([|j|] - 2 + [|i|] / [|j|]) / [|2|]) ; auto with zarith. rewrite <- Hj, Zdiv_1_r. replace (1 + [|i|])%Z with (1 * 2 + ([|i|] - 1))%Z; try ring. rewrite Z_div_plus_full_l; auto with zarith. assert (0 <= ([|i|] - 1) /2)%Z by (apply Z_div_pos; auto with zarith). change ([|2|]) with 2%Z; auto with zarith. apply sqrt_test_false; auto with zarith. rewrite spec_add, div31_phi; auto. symmetry; apply Zmod_small. split; auto with zarith. replace [|j + fst (i / j)%int31|] with ([|j|] + [|i|] / [|j|]). apply sqrt_main; auto with zarith. rewrite spec_add, div31_phi; auto. symmetry; apply Zmod_small. split; auto with zarith. Qed. Lemma iter31_sqrt_correct n rec i j: 0 < [|i|] -> 0 < [|j|] -> [|i|] < ([|j|] + 1) ^ 2 -> 2 * [|j|] < 2 ^ (Z.of_nat size) -> (forall j1, 0 < [|j1|] -> 2^(Z.of_nat n) + [|j1|] <= [|j|] -> [|i|] < ([|j1|] + 1) ^ 2 -> 2 * [|j1|] < 2 ^ (Z.of_nat size) -> [|rec i j1|] ^ 2 <= [|i|] < ([|rec i j1|] + 1) ^ 2) -> [|iter31_sqrt n rec i j|] ^ 2 <= [|i|] < ([|iter31_sqrt n rec i j|] + 1) ^ 2. Proof. revert rec i j; elim n; unfold iter31_sqrt; fold iter31_sqrt; clear n. intros rec i j Hi Hj Hij H31 Hrec; apply sqrt31_step_correct; auto with zarith. intros; apply Hrec; auto with zarith. rewrite Z.pow_0_r; auto with zarith. intros n Hrec rec i j Hi Hj Hij H31 HHrec. apply sqrt31_step_correct; auto. intros j1 Hj1 Hjp1; apply Hrec; auto with zarith. intros j2 Hj2 H2j2 Hjp2 Hj31; apply Hrec; auto with zarith. intros j3 Hj3 Hpj3. apply HHrec; auto. rewrite Nat2Z.inj_succ, Z.pow_succ_r. apply Z.le_trans with (2 ^Z.of_nat n + [|j2|]); auto with zarith. apply Nat2Z.is_nonneg. Qed. Lemma spec_sqrt : forall x, [|sqrt31 x|] ^ 2 <= [|x|] < ([|sqrt31 x|] + 1) ^ 2. Proof. intros i; unfold sqrt31. rewrite spec_compare. case Z.compare_spec; change [|1|] with 1; intros Hi; auto with zarith. repeat rewrite Z.pow_2_r; auto with zarith. apply iter31_sqrt_correct; auto with zarith. rewrite div31_phi; change ([|2|]) with 2; auto with zarith. replace ([|i|]) with (1 * 2 + ([|i|] - 2))%Z; try ring. assert (0 <= ([|i|] - 2)/2)%Z by (apply Z_div_pos; auto with zarith). rewrite Z_div_plus_full_l; auto with zarith. rewrite div31_phi; change ([|2|]) with 2; auto with zarith. apply sqrt_init; auto. rewrite div31_phi; change ([|2|]) with 2; auto with zarith. apply Z.le_lt_trans with ([|i|]). apply Z_mult_div_ge; auto with zarith. case (phi_bounded i); auto. intros j2 H1 H2; contradict H2; apply Z.lt_nge. rewrite div31_phi; change ([|2|]) with 2; auto with zarith. apply Z.le_lt_trans with ([|i|]); auto with zarith. assert (0 <= [|i|]/2)%Z by (apply Z_div_pos; auto with zarith). apply Z.le_trans with (2 * ([|i|]/2)); auto with zarith. apply Z_mult_div_ge; auto with zarith. case (phi_bounded i); unfold size; auto with zarith. change [|0|] with 0; auto with zarith. case (phi_bounded i); repeat rewrite Z.pow_2_r; auto with zarith. Qed. Lemma sqrt312_step_def rec ih il j: sqrt312_step rec ih il j = match (ih ?= j)%int31 with Eq => j | Gt => j | _ => match (fst (div3121 ih il j) ?= j)%int31 with Lt => let m := match j +c fst (div3121 ih il j) with C0 m1 => fst (m1/2)%int31 | C1 m1 => (fst (m1/2) + v30)%int31 end in rec ih il m | _ => j end end. Proof. unfold sqrt312_step; case div3121; intros. simpl; case compare31; auto. Qed. Lemma sqrt312_lower_bound ih il j: phi2 ih il < ([|j|] + 1) ^ 2 -> [|ih|] <= [|j|]. Proof. intros H1. case (phi_bounded j); intros Hbj _. case (phi_bounded il); intros Hbil _. case (phi_bounded ih); intros Hbih Hbih1. 
assert (([|ih|] < [|j|] + 1)%Z); auto with zarith. apply Z.square_lt_simpl_nonneg; auto with zarith. repeat rewrite <-Z.pow_2_r; apply Z.le_lt_trans with (2 := H1). apply Z.le_trans with ([|ih|] * base)%Z; unfold phi2, base; try rewrite Z.pow_2_r; auto with zarith. Qed. Lemma div312_phi ih il j: (2^30 <= [|j|] -> [|ih|] < [|j|] -> [|fst (div3121 ih il j)|] = phi2 ih il/[|j|])%Z. Proof. intros Hj Hj1. generalize (spec_div21 ih il j Hj Hj1). case div3121; intros q r (Hq, Hr). apply Zdiv_unique with (phi r); auto with zarith. simpl @fst; apply eq_trans with (1 := Hq); ring. Qed. Lemma sqrt312_step_correct rec ih il j: 2 ^ 29 <= [|ih|] -> 0 < [|j|] -> phi2 ih il < ([|j|] + 1) ^ 2 -> (forall j1, 0 < [|j1|] < [|j|] -> phi2 ih il < ([|j1|] + 1) ^ 2 -> [|rec ih il j1|] ^ 2 <= phi2 ih il < ([|rec ih il j1|] + 1) ^ 2) -> [|sqrt312_step rec ih il j|] ^ 2 <= phi2 ih il < ([|sqrt312_step rec ih il j|] + 1) ^ 2. Proof. assert (Hp2: (0 < [|2|])%Z) by exact (eq_refl Lt). intros Hih Hj Hij Hrec; rewrite sqrt312_step_def. assert (H1: ([|ih|] <= [|j|])%Z) by (apply sqrt312_lower_bound with il; auto). case (phi_bounded ih); intros Hih1 _. case (phi_bounded il); intros Hil1 _. case (phi_bounded j); intros _ Hj1. assert (Hp3: (0 < phi2 ih il)). unfold phi2; apply Z.lt_le_trans with ([|ih|] * base)%Z; auto with zarith. apply Z.mul_pos_pos; auto with zarith. apply Z.lt_le_trans with (2:= Hih); auto with zarith. rewrite spec_compare. case Z.compare_spec; intros Hc1. split; auto. apply sqrt_test_true; auto. unfold phi2, base; auto with zarith. unfold phi2; rewrite Hc1. assert (0 <= [|il|]/[|j|]) by (apply Z_div_pos; auto with zarith). rewrite Z.mul_comm, Z_div_plus_full_l; unfold base; auto with zarith. simpl wB in Hj1. unfold Z.pow_pos in Hj1. simpl in Hj1. auto with zarith. case (Z.le_gt_cases (2 ^ 30) [|j|]); intros Hjj. rewrite spec_compare; case Z.compare_spec; rewrite div312_phi; auto; intros Hc; try (split; auto; apply sqrt_test_true; auto with zarith; fail). apply Hrec. assert (Hf1: 0 <= phi2 ih il/ [|j|]) by (apply Z_div_pos; auto with zarith). apply Z.le_succ_l in Hj. change (1 <= [|j|]) in Hj. Z.le_elim Hj. 2: contradict Hc; apply Z.le_ngt; rewrite <- Hj, Zdiv_1_r; auto with zarith. assert (Hf3: 0 < ([|j|] + phi2 ih il / [|j|]) / 2). replace ([|j|] + phi2 ih il/ [|j|])%Z with (1 * 2 + (([|j|] - 2) + phi2 ih il / [|j|])); try ring. rewrite Z_div_plus_full_l; auto with zarith. assert (0 <= ([|j|] - 2 + phi2 ih il / [|j|]) / 2) ; auto with zarith. assert (Hf4: ([|j|] + phi2 ih il / [|j|]) / 2 < [|j|]). apply sqrt_test_false; auto with zarith. generalize (spec_add_c j (fst (div3121 ih il j))). unfold interp_carry; case add31c; intros r; rewrite div312_phi; auto with zarith. rewrite div31_phi; change [|2|] with 2%Z; auto with zarith. intros HH; rewrite HH; clear HH; auto with zarith. rewrite spec_add, div31_phi; change [|2|] with 2%Z; auto. rewrite Z.mul_1_l; intros HH. rewrite Z.add_comm, <- Z_div_plus_full_l; auto with zarith. change (phi v30 * 2) with (2 ^ Z.of_nat size). rewrite HH, Zmod_small; auto with zarith. replace (phi match j +c fst (div3121 ih il j) with | C0 m1 => fst (m1 / 2)%int31 | C1 m1 => fst (m1 / 2)%int31 + v30 end) with ((([|j|] + (phi2 ih il)/([|j|]))/2)). apply sqrt_main; auto with zarith. generalize (spec_add_c j (fst (div3121 ih il j))). unfold interp_carry; case add31c; intros r; rewrite div312_phi; auto with zarith. rewrite div31_phi; auto with zarith. intros HH; rewrite HH; auto with zarith. intros HH; rewrite <- HH. change (1 * 2 ^ Z.of_nat size) with (phi (v30) * 2). 
rewrite Z_div_plus_full_l; auto with zarith. rewrite Z.add_comm. rewrite spec_add, Zmod_small. rewrite div31_phi; auto. split; auto with zarith. case (phi_bounded (fst (r/2)%int31)); case (phi_bounded v30); auto with zarith. rewrite div31_phi; change (phi 2) with 2%Z; auto. change (2 ^Z.of_nat size) with (base/2 + phi v30). assert (phi r / 2 < base/2); auto with zarith. apply Z.mul_lt_mono_pos_r with 2; auto with zarith. change (base/2 * 2) with base. apply Z.le_lt_trans with (phi r). rewrite Z.mul_comm; apply Z_mult_div_ge; auto with zarith. case (phi_bounded r); auto with zarith. contradict Hij; apply Z.le_ngt. assert ((1 + [|j|]) <= 2 ^ 30); auto with zarith. apply Z.le_trans with ((2 ^ 30) * (2 ^ 30)); auto with zarith. assert (0 <= 1 + [|j|]); auto with zarith. apply Z.mul_le_mono_nonneg; auto with zarith. change ((2 ^ 30) * (2 ^ 30)) with ((2 ^ 29) * base). apply Z.le_trans with ([|ih|] * base); auto with zarith. unfold phi2, base; auto with zarith. split; auto. apply sqrt_test_true; auto. unfold phi2, base; auto with zarith. apply Z.le_ge; apply Z.le_trans with (([|j|] * base)/[|j|]). rewrite Z.mul_comm, Z_div_mult; auto with zarith. apply Z.ge_le; apply Z_div_ge; auto with zarith. Qed. Lemma iter312_sqrt_correct n rec ih il j: 2^29 <= [|ih|] -> 0 < [|j|] -> phi2 ih il < ([|j|] + 1) ^ 2 -> (forall j1, 0 < [|j1|] -> 2^(Z.of_nat n) + [|j1|] <= [|j|] -> phi2 ih il < ([|j1|] + 1) ^ 2 -> [|rec ih il j1|] ^ 2 <= phi2 ih il < ([|rec ih il j1|] + 1) ^ 2) -> [|iter312_sqrt n rec ih il j|] ^ 2 <= phi2 ih il < ([|iter312_sqrt n rec ih il j|] + 1) ^ 2. Proof. revert rec ih il j; elim n; unfold iter312_sqrt; fold iter312_sqrt; clear n. intros rec ih il j Hi Hj Hij Hrec; apply sqrt312_step_correct; auto with zarith. intros; apply Hrec; auto with zarith. rewrite Z.pow_0_r; auto with zarith. intros n Hrec rec ih il j Hi Hj Hij HHrec. apply sqrt312_step_correct; auto. intros j1 Hj1 Hjp1; apply Hrec; auto with zarith. intros j2 Hj2 H2j2 Hjp2; apply Hrec; auto with zarith. intros j3 Hj3 Hpj3. apply HHrec; auto. rewrite Nat2Z.inj_succ, Z.pow_succ_r. apply Z.le_trans with (2 ^Z.of_nat n + [|j2|])%Z; auto with zarith. apply Nat2Z.is_nonneg. Qed. (* Avoid expanding [iter312_sqrt] before variables in the context. *) Strategy 1 [iter312_sqrt]. Lemma spec_sqrt2 : forall x y, wB/ 4 <= [|x|] -> let (s,r) := sqrt312 x y in [||WW x y||] = [|s|] ^ 2 + [+|r|] /\ [+|r|] <= 2 * [|s|]. Proof. intros ih il Hih; unfold sqrt312. change [||WW ih il||] with (phi2 ih il). assert (Hbin: forall s, s * s + 2* s + 1 = (s + 1) ^ 2) by (intros s; ring). assert (Hb: 0 <= base) by (red; intros HH; discriminate). assert (Hi2: phi2 ih il < (phi Tn + 1) ^ 2). { change ((phi Tn + 1) ^ 2) with (2^62). apply Z.le_lt_trans with ((2^31 -1) * base + (2^31 - 1)); auto with zarith. 2: simpl; unfold Z.pow_pos; simpl; auto with zarith. case (phi_bounded ih); case (phi_bounded il); intros H1 H2 H3 H4. unfold base, Z.pow, Z.pow_pos in H2,H4; simpl in H2,H4. unfold phi2. cbn [Z.pow Z.pow_pos Pos.iter]. auto with zarith. } case (iter312_sqrt_correct 31 (fun _ _ j => j) ih il Tn); auto with zarith. change [|Tn|] with 2147483647; auto with zarith. intros j1 _ HH; contradict HH. apply Z.lt_nge. change [|Tn|] with 2147483647; auto with zarith. change (2 ^ Z.of_nat 31) with 2147483648; auto with zarith. case (phi_bounded j1); auto with zarith. set (s := iter312_sqrt 31 (fun _ _ j : int31 => j) ih il Tn). intros Hs1 Hs2. generalize (spec_mul_c s s); case mul31c. simpl zn2z_to_Z; intros HH. assert ([|s|] = 0). { symmetry in HH. rewrite Z.mul_eq_0 in HH. 
destruct HH; auto. } contradict Hs2; apply Z.le_ngt; rewrite H. change ((0 + 1) ^ 2) with 1. apply Z.le_trans with (2 ^ Z.of_nat size / 4 * base). simpl; auto with zarith. apply Z.le_trans with ([|ih|] * base); auto with zarith. unfold phi2; case (phi_bounded il); auto with zarith. intros ih1 il1. change [||WW ih1 il1||] with (phi2 ih1 il1). intros Hihl1. generalize (spec_sub_c il il1). case sub31c; intros il2 Hil2. rewrite spec_compare; case Z.compare_spec. unfold interp_carry in *. intros H1; split. rewrite Z.pow_2_r, <- Hihl1. unfold phi2; ring[Hil2 H1]. replace [|il2|] with (phi2 ih il - phi2 ih1 il1). rewrite Hihl1. rewrite <-Hbin in Hs2; auto with zarith. unfold phi2; rewrite H1, Hil2; ring. unfold interp_carry. intros H1; contradict Hs1. apply Z.lt_nge; rewrite Z.pow_2_r, <-Hihl1. unfold phi2. case (phi_bounded il); intros _ H2. apply Z.lt_le_trans with (([|ih|] + 1) * base + 0). rewrite Z.mul_add_distr_r, Z.add_0_r; auto with zarith. case (phi_bounded il1); intros H3 _. apply Z.add_le_mono; auto with zarith. unfold interp_carry in *; change (1 * 2 ^ Z.of_nat size) with base. rewrite Z.pow_2_r, <- Hihl1, Hil2. intros H1. rewrite <- Z.le_succ_l, <- Z.add_1_r in H1. Z.le_elim H1. contradict Hs2; apply Z.le_ngt. replace (([|s|] + 1) ^ 2) with (phi2 ih1 il1 + 2 * [|s|] + 1). unfold phi2. case (phi_bounded il); intros Hpil _. assert (Hl1l: [|il1|] <= [|il|]). { case (phi_bounded il2); rewrite Hil2; auto with zarith. } assert ([|ih1|] * base + 2 * [|s|] + 1 <= [|ih|] * base); auto with zarith. case (phi_bounded s); change (2 ^ Z.of_nat size) with base; intros _ Hps. case (phi_bounded ih1); intros Hpih1 _; auto with zarith. apply Z.le_trans with (([|ih1|] + 2) * base); auto with zarith. rewrite Z.mul_add_distr_r. assert (2 * [|s|] + 1 <= 2 * base); auto with zarith. rewrite Hihl1, Hbin; auto. split. unfold phi2; rewrite <- H1; ring. replace (base + ([|il|] - [|il1|])) with (phi2 ih il - ([|s|] * [|s|])). rewrite <-Hbin in Hs2; auto with zarith. rewrite <- Hihl1; unfold phi2; rewrite <- H1; ring. unfold interp_carry in Hil2 |- *. unfold interp_carry; change (1 * 2 ^ Z.of_nat size) with base. assert (Hsih: [|ih - 1|] = [|ih|] - 1). { rewrite spec_sub, Zmod_small; auto; change [|1|] with 1. case (phi_bounded ih); intros H1 H2. generalize Hih; change (2 ^ Z.of_nat size / 4) with 536870912. split; auto with zarith. } rewrite spec_compare; case Z.compare_spec. rewrite Hsih. intros H1; split. rewrite Z.pow_2_r, <- Hihl1. unfold phi2; rewrite <-H1. transitivity ([|ih|] * base + [|il1|] + ([|il|] - [|il1|])). ring. rewrite <-Hil2. change (2 ^ Z.of_nat size) with base; ring. replace [|il2|] with (phi2 ih il - phi2 ih1 il1). rewrite Hihl1. rewrite <-Hbin in Hs2; auto with zarith. unfold phi2. rewrite <-H1. ring_simplify. transitivity (base + ([|il|] - [|il1|])). ring. rewrite <-Hil2. change (2 ^ Z.of_nat size) with base; ring. rewrite Hsih; intros H1. assert (He: [|ih|] = [|ih1|]). { apply Z.le_antisymm; auto with zarith. case (Z.le_gt_cases [|ih1|] [|ih|]); auto; intros H2. contradict Hs1; apply Z.lt_nge; rewrite Z.pow_2_r, <-Hihl1. unfold phi2. case (phi_bounded il); change (2 ^ Z.of_nat size) with base; intros _ Hpil1. apply Z.lt_le_trans with (([|ih|] + 1) * base). rewrite Z.mul_add_distr_r, Z.mul_1_l; auto with zarith. case (phi_bounded il1); intros Hpil2 _. apply Z.le_trans with (([|ih1|]) * base); auto with zarith. } rewrite Z.pow_2_r, <-Hihl1; unfold phi2; rewrite <-He. contradict Hs1; apply Z.lt_nge; rewrite Z.pow_2_r, <-Hihl1. unfold phi2; rewrite He. 
assert (phi il - phi il1 < 0); auto with zarith. rewrite <-Hil2. case (phi_bounded il2); auto with zarith. intros H1. rewrite Z.pow_2_r, <-Hihl1. assert (H2 : [|ih1|]+2 <= [|ih|]); auto with zarith. Z.le_elim H2. contradict Hs2; apply Z.le_ngt. replace (([|s|] + 1) ^ 2) with (phi2 ih1 il1 + 2 * [|s|] + 1). unfold phi2. assert ([|ih1|] * base + 2 * phi s + 1 <= [|ih|] * base + ([|il|] - [|il1|])); auto with zarith. rewrite <-Hil2. change (-1 * 2 ^ Z.of_nat size) with (-base). case (phi_bounded il2); intros Hpil2 _. apply Z.le_trans with ([|ih|] * base + - base); auto with zarith. case (phi_bounded s); change (2 ^ Z.of_nat size) with base; intros _ Hps. assert (2 * [|s|] + 1 <= 2 * base); auto with zarith. apply Z.le_trans with ([|ih1|] * base + 2 * base); auto with zarith. assert (Hi: ([|ih1|] + 3) * base <= [|ih|] * base); auto with zarith. rewrite Z.mul_add_distr_r in Hi; auto with zarith. rewrite Hihl1, Hbin; auto. unfold phi2; rewrite <-H2. split. replace [|il|] with (([|il|] - [|il1|]) + [|il1|]); try ring. rewrite <-Hil2. change (-1 * 2 ^ Z.of_nat size) with (-base); ring. replace (base + [|il2|]) with (phi2 ih il - phi2 ih1 il1). rewrite Hihl1. rewrite <-Hbin in Hs2; auto with zarith. unfold phi2; rewrite <-H2. replace [|il|] with (([|il|] - [|il1|]) + [|il1|]); try ring. rewrite <-Hil2. change (-1 * 2 ^ Z.of_nat size) with (-base); ring. Qed. (** [iszero] *) Lemma spec_eq0 : forall x, ZnZ.eq0 x = true -> [|x|] = 0. Proof. clear; unfold ZnZ.eq0, int31_ops. unfold compare31; intros. change [|0|] with 0 in H. apply Z.compare_eq. now destruct ([|x|] ?= 0). Qed. (* Even *) Lemma spec_is_even : forall x, if ZnZ.is_even x then [|x|] mod 2 = 0 else [|x|] mod 2 = 1. Proof. unfold ZnZ.is_even, int31_ops; intros. generalize (spec_div x 2). destruct (x/2)%int31 as (q,r); intros. unfold compare31. change [|2|] with 2 in H. change [|0|] with 0. destruct H; auto with zarith. replace ([|x|] mod 2) with [|r|]. destruct H; auto with zarith. case Z.compare_spec; auto with zarith. apply Zmod_unique with [|q|]; auto with zarith. Qed. (* Bitwise *) Lemma log2_phi_bounded x : Z.log2 [|x|] < Z.of_nat size. Proof. destruct (phi_bounded x) as (H,H'). Z.le_elim H. - now apply Z.log2_lt_pow2. - now rewrite <- H. Qed. Lemma spec_lor x y : [| ZnZ.lor x y |] = Z.lor [|x|] [|y|]. Proof. unfold ZnZ.lor,int31_ops. unfold lor31. rewrite phi_phi_inv. apply Z.mod_small; split; trivial. - apply Z.lor_nonneg; split; apply phi_bounded. - apply Z.log2_lt_cancel. rewrite Z.log2_pow2 by easy. rewrite Z.log2_lor; try apply phi_bounded. apply Z.max_lub_lt; apply log2_phi_bounded. Qed. Lemma spec_land x y : [| ZnZ.land x y |] = Z.land [|x|] [|y|]. Proof. unfold ZnZ.land, int31_ops. unfold land31. rewrite phi_phi_inv. apply Z.mod_small; split; trivial. - apply Z.land_nonneg; left; apply phi_bounded. - apply Z.log2_lt_cancel. rewrite Z.log2_pow2 by easy. eapply Z.le_lt_trans. apply Z.log2_land; try apply phi_bounded. apply Z.min_lt_iff; left; apply log2_phi_bounded. Qed. Lemma spec_lxor x y : [| ZnZ.lxor x y |] = Z.lxor [|x|] [|y|]. Proof. unfold ZnZ.lxor, int31_ops. unfold lxor31. rewrite phi_phi_inv. apply Z.mod_small; split; trivial. - apply Z.lxor_nonneg; split; intros; apply phi_bounded. - apply Z.log2_lt_cancel. rewrite Z.log2_pow2 by easy. eapply Z.le_lt_trans. apply Z.log2_lxor; try apply phi_bounded. apply Z.max_lub_lt; apply log2_phi_bounded. Qed. 
Global Instance int31_specs : ZnZ.Specs int31_ops := { spec_to_Z := phi_bounded; spec_of_pos := positive_to_int31_spec; spec_zdigits := spec_zdigits; spec_more_than_1_digit := spec_more_than_1_digit; spec_0 := spec_0; spec_1 := spec_1; spec_m1 := spec_m1; spec_compare := spec_compare; spec_eq0 := spec_eq0; spec_opp_c := spec_opp_c; spec_opp := spec_opp; spec_opp_carry := spec_opp_carry; spec_succ_c := spec_succ_c; spec_add_c := spec_add_c; spec_add_carry_c := spec_add_carry_c; spec_succ := spec_succ; spec_add := spec_add; spec_add_carry := spec_add_carry; spec_pred_c := spec_pred_c; spec_sub_c := spec_sub_c; spec_sub_carry_c := spec_sub_carry_c; spec_pred := spec_pred; spec_sub := spec_sub; spec_sub_carry := spec_sub_carry; spec_mul_c := spec_mul_c; spec_mul := spec_mul; spec_square_c := spec_square_c; spec_div21 := spec_div21; spec_div_gt := fun a b _ => spec_div a b; spec_div := spec_div; spec_modulo_gt := fun a b _ => spec_mod a b; spec_modulo := spec_mod; spec_gcd_gt := fun a b _ => spec_gcd a b; spec_gcd := spec_gcd; spec_head00 := spec_head00; spec_head0 := spec_head0; spec_tail00 := spec_tail00; spec_tail0 := spec_tail0; spec_add_mul_div := spec_add_mul_div; spec_pos_mod := spec_pos_mod; spec_is_even := spec_is_even; spec_sqrt2 := spec_sqrt2; spec_sqrt := spec_sqrt; spec_lor := spec_lor; spec_land := spec_land; spec_lxor := spec_lxor }. End Int31_Specs. Module Int31Cyclic <: CyclicType. Definition t := int31. Definition ops := int31_ops. Definition specs := int31_specs. End Int31Cyclic.
//***************************************************************************** // (c) Copyright 2008 - 2013 Xilinx, Inc. All rights reserved. // // This file contains confidential and proprietary information // of Xilinx, Inc. and is protected under U.S. and // international copyright and other intellectual property // laws. // // DISCLAIMER // This disclaimer is not a license and does not grant any // rights to the materials distributed herewith. Except as // otherwise provided in a valid license issued to you by // Xilinx, and to the maximum extent permitted by applicable // law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND // WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES // AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING // BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- // INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and // (2) Xilinx shall not be liable (whether in contract or tort, // including negligence, or under any other theory of // liability) for any loss or damage of any kind or nature // related to, arising under or in connection with these // materials, including for any direct, or any indirect, // special, incidental, or consequential loss or damage // (including loss of data, profits, goodwill, or any type of // loss or damage suffered as a result of any action brought // by a third party) even if such damage or loss was // reasonably foreseeable or Xilinx had been advised of the // possibility of the same. // // CRITICAL APPLICATIONS // Xilinx products are not designed or intended to be fail- // safe, or for use in any application requiring fail-safe // performance, such as life-support or safety devices or // systems, Class III medical devices, nuclear facilities, // applications related to the deployment of airbags, or any // other applications that could lead to death, personal // injury, or severe property or environmental damage // (individually and collectively, "Critical // Applications"). Customer assumes the sole risk and // liability of any use of Xilinx products in Critical // Applications, subject only to applicable laws and // regulations governing limitations on product liability. // // THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS // PART OF THIS FILE AT ALL TIMES. // //***************************************************************************** // ____ ____ // / /\/ / // /___/ \ / Vendor : Xilinx // \ \ \/ Version : %version // \ \ Application : MIG // / / Filename : ecc_buf.v // /___/ /\ Date Last Modified : $date$ // \ \ / \ Date Created : Tue Jun 30 2009 // \___\/\___\ // //Device : 7-Series //Design Name : DDR3 SDRAM //Purpose : //Reference : //Revision History : //***************************************************************************** `timescale 1ps/1ps module mig_7series_v1_9_ecc_buf #( parameter TCQ = 100, parameter PAYLOAD_WIDTH = 64, parameter DATA_BUF_ADDR_WIDTH = 4, parameter DATA_BUF_OFFSET_WIDTH = 1, parameter DATA_WIDTH = 64, parameter nCK_PER_CLK = 4 ) ( /*AUTOARG*/ // Outputs rd_merge_data, // Inputs clk, rst, rd_data_addr, rd_data_offset, wr_data_addr, wr_data_offset, rd_data, wr_ecc_buf ); input clk; input rst; // RMW architecture supports only 16 data buffer entries. // Allow DATA_BUF_ADDR_WIDTH to be greater than 4, but // assume the upper bits are used for tagging. 
input [DATA_BUF_ADDR_WIDTH-1:0] rd_data_addr; input [DATA_BUF_OFFSET_WIDTH-1:0] rd_data_offset; wire [4:0] buf_wr_addr; input [DATA_BUF_ADDR_WIDTH-1:0] wr_data_addr; input [DATA_BUF_OFFSET_WIDTH-1:0] wr_data_offset; reg [4:0] buf_rd_addr_r; generate if (DATA_BUF_ADDR_WIDTH >= 4) begin : ge_4_addr_bits always @(posedge clk) buf_rd_addr_r <= #TCQ{wr_data_addr[3:0], wr_data_offset}; assign buf_wr_addr = {rd_data_addr[3:0], rd_data_offset}; end else begin : lt_4_addr_bits always @(posedge clk) buf_rd_addr_r <= #TCQ{{4-DATA_BUF_ADDR_WIDTH{1'b0}}, wr_data_addr[DATA_BUF_ADDR_WIDTH-1:0], wr_data_offset}; assign buf_wr_addr = {{4-DATA_BUF_ADDR_WIDTH{1'b0}}, rd_data_addr[DATA_BUF_ADDR_WIDTH-1:0], rd_data_offset}; end endgenerate input [2*nCK_PER_CLK*PAYLOAD_WIDTH-1:0] rd_data; reg [2*nCK_PER_CLK*DATA_WIDTH-1:0] payload; integer h; always @(/*AS*/rd_data) for (h=0; h<2*nCK_PER_CLK; h=h+1) payload[h*DATA_WIDTH+:DATA_WIDTH] = rd_data[h*PAYLOAD_WIDTH+:DATA_WIDTH]; input wr_ecc_buf; localparam BUF_WIDTH = 2*nCK_PER_CLK*DATA_WIDTH; localparam FULL_RAM_CNT = (BUF_WIDTH/6); localparam REMAINDER = BUF_WIDTH % 6; localparam RAM_CNT = FULL_RAM_CNT + ((REMAINDER == 0 ) ? 0 : 1); localparam RAM_WIDTH = (RAM_CNT*6); wire [RAM_WIDTH-1:0] buf_out_data; generate begin : ram_buf wire [RAM_WIDTH-1:0] buf_in_data; if (REMAINDER == 0) assign buf_in_data = payload; else assign buf_in_data = {{6-REMAINDER{1'b0}}, payload}; genvar i; for (i=0; i<RAM_CNT; i=i+1) begin : rd_buffer_ram RAM32M #(.INIT_A(64'h0000000000000000), .INIT_B(64'h0000000000000000), .INIT_C(64'h0000000000000000), .INIT_D(64'h0000000000000000) ) RAM32M0 ( .DOA(buf_out_data[((i*6)+4)+:2]), .DOB(buf_out_data[((i*6)+2)+:2]), .DOC(buf_out_data[((i*6)+0)+:2]), .DOD(), .DIA(buf_in_data[((i*6)+4)+:2]), .DIB(buf_in_data[((i*6)+2)+:2]), .DIC(buf_in_data[((i*6)+0)+:2]), .DID(2'b0), .ADDRA(buf_rd_addr_r), .ADDRB(buf_rd_addr_r), .ADDRC(buf_rd_addr_r), .ADDRD(buf_wr_addr), .WE(wr_ecc_buf), .WCLK(clk) ); end // block: rd_buffer_ram end endgenerate output wire [2*nCK_PER_CLK*DATA_WIDTH-1:0] rd_merge_data; assign rd_merge_data = buf_out_data[2*nCK_PER_CLK*DATA_WIDTH-1:0]; endmodule
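// ----------------------------------------------------------------------
// A minimal instantiation sketch for mig_7series_v1_9_ecc_buf above, using
// its default parameters (PAYLOAD_WIDTH = DATA_WIDTH = 64, nCK_PER_CLK = 4,
// so the data buses are 2*4*64 = 512 bits wide). The wrapper module name and
// port wiring are illustrative only; simulating or synthesizing this also
// requires the Xilinx RAM32M primitive from the unisim library.
// ----------------------------------------------------------------------
module ecc_buf_hookup_example (
  input          clk,
  input          rst,
  input  [3:0]   rd_data_addr,   // read transaction buffer entry (capture side)
  input          rd_data_offset, // offset within the read transaction entry
  input  [3:0]   wr_data_addr,   // write transaction buffer entry (merge side)
  input          wr_data_offset, // offset within the write transaction entry
  input  [511:0] rd_data,        // read data captured into the buffer
  input          wr_ecc_buf,     // write strobe into the buffer RAM
  output [511:0] rd_merge_data   // buffered data read back for the RMW merge
);

  mig_7series_v1_9_ecc_buf #(
    .TCQ                   (100),
    .PAYLOAD_WIDTH         (64),
    .DATA_BUF_ADDR_WIDTH   (4),
    .DATA_BUF_OFFSET_WIDTH (1),
    .DATA_WIDTH            (64),
    .nCK_PER_CLK           (4)
  ) u_ecc_buf (
    .clk            (clk),
    .rst            (rst),
    .rd_data_addr   (rd_data_addr),
    .rd_data_offset (rd_data_offset),
    .wr_data_addr   (wr_data_addr),
    .wr_data_offset (wr_data_offset),
    .rd_data        (rd_data),
    .wr_ecc_buf     (wr_ecc_buf),
    .rd_merge_data  (rd_merge_data)
  );

endmodule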
// ---------------------------------------------------------------------- // Copyright (c) 2016, The Regents of the University of California All // rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents of the University of California // nor the names of its contributors may be used to endorse or // promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE // UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS // OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // ---------------------------------------------------------------------- //---------------------------------------------------------------------------- // Filename: ff.v // Version: 1.00.a // Verilog Standard: Verilog-2001 // Description: A D/Q flip flop. // Author: Matt Jacobsen // History: @mattj: Version 2.0 //----------------------------------------------------------------------------- `timescale 1ns/1ns module ff ( input CLK, input D, output reg Q ); always @ (posedge CLK) begin Q <= #1 D; end endmodule
`timescale 1ns / 1ps ////////////////////////////////////////////////////////////////////////////////// // Company: // Engineer: // // Create Date: 04/26/2016 08:42:14 AM // Design Name: // Module Name: ROM_test // Project Name: // Target Devices: // Tool Versions: // Description: // // Dependencies: // // Revision: // Revision 0.01 - File Created // Additional Comments: // ////////////////////////////////////////////////////////////////////////////////// module ROM_test #(parameter W=32, parameter N=0) ( input wire [9:0] address, output reg [W-1:0] data ); localparam ROM_FILE32_A ="/media/francis/Acer/Proyecto de graduacion/Link to GitHub/Proyecto_De_Graduacion/TXTVerification/Hexadecimal_A.txt"; localparam ROM_FILE64_A="/media/francis/Acer/Proyecto de graduacion/Link to GitHub/Proyecto_De_Graduacion/TXTVerification/Hexadecimal_A.txt"; localparam ROM_FILE32_B= "/media/francis/Acer/Proyecto de graduacion/Link to GitHub/Proyecto_De_Graduacion/TXTVerification/Hexadecimal_B.txt"; localparam ROM_FILE64_B= "/media/francis/Acer/Proyecto de graduacion/Link to GitHub/Proyecto_De_Graduacion/TXTVerification/Hexadecimal_B.txt"; //(* rom_style="{distributed | block}" *) reg [W-1:0] rom_test [1023:0]; generate if(W==32) initial begin if(N==0) $readmemh(ROM_FILE32_A, rom_test, 0, 1023); else $readmemh(ROM_FILE32_B, rom_test, 0, 1023); end else initial begin if(N==0) $readmemh(ROM_FILE64_A, rom_test, 0, 1023); else $readmemh(ROM_FILE64_B, rom_test, 0, 1023); end endgenerate always @* begin data = rom_test[address]; end endmodule
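// ----------------------------------------------------------------------
// A minimal simulation sketch for ROM_test above. Note that the ROM contents
// come from the absolute file paths hard-coded in the module, so $readmemh
// only succeeds on a machine where those .txt files exist; the addresses
// stepped through below are arbitrary.
// ----------------------------------------------------------------------
`timescale 1ns / 1ps
module ROM_test_tb;

  reg  [9:0]  address = 10'd0;
  wire [31:0] data;

  // Device under test: 32-bit words, table A (N = 0).
  ROM_test #(.W(32), .N(0)) dut (
    .address (address),
    .data    (data)
  );

  initial begin
    // Step through the first few addresses and print the word read back.
    repeat (4) begin
      #10 $display("rom[%0d] = %h", address, data);
      address = address + 10'd1;
    end
    $finish;
  end

endmodule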
// ---------------------------------------------------------------------- // Copyright (c) 2016, The Regents of the University of California All // rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents of the University of California // nor the names of its contributors may be used to endorse or // promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL REGENTS OF THE // UNIVERSITY OF CALIFORNIA BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS // OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR // TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // ---------------------------------------------------------------------- //---------------------------------------------------------------------------- // Filename: interrupt.v // Version: 1.00.a // Verilog Standard: Verilog-2001 // Description: Manages the interrupt vector and sends interrupts. 
// Author: Matt Jacobsen // History: @mattj: Version 2.0 //----------------------------------------------------------------------------- `define S_INTR_IDLE 2'd0 `define S_INTR_INTR 2'd1 `define S_INTR_CLR_0 2'd2 `define S_INTR_CLR_1 2'd3 `timescale 1ns/1ns module interrupt #( parameter C_NUM_CHNL = 4'd12 ) ( input CLK, input RST, input [C_NUM_CHNL-1:0] RX_SG_BUF_RECVD, // The scatter gather data for a rx_port transaction has been read input [C_NUM_CHNL-1:0] RX_TXN_DONE, // The rx_port transaction is done input [C_NUM_CHNL-1:0] TX_TXN, // New tx_port transaction input [C_NUM_CHNL-1:0] TX_SG_BUF_RECVD, // The scatter gather data for a tx_port transaction has been read input [C_NUM_CHNL-1:0] TX_TXN_DONE, // The tx_port transaction is done input VECT_0_RST, // Interrupt vector 0 reset input VECT_1_RST, // Interrupt vector 1 reset input [31:0] VECT_RST, // Interrupt vector reset value output [31:0] VECT_0, // Interrupt vector 0 output [31:0] VECT_1, // Interrupt vector 1 input INTR_LEGACY_CLR, // Pulsed high to ack the legacy interrupt and clear it input CONFIG_INTERRUPT_MSIENABLE, // 1 if MSI interrupts are enable, 0 if only legacy are supported input INTR_MSI_RDY, // High when interrupt is able to be sent output INTR_MSI_REQUEST // High to request interrupt, when both INTR_MSI_RDY and INTR_MSI_REQUEST are high, interrupt is sent ); reg [1:0] rState=0; reg [31:0] rVect0=0; reg [31:0] rVect1=0; wire [31:0] wVect0; wire [31:0] wVect1; wire wIntr = (rState == `S_INTR_INTR); wire wIntrDone; assign VECT_0 = rVect0; assign VECT_1 = rVect1; // Align the input signals to the interrupt vector. // VECT_0/VECT_1 are organized from right to left (LSB to MSB) as: // [ 0] TX_TXN for channel 0 in VECT_0, channel 6 in VECT_1 // [ 1] TX_SG_BUF_RECVD for channel 0 in VECT_0, channel 6 in VECT_1 // [ 2] TX_TXN_DONE for channel 0 in VECT_0, channel 6 in VECT_1 // [ 3] RX_SG_BUF_RECVD for channel 0 in VECT_0, channel 6 in VECT_1 // [ 4] RX_TXN_DONE for channel 0 in VECT_0, channel 6 in VECT_1 // ... // [25] TX_TXN for channel 5 in VECT_0, channel 11 in VECT_1 // [26] TX_SG_BUF_RECVD for channel 5 in VECT_0, channel 11 in VECT_1 // [27] TX_TXN_DONE for channel 5 in VECT_0, channel 11 in VECT_1 // [28] RX_SG_BUF_RECVD for channel 5 in VECT_0, channel 11 in VECT_1 // [29] RX_TXN_DONE for channel 5 in VECT_0, channel 11 in VECT_1 // Positions 30 - 31 in both VECT_0 and VECT_1 are zero. 
genvar i; generate for (i = 0; i < C_NUM_CHNL; i = i + 1) begin: vectMap if (i < 6) begin : vectMap0 assign wVect0[(5*i)+0] = TX_TXN[i]; assign wVect0[(5*i)+1] = TX_SG_BUF_RECVD[i]; assign wVect0[(5*i)+2] = TX_TXN_DONE[i]; assign wVect0[(5*i)+3] = RX_SG_BUF_RECVD[i]; assign wVect0[(5*i)+4] = RX_TXN_DONE[i]; end else begin : vectMap1 assign wVect1[(5*(i-6))+0] = TX_TXN[i]; assign wVect1[(5*(i-6))+1] = TX_SG_BUF_RECVD[i]; assign wVect1[(5*(i-6))+2] = TX_TXN_DONE[i]; assign wVect1[(5*(i-6))+3] = RX_SG_BUF_RECVD[i]; assign wVect1[(5*(i-6))+4] = RX_TXN_DONE[i]; end end for (i = C_NUM_CHNL; i < 12; i = i + 1) begin: vectZero if (i < 6) begin : vectZero0 assign wVect0[(5*i)+0] = 1'b0; assign wVect0[(5*i)+1] = 1'b0; assign wVect0[(5*i)+2] = 1'b0; assign wVect0[(5*i)+3] = 1'b0; assign wVect0[(5*i)+4] = 1'b0; end else begin : vectZero1 assign wVect1[(5*(i-6))+0] = 1'b0; assign wVect1[(5*(i-6))+1] = 1'b0; assign wVect1[(5*(i-6))+2] = 1'b0; assign wVect1[(5*(i-6))+3] = 1'b0; assign wVect1[(5*(i-6))+4] = 1'b0; end end assign wVect0[30] = 1'b0; assign wVect0[31] = 1'b0; assign wVect1[30] = 1'b0; assign wVect1[31] = 1'b0; endgenerate // Interrupt controller interrupt_controller intrCtlr ( .CLK(CLK), .RST(RST), .INTR(wIntr), .INTR_LEGACY_CLR(INTR_LEGACY_CLR), .INTR_DONE(wIntrDone), .CFG_INTERRUPT_ASSERT(), .CONFIG_INTERRUPT_MSIENABLE(CONFIG_INTERRUPT_MSIENABLE), .INTR_MSI_RDY(INTR_MSI_RDY), .INTR_MSI_REQUEST(INTR_MSI_REQUEST) ); // Update the interrupt vector when new signals come in (pulse in) and on reset. always @(posedge CLK) begin if (RST) begin rVect0 <= #1 0; rVect1 <= #1 0; end else begin if (VECT_0_RST) begin rVect0 <= #1 (wVect0 | (rVect0 & ~VECT_RST)); rVect1 <= #1 (wVect1 | rVect1); end else if (VECT_1_RST) begin rVect0 <= #1 (wVect0 | rVect0); rVect1 <= #1 (wVect1 | (rVect1 & ~VECT_RST)); end else begin rVect0 <= #1 (wVect0 | rVect0); rVect1 <= #1 (wVect1 | rVect1); end end end // Fire the interrupt when we have a non-zero vector. always @(posedge CLK) begin if (RST) begin rState <= #1 `S_INTR_IDLE; end else begin case (rState) `S_INTR_IDLE : rState <= #1 ((rVect0 | rVect1) == 0 ? `S_INTR_IDLE : `S_INTR_INTR); `S_INTR_INTR : rState <= #1 (wIntrDone ? `S_INTR_CLR_0 : `S_INTR_INTR); `S_INTR_CLR_0 : rState <= #1 (VECT_0_RST ? (C_NUM_CHNL > 6 ? `S_INTR_CLR_1 : `S_INTR_IDLE) : `S_INTR_CLR_0); `S_INTR_CLR_1 : rState <= #1 (VECT_1_RST ? `S_INTR_IDLE : `S_INTR_CLR_1); endcase end end endmodule
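// ---------------------------------------------------------------------------
// Illustrative sketch (not from the RIFFA sources): a tiny simulation-only
// module that restates the vector layout documented in the comments above.
// For channel c and event e (0 = TX_TXN, 1 = TX_SG_BUF_RECVD, 2 = TX_TXN_DONE,
// 3 = RX_SG_BUF_RECVD, 4 = RX_TXN_DONE), the flag lives at bit 5*(c % 6) + e
// of VECT_0 when c < 6 and of VECT_1 when c >= 6. The module name
// interrupt_vector_map is hypothetical and exists only to make the mapping
// executable.
// ---------------------------------------------------------------------------
`timescale 1ns/1ns
module interrupt_vector_map;
   integer c, e, vect, bitpos;
   initial begin
      for (c = 0; c < 12; c = c + 1)
         for (e = 0; e < 5; e = e + 1) begin
            vect   = (c < 6) ? 0 : 1;   // VECT_0 holds channels 0-5, VECT_1 holds 6-11
            bitpos = 5 * (c % 6) + e;   // five flags per channel, packed LSB to MSB
            $display("channel %0d, event %0d -> VECT_%0d[%0d]", c, e, vect, bitpos);
         end
      $finish;
   end
endmodule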
// DESCRIPTION: Verilator: Verilog Test module // // This file ONLY is placed into the Public Domain, for any use, // without warranty, 2012 by Wilson Snyder. //bug456 typedef logic signed [34:0] rc_t; module t (/*AUTOARG*/ // Inputs clk ); input clk; integer cyc=0; reg [63:0] crc; reg [63:0] sum; // Take CRC data and apply to testblock inputs wire [34:0] rc = crc[34:0]; /*AUTOWIRE*/ // Beginning of automatic wires (for undeclared instantiated-module outputs) logic o; // From test of Test.v // End of automatics Test test (/*AUTOINST*/ // Outputs .o (o), // Inputs .rc (rc), .clk (clk)); // Aggregate outputs into a single result vector wire [63:0] result = {63'h0, o}; // Test loop always @ (posedge clk) begin `ifdef TEST_VERBOSE $write("[%0t] cyc==%0d crc=%x result=%x\n",$time, cyc, crc, result); `endif cyc <= cyc + 1; crc <= {crc[62:0], crc[63]^crc[2]^crc[0]}; sum <= result ^ {sum[62:0],sum[63]^sum[2]^sum[0]}; if (cyc==0) begin // Setup crc <= 64'h5aef0c8d_d70a4497; sum <= 64'h0; end else if (cyc<10) begin sum <= 64'h0; end else if (cyc==99) begin $write("[%0t] cyc==%0d crc=%x sum=%x\n",$time, cyc, crc, sum); if (crc !== 64'hc77bb9b3784ea091) $stop; // What checksum will we end up with (above print should match) `define EXPECTED_SUM 64'h7211d24a17b25ec9 if (sum !== `EXPECTED_SUM) $stop; $write("*-* All Finished *-*\n"); $finish; end end endmodule module Test( output logic o, input rc_t rc, input logic clk); localparam RATIO = 2; rc_t rc_d[RATIO:1]; always_ff @(posedge clk) begin integer k; rc_d[1] <= rc; for( k=2; k<RATIO+1; k++ ) begin rc_d[k] <= rc_d[k-1]; end end // always_ff @ assign o = rc_d[RATIO] < 0; endmodule // Local Variables: // verilog-typedef-regexp: "_t$" // End:
// (C) 2001-2015 Altera Corporation. All rights reserved. // Your use of Altera Corporation's design tools, logic functions and other // software and tools, and its AMPP partner logic functions, and any output // files any of the foregoing (including device programming or simulation // files), and any associated documentation or information are expressly subject // to the terms and conditions of the Altera Program License Subscription // Agreement, Altera MegaCore Function License Agreement, or other applicable // license agreement, including, without limitation, that your use is for the // sole purpose of programming logic devices manufactured by Altera and sold by // Altera or its authorized distributors. Please refer to the applicable // agreement for further details. // $File: //acds/rel/15.1/ip/avalon_st/altera_avalon_st_handshake_clock_crosser/altera_avalon_st_handshake_clock_crosser.v $ // $Revision: #1 $ // $Date: 2015/08/09 $ // $Author: swbranch $ //------------------------------------------------------------------------------ // Clock crosser module with handshaking mechanism //------------------------------------------------------------------------------ `timescale 1ns / 1ns module altera_avalon_st_handshake_clock_crosser #( parameter DATA_WIDTH = 8, BITS_PER_SYMBOL = 8, USE_PACKETS = 0, // ------------------------------ // Optional signal widths // ------------------------------ USE_CHANNEL = 0, CHANNEL_WIDTH = 1, USE_ERROR = 0, ERROR_WIDTH = 1, VALID_SYNC_DEPTH = 2, READY_SYNC_DEPTH = 2, USE_OUTPUT_PIPELINE = 1, // ------------------------------ // Derived parameters // ------------------------------ SYMBOLS_PER_BEAT = DATA_WIDTH / BITS_PER_SYMBOL, EMPTY_WIDTH = log2ceil(SYMBOLS_PER_BEAT) ) ( input in_clk, input in_reset, input out_clk, input out_reset, output in_ready, input in_valid, input [DATA_WIDTH - 1 : 0] in_data, input [CHANNEL_WIDTH - 1 : 0] in_channel, input [ERROR_WIDTH - 1 : 0] in_error, input in_startofpacket, input in_endofpacket, input [(EMPTY_WIDTH ? (EMPTY_WIDTH - 1) : 0) : 0] in_empty, input out_ready, output out_valid, output [DATA_WIDTH - 1 : 0] out_data, output [CHANNEL_WIDTH - 1 : 0] out_channel, output [ERROR_WIDTH - 1 : 0] out_error, output out_startofpacket, output out_endofpacket, output [(EMPTY_WIDTH ? (EMPTY_WIDTH - 1) : 0) : 0] out_empty ); // ------------------------------ // Payload-specific widths // ------------------------------ localparam PACKET_WIDTH = (USE_PACKETS) ? 2 + EMPTY_WIDTH : 0; localparam PCHANNEL_W = (USE_CHANNEL) ? CHANNEL_WIDTH : 0; localparam PERROR_W = (USE_ERROR) ? ERROR_WIDTH : 0; localparam PAYLOAD_WIDTH = DATA_WIDTH + PACKET_WIDTH + PCHANNEL_W + EMPTY_WIDTH + PERROR_W; wire [PAYLOAD_WIDTH - 1: 0] in_payload; wire [PAYLOAD_WIDTH - 1: 0] out_payload; // ------------------------------ // Assign in_data and other optional sink interface // signals to in_payload. 
// ------------------------------ assign in_payload[DATA_WIDTH - 1 : 0] = in_data; generate // optional packet inputs if (PACKET_WIDTH) begin assign in_payload[ DATA_WIDTH + PACKET_WIDTH - 1 : DATA_WIDTH ] = {in_startofpacket, in_endofpacket}; end // optional channel input if (USE_CHANNEL) begin assign in_payload[ DATA_WIDTH + PACKET_WIDTH + PCHANNEL_W - 1 : DATA_WIDTH + PACKET_WIDTH ] = in_channel; end // optional empty input if (EMPTY_WIDTH) begin assign in_payload[ DATA_WIDTH + PACKET_WIDTH + PCHANNEL_W + EMPTY_WIDTH - 1 : DATA_WIDTH + PACKET_WIDTH + PCHANNEL_W ] = in_empty; end // optional error input if (USE_ERROR) begin assign in_payload[ DATA_WIDTH + PACKET_WIDTH + PCHANNEL_W + EMPTY_WIDTH + PERROR_W - 1 : DATA_WIDTH + PACKET_WIDTH + PCHANNEL_W + EMPTY_WIDTH ] = in_error; end endgenerate // -------------------------------------------------- // Pipe the input payload to our inner module which handles the // actual clock crossing // -------------------------------------------------- altera_avalon_st_clock_crosser #( .SYMBOLS_PER_BEAT (1), .BITS_PER_SYMBOL (PAYLOAD_WIDTH), .FORWARD_SYNC_DEPTH (VALID_SYNC_DEPTH), .BACKWARD_SYNC_DEPTH (READY_SYNC_DEPTH), .USE_OUTPUT_PIPELINE (USE_OUTPUT_PIPELINE) ) clock_xer ( .in_clk (in_clk ), .in_reset (in_reset ), .in_ready (in_ready ), .in_valid (in_valid ), .in_data (in_payload ), .out_clk (out_clk ), .out_reset (out_reset ), .out_ready (out_ready ), .out_valid (out_valid ), .out_data (out_payload ) ); // -------------------------------------------------- // Split out_payload into the output signals. // -------------------------------------------------- assign out_data = out_payload[DATA_WIDTH - 1 : 0]; generate // optional packet outputs if (USE_PACKETS) begin assign {out_startofpacket, out_endofpacket} = out_payload[DATA_WIDTH + PACKET_WIDTH - 1 : DATA_WIDTH]; end else begin // avoid a "has no driver" warning. assign {out_startofpacket, out_endofpacket} = 2'b0; end // optional channel output if (USE_CHANNEL) begin assign out_channel = out_payload[ DATA_WIDTH + PACKET_WIDTH + PCHANNEL_W - 1 : DATA_WIDTH + PACKET_WIDTH ]; end else begin // avoid a "has no driver" warning. assign out_channel = 1'b0; end // optional empty output if (EMPTY_WIDTH) begin assign out_empty = out_payload[ DATA_WIDTH + PACKET_WIDTH + PCHANNEL_W + EMPTY_WIDTH - 1 : DATA_WIDTH + PACKET_WIDTH + PCHANNEL_W ]; end else begin // avoid a "has no driver" warning. assign out_empty = 1'b0; end // optional error output if (USE_ERROR) begin assign out_error = out_payload[ DATA_WIDTH + PACKET_WIDTH + PCHANNEL_W + EMPTY_WIDTH + PERROR_W - 1 : DATA_WIDTH + PACKET_WIDTH + PCHANNEL_W + EMPTY_WIDTH ]; end else begin // avoid a "has no driver" warning. assign out_error = 1'b0; end endgenerate // -------------------------------------------------- // Calculates the log2ceil of the input value. // -------------------------------------------------- function integer log2ceil; input integer val; integer i; begin i = 1; log2ceil = 0; while (i < val) begin log2ceil = log2ceil + 1; i = i << 1; end end endfunction endmodule
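// ---------------------------------------------------------------------------
// Illustrative sketch (not Altera source): one plausible instantiation of the
// handshake clock crosser above, carrying a 32-bit packetized stream with a
// 2-bit channel field between two clock domains. The parameter and port names
// are taken from the module declaration above; the wrapper and signal names
// (st_crosser_example, wr_clk, rd_clk, src_*, dst_*) are hypothetical.
// With DATA_WIDTH=32 and BITS_PER_SYMBOL=8, EMPTY_WIDTH resolves to 2.
// ---------------------------------------------------------------------------
`timescale 1ns / 1ns
module st_crosser_example (
   input         wr_clk,
   input         wr_reset,
   input         rd_clk,
   input         rd_reset,

   output        src_ready,
   input         src_valid,
   input  [31:0] src_data,
   input  [1:0]  src_channel,
   input         src_sop,
   input         src_eop,
   input  [1:0]  src_empty,

   input         dst_ready,
   output        dst_valid,
   output [31:0] dst_data,
   output [1:0]  dst_channel,
   output        dst_sop,
   output        dst_eop,
   output [1:0]  dst_empty
);
   // Error signalling is unused here, so in_error is tied off and out_error
   // is routed to an unused wire.
   wire unused_error;

   altera_avalon_st_handshake_clock_crosser #(
      .DATA_WIDTH          (32),
      .BITS_PER_SYMBOL     (8),
      .USE_PACKETS         (1),
      .USE_CHANNEL         (1),
      .CHANNEL_WIDTH       (2),
      .USE_ERROR           (0),
      .ERROR_WIDTH         (1),
      .VALID_SYNC_DEPTH    (2),
      .READY_SYNC_DEPTH    (2),
      .USE_OUTPUT_PIPELINE (1)
   ) crosser (
      .in_clk            (wr_clk),
      .in_reset          (wr_reset),
      .out_clk           (rd_clk),
      .out_reset         (rd_reset),

      .in_ready          (src_ready),
      .in_valid          (src_valid),
      .in_data           (src_data),
      .in_channel        (src_channel),
      .in_error          (1'b0),
      .in_startofpacket  (src_sop),
      .in_endofpacket    (src_eop),
      .in_empty          (src_empty),

      .out_ready         (dst_ready),
      .out_valid         (dst_valid),
      .out_data          (dst_data),
      .out_channel       (dst_channel),
      .out_error         (unused_error),
      .out_startofpacket (dst_sop),
      .out_endofpacket   (dst_eop),
      .out_empty         (dst_empty)
   );
endmodule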
`timescale 1ns / 1ps ////////////////////////////////////////////////////////////////////////////////// // Company: // Engineer: // // Create Date: 17:39:29 10/01/2015 // Design Name: // Module Name: Zero_InfMult_Unit // Project Name: // Target Devices: // Tool versions: // Description: // // Dependencies: // // Revision: // Revision 0.01 - File Created // Additional Comments: // ////////////////////////////////////////////////////////////////////////////////// module Zero_InfMult_Unit //SINGLE PRECISION PARAMETERS # (parameter W = 32) //DOUBLE PRECISION PARAMETERS /* # (parameter W = 64) */ ( input wire clk, input wire rst, input wire load, input wire [W-2:0] Data_A, input wire [W-2:0] Data_B, output wire zero_m_flag ); //Wires///////////////////// wire or_1, or_2; wire [W-2:0] zero_comp; wire zero_reg; //////////////////////////// Comparator_Equal #(.S(W-1)) Data_A_Comp ( .Data_A(Data_A), .Data_B(zero_comp), .equal_sgn(or_1) ); Comparator_Equal #(.S(W-1)) Data_B_Comp ( .Data_A(zero_comp), .Data_B(Data_B), .equal_sgn(or_2) ); RegisterAdd #(.W(1)) Zero_Info_Mult ( //Data X input register .clk(clk), .rst(rst), .load(load), .D(zero_reg), .Q(zero_m_flag) ); assign zero_reg = or_1 || or_2; generate if (W == 32) assign zero_comp = 31'd0; else assign zero_comp = 63'd0; endgenerate endmodule
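// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original design): a behavioral
// reference for Zero_InfMult_Unit above, assuming Comparator_Equal raises
// equal_sgn when its operands match and RegisterAdd is a load-enabled
// register with synchronous reset (the actual RegisterAdd may differ). Under
// those assumptions the unit reduces to "flag = 1 when either operand, sign
// bit excluded, is all zeros", captured when load is asserted. The module
// name Zero_InfMult_Unit_ref is hypothetical.
// ---------------------------------------------------------------------------
`timescale 1ns / 1ps
module Zero_InfMult_Unit_ref # (parameter W = 32) (
   input  wire         clk,
   input  wire         rst,
   input  wire         load,
   input  wire [W-2:0] Data_A,
   input  wire [W-2:0] Data_B,
   output reg          zero_m_flag
);
   always @(posedge clk)
      if (rst)
         zero_m_flag <= 1'b0;
      else if (load)
         zero_m_flag <= (Data_A == 0) || (Data_B == 0); // exponent+mantissa all zero
endmodule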
// DESCRIPTION: Verilator: Test of gated clock detection
//
// The code as shown generates a result by a delayed assignment from PC. The
// creation of the result is from a clock gated from the clock that sets
// PC. However, since they are essentially the same clock, the result should
// be delayed by one cycle.
//
// Standard Verilator treats them as different clocks, so the result stays in
// step with the PC. An event-driven simulator always allows the clock to win.
//
// The problem is caused by the extra loop added by Verilator to the
// evaluation of all internally generated clocks (effectively removed by
// marking the clock enable).
//
// This test is added to facilitate experiments with solutions.
//
// This file ONLY is placed into the Public Domain, for any use,
// without warranty, 2013 by Jeremy Bennett <[email protected]>.

module t (/*AUTOARG*/
   // Inputs
   clk
   );
   input clk;

   reg       gated_clk_en = 1'b0 ;
   reg [1:0] pc = 2'b0;
   reg [1:0] res = 2'b0;

   wire      gated_clk = gated_clk_en & clk;

   always @(posedge clk) begin
      pc <= pc + 1;
      gated_clk_en <= 1'b1;
   end

   always @(posedge gated_clk) begin
      res <= pc;
   end

   always @(posedge clk) begin
      if (pc == 2'b11) begin
         // Correct behaviour is that res should be lagging pc in the count
         // by one cycle
         if (res == 2'b10) begin
            $write("*-* All Finished *-*\n");
            $finish;
         end
         else begin
            $stop;
         end
      end
   end

endmodule
// (C) 1992-2014 Altera Corporation. All rights reserved. // Your use of Altera Corporation's design tools, logic functions and other // software and tools, and its AMPP partner logic functions, and any output // files any of the foregoing (including device programming or simulation // files), and any associated documentation or information are expressly subject // to the terms and conditions of the Altera Program License Subscription // Agreement, Altera MegaCore Function License Agreement, or other applicable // license agreement, including, without limitation, that your use is for the // sole purpose of programming logic devices manufactured by Altera and sold by // Altera or its authorized distributors. Please refer to the applicable // agreement for further details. // overall latency of this IP `define IP_PIPELINE_LATENCY_PLUS1 5 // to support the 0-latency stall free entry, add one more valid bit `define ZERO_LATENCY_OFFSET 1 module acl_stall_free_sink #( parameter integer DATA_WIDTH = 32, parameter integer PIPELINE_DEPTH = 32, parameter integer SHARINGII = 1, parameter integer SCHEDULEII = 1 ) ( input logic clock, input logic resetn, input logic [DATA_WIDTH-1:0] data_in, output logic [DATA_WIDTH-1:0] data_out, input logic input_accepted, output logic valid_out, input logic stall_in, output logic stall_entry, output logic [PIPELINE_DEPTH-`IP_PIPELINE_LATENCY_PLUS1+`ZERO_LATENCY_OFFSET:0] valids, output logic [SHARINGII-1:0] IIphases, input logic inc_pipelined_thread, input logic dec_pipelined_thread ); (* altera_attribute = "-name auto_shift_register_recognition OFF" *) reg [PIPELINE_DEPTH-`IP_PIPELINE_LATENCY_PLUS1:0] shift_reg; reg [DATA_WIDTH-1:0] reg_data_in; localparam FIFO_DEPTH_LOG2 = CLogB2(PIPELINE_DEPTH); localparam FIFO_DEPTH = 1 << FIFO_DEPTH_LOG2; reg [FIFO_DEPTH_LOG2:0] counter; reg [SHARINGII-1:0] IIshreg; wire output_accepted; wire staging_reg_stall; wire fifo_valid; wire [DATA_WIDTH-1:0] fifo_data; wire throttle_pipelined_iterations; assign stall_entry = counter[FIFO_DEPTH_LOG2] | (!IIshreg[0]) | throttle_pipelined_iterations; assign output_accepted = fifo_valid & ~staging_reg_stall; assign valids = {shift_reg, input_accepted}; assign IIphases = IIshreg; always @(posedge clock or negedge resetn) begin if (!resetn) begin IIshreg <= {{(SHARINGII - 1){1'b0}},1'b1}; end else begin IIshreg <= {IIshreg,IIshreg[SHARINGII-1]}; end end reg[$clog2(SCHEDULEII):0] IIschedcount; reg[$clog2(SCHEDULEII):0] threads_count; always @(posedge clock or negedge resetn) begin if (!resetn) begin IIschedcount <= 0; threads_count <= 0; end else begin if (IIshreg[0]) begin // do not increase the counter if a thread is exiting // increasing threads_count is already decreasing the window // increasing IIschedcount ends up accepting the next thread too early IIschedcount <= (input_accepted && dec_pipelined_thread) ? IIschedcount : (IIschedcount == (SCHEDULEII - 1) ? 0 : (IIschedcount + 1)); end if (input_accepted) begin threads_count <= threads_count + inc_pipelined_thread - dec_pipelined_thread; end end end // allow threads in a window of the II cycles // this prevents the next iteration from entering too early assign throttle_pipelined_iterations = (IIschedcount >= (threads_count > 0 ? 
threads_count : 1)); always @(posedge clock or negedge resetn) begin if (!resetn) begin shift_reg <= {(PIPELINE_DEPTH-`IP_PIPELINE_LATENCY_PLUS1-1){1'b0}}; counter <= {(FIFO_DEPTH_LOG2+1){1'b0}}; reg_data_in <= 'x; end else begin shift_reg <= { shift_reg[PIPELINE_DEPTH-(`IP_PIPELINE_LATENCY_PLUS1+1):0], input_accepted }; counter <= counter + input_accepted - output_accepted; reg_data_in <= data_in; end end acl_fifo #( .DATA_WIDTH(DATA_WIDTH), .DEPTH(FIFO_DEPTH) ) fifo ( .clock(clock), .resetn(resetn), .data_in(reg_data_in), .data_out(fifo_data), .valid_in(shift_reg[PIPELINE_DEPTH-`IP_PIPELINE_LATENCY_PLUS1]), .valid_out(fifo_valid), .stall_in(staging_reg_stall) ); acl_staging_reg #( .WIDTH(DATA_WIDTH) ) staging_reg ( .clk(clock), .reset(~resetn), .i_data(fifo_data), .i_valid(fifo_valid), .o_stall(staging_reg_stall), .o_data(data_out), .o_valid(valid_out), .i_stall(stall_in) ); //ceil of the log base 2 function integer CLogB2; input [31:0] Depth; integer i; begin i = Depth; for(CLogB2 = 0; i > 0; CLogB2 = CLogB2 + 1) i = i >> 1; end endfunction endmodule
`timescale 1ns / 1ps
//////////////////////////////////////////////////////////////////////////////////
// Company:
// Engineer:
//
// Create Date: 03/18/2016 03:28:31 PM
// Design Name:
// Module Name: Round_Sgf_Dec
// Project Name:
// Target Devices:
// Tool Versions:
// Description:
//
// Dependencies:
//
// Revision:
// Revision 0.01 - File Created
// Additional Comments:
//
//////////////////////////////////////////////////////////////////////////////////

module Round_Sgf_Dec(
	input wire clk,
	input wire [1:0] Data_i,
	input wire [1:0] Round_Type_i,
	input wire Sign_Result_i,
	output reg Round_Flag_o
    );

	always @*
	case ({Sign_Result_i,Round_Type_i,Data_i})
	//Round type=00; Towards zero / No round
	//Round type=01; Towards - infinity
	//Round type=10; Towards + infinity

	//Op=0;Round type=00
	/*5'b00000: Round_Flag_o <=0;
	5'b00001: Round_Flag_o <=0;
	5'b00010: Round_Flag_o <=0;
	5'b00011: Round_Flag_o <=0;*/
	//Op=1;Round type=00
	/*5'b10000: Round_Flag_o <=0;
	5'b10001: Round_Flag_o <=0;
	5'b10010: Round_Flag_o <=0;
	5'b10011: Round_Flag_o <=0; */
	//Op=0;Round type=01
	/*5'b00100: Round_Flag_o <=0;
	5'b00101: Round_Flag_o <=0;
	5'b00110: Round_Flag_o <=0;
	5'b00111: Round_Flag_o <=0; */
	//Op=1;Round type=01
	//5'b10100: Round_Flag_o <=0;
	5'b10101: Round_Flag_o <=1;
	5'b10110: Round_Flag_o <=1;
	5'b10111: Round_Flag_o <=1;
	//Op=0;Round type=10
	//5'b01000: Round_Flag_o <=0;
	5'b01001: Round_Flag_o <=1;
	5'b01010: Round_Flag_o <=1;
	5'b01011: Round_Flag_o <=1;
	//Op=1;Round type=10
	/*5'b11000: Round_Flag_o <=0;
	5'b11001: Round_Flag_o <=0;
	5'b11010: Round_Flag_o <=0;
	5'b11011: Round_Flag_o <=0; */
	default: Round_Flag_o <=0;
	endcase

endmodule
// DESCRIPTION: Verilator: Verilog Test module // // This file ONLY is placed into the Public Domain, for any use, // without warranty, 2009 by Wilson Snyder. package defs; function automatic integer max; input integer a; input integer b; max = (a > b) ? a : b; endfunction function automatic integer log2; input integer value; value = value >> 1; for (log2 = 0; value > 0; log2 = log2 + 1) value = value >> 1; endfunction function automatic integer ceil_log2; input integer value; value = value - 1; for (ceil_log2 = 0; value > 0; ceil_log2 = ceil_log2 + 1) value = value >> 1; endfunction endpackage module sub(); import defs::*; parameter RAND_NUM_MAX = ""; localparam DATA_RANGE = RAND_NUM_MAX + 1; localparam DATA_WIDTH = ceil_log2(DATA_RANGE); localparam WIDTH = max(4, ceil_log2(DATA_RANGE + 1)); endmodule module t(/*AUTOARG*/ // Inputs clk ); import defs::*; parameter WHICH = 0; parameter MAX_COUNT = 10; localparam MAX_EXPONENT = log2(MAX_COUNT); localparam EXPONENT_WIDTH = ceil_log2(MAX_EXPONENT + 1); input clk; generate if (WHICH == 1) begin : which_true sub sub_true(); defparam sub_true.RAND_NUM_MAX = MAX_EXPONENT; end else begin : which_false sub sub_false(); defparam sub_false.RAND_NUM_MAX = MAX_COUNT; end endgenerate endmodule
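// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original test): a small self-checking
// module that pins down what the defs package functions above return for a
// few sample inputs: log2 gives floor(log2(n)), ceil_log2 gives
// ceiling(log2(n)), and max picks the larger argument. The module name
// defs_check is hypothetical.
// ---------------------------------------------------------------------------
module defs_check;
   import defs::*;
   initial begin
      if (max(4, 7)     != 7) $stop;
      if (log2(10)      != 3) $stop;  // floor(log2(10)) = 3
      if (log2(16)      != 4) $stop;  // floor(log2(16)) = 4
      if (ceil_log2(10) != 4) $stop;  // ceil(log2(10)) = 4
      if (ceil_log2(16) != 4) $stop;  // ceil(log2(16)) = 4
      $display("defs_check: all checks passed");
   end
endmodule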
//----------------------------------------------------------------------------- // Copyright (C) 2014 iZsh <izsh at fail0verflow.com> // // This code is licensed to you under the terms of the GNU GPL, version 2 or, // at your option, any later version. See the LICENSE.txt file for the text of // the license. //----------------------------------------------------------------------------- // testbench for lf_edge_detect `include "lf_edge_detect.v" `define FIN "tb_tmp/data.filtered.gold" `define FOUT_MIN "tb_tmp/data.min" `define FOUT_MAX "tb_tmp/data.max" `define FOUT_STATE "tb_tmp/data.state" `define FOUT_TOGGLE "tb_tmp/data.toggle" `define FOUT_HIGH "tb_tmp/data.high" `define FOUT_HIGHZ "tb_tmp/data.highz" `define FOUT_LOWZ "tb_tmp/data.lowz" `define FOUT_LOW "tb_tmp/data.low" module lf_edge_detect_tb; integer fin, fout_state, fout_toggle; integer fout_high, fout_highz, fout_lowz, fout_low, fout_min, fout_max; integer r; reg clk = 0; reg [7:0] adc_d; wire adc_clk; wire data_rdy; wire edge_state; wire edge_toggle; wire [7:0] high_threshold; wire [7:0] highz_threshold; wire [7:0] lowz_threshold; wire [7:0] low_threshold; wire [7:0] max; wire [7:0] min; initial begin clk = 0; fin = $fopen(`FIN, "r"); if (!fin) begin $display("ERROR: can't open the data file"); $finish; end fout_min = $fopen(`FOUT_MIN, "w+"); fout_max = $fopen(`FOUT_MAX, "w+"); fout_state = $fopen(`FOUT_STATE, "w+"); fout_toggle = $fopen(`FOUT_TOGGLE, "w+"); fout_high = $fopen(`FOUT_HIGH, "w+"); fout_highz = $fopen(`FOUT_HIGHZ, "w+"); fout_lowz = $fopen(`FOUT_LOWZ, "w+"); fout_low = $fopen(`FOUT_LOW, "w+"); if (!$feof(fin)) adc_d = $fgetc(fin); // read the first value end always # 1 clk = !clk; // input initial begin while (!$feof(fin)) begin @(negedge clk) adc_d <= $fgetc(fin); end if ($feof(fin)) begin # 3 $fclose(fin); $fclose(fout_state); $fclose(fout_toggle); $fclose(fout_high); $fclose(fout_highz); $fclose(fout_lowz); $fclose(fout_low); $fclose(fout_min); $fclose(fout_max); $finish; end end initial begin // $monitor("%d\t S: %b, E: %b", $time, edge_state, edge_toggle); end // output always @(negedge clk) if ($time > 2) begin r = $fputc(min, fout_min); r = $fputc(max, fout_max); r = $fputc(edge_state, fout_state); r = $fputc(edge_toggle, fout_toggle); r = $fputc(high_threshold, fout_high); r = $fputc(highz_threshold, fout_highz); r = $fputc(lowz_threshold, fout_lowz); r = $fputc(low_threshold, fout_low); end // module to test lf_edge_detect detect(clk, adc_d, 8'd127, max, min, high_threshold, highz_threshold, lowz_threshold, low_threshold, edge_state, edge_toggle); endmodule
// DESCRIPTION: Verilator: Verilog Test module // // This file ONLY is placed into the Public Domain, for any use, // without warranty, 2009 by Wilson Snyder. module t (/*AUTOARG*/ // Inputs clk ); input clk; integer cyc=0; genvar g; integer i; reg [31:0] v; reg [31:0] gen_pre_PLUSPLUS = 32'h0; reg [31:0] gen_pre_MINUSMINUS = 32'h0; reg [31:0] gen_post_PLUSPLUS = 32'h0; reg [31:0] gen_post_MINUSMINUS = 32'h0; reg [31:0] gen_PLUSEQ = 32'h0; reg [31:0] gen_MINUSEQ = 32'h0; reg [31:0] gen_TIMESEQ = 32'h0; reg [31:0] gen_DIVEQ = 32'h0; reg [31:0] gen_MODEQ = 32'h0; reg [31:0] gen_ANDEQ = 32'h0; reg [31:0] gen_OREQ = 32'h0; reg [31:0] gen_XOREQ = 32'h0; reg [31:0] gen_SLEFTEQ = 32'h0; reg [31:0] gen_SRIGHTEQ = 32'h0; reg [31:0] gen_SSRIGHTEQ = 32'h0; generate for (g=8; g<=16; ++g) always @(posedge clk) gen_pre_PLUSPLUS[g] = 1'b1; for (g=16; g>=8; --g) always @(posedge clk) gen_pre_MINUSMINUS[g] = 1'b1; for (g=8; g<=16; g++) always @(posedge clk) gen_post_PLUSPLUS[g] = 1'b1; for (g=16; g>=8; g--) always @(posedge clk) gen_post_MINUSMINUS[g] = 1'b1; for (g=8; g<=16; g+=2) always @(posedge clk) gen_PLUSEQ[g] = 1'b1; for (g=16; g>=8; g-=2) always @(posedge clk) gen_MINUSEQ[g] = 1'b1; `ifndef verilator //UNSUPPORTED for (g=8; g<=16; g*=2) always @(posedge clk) gen_TIMESEQ[g] = 1'b1; for (g=16; g>=8; g/=2) always @(posedge clk) gen_DIVEQ[g] = 1'b1; for (g=15; g>8; g%=8) always @(posedge clk) gen_MODEQ[g] = 1'b1; for (g=7; g>4; g&=4) always @(posedge clk) gen_ANDEQ[g] = 1'b1; for (g=1; g<=1; g|=2) always @(posedge clk) gen_OREQ[g] = 1'b1; for (g=7; g==7; g^=2) always @(posedge clk) gen_XOREQ[g] = 1'b1; for (g=8; g<=16; g<<=2) always @(posedge clk) gen_SLEFTEQ[g] = 1'b1; for (g=16; g>=8; g>>=2) always @(posedge clk) gen_SRIGHTEQ[g] = 1'b1; for (g=16; g>=8; g>>>=2) always @(posedge clk) gen_SSRIGHTEQ[g] = 1'b1; `endif endgenerate always @ (posedge clk) begin cyc <= cyc + 1; if (cyc == 3) begin `ifdef TEST_VERBOSE $write("gen_pre_PLUSPLUS %b\n", gen_pre_PLUSPLUS); $write("gen_pre_MINUSMINUS %b\n", gen_pre_MINUSMINUS); $write("gen_post_PLUSPLUS %b\n", gen_post_PLUSPLUS); $write("gen_post_MINUSMINUS %b\n", gen_post_MINUSMINUS); $write("gen_PLUSEQ %b\n", gen_PLUSEQ); $write("gen_MINUSEQ %b\n", gen_MINUSEQ); $write("gen_TIMESEQ %b\n", gen_TIMESEQ); $write("gen_DIVEQ %b\n", gen_DIVEQ); $write("gen_MODEQ %b\n", gen_MODEQ); $write("gen_ANDEQ %b\n", gen_ANDEQ); $write("gen_OREQ %b\n", gen_OREQ); $write("gen_XOREQ %b\n", gen_XOREQ); $write("gen_SLEFTEQ %b\n", gen_SLEFTEQ); $write("gen_SRIGHTEQ %b\n", gen_SRIGHTEQ); $write("gen_SSRIGHTEQ %b\n", gen_SSRIGHTEQ); `endif if (gen_pre_PLUSPLUS !== 32'b00000000000000011111111100000000) $stop; if (gen_pre_MINUSMINUS !== 32'b00000000000000011111111100000000) $stop; if (gen_post_PLUSPLUS !== 32'b00000000000000011111111100000000) $stop; if (gen_post_MINUSMINUS!== 32'b00000000000000011111111100000000) $stop; if (gen_PLUSEQ !== 32'b00000000000000010101010100000000) $stop; if (gen_MINUSEQ !== 32'b00000000000000010101010100000000) $stop; `ifndef verilator //UNSUPPORTED if (gen_TIMESEQ !== 32'b00000000000000010000000100000000) $stop; if (gen_DIVEQ !== 32'b00000000000000010000000100000000) $stop; if (gen_MODEQ !== 32'b00000000000000001000000000000000) $stop; if (gen_ANDEQ !== 32'b00000000000000000000000010000000) $stop; if (gen_OREQ !== 32'b00000000000000000000000000000010) $stop; if (gen_XOREQ !== 32'b00000000000000000000000010000000) $stop; if (gen_SLEFTEQ !== 32'b00000000000000000000000100000000) $stop; if (gen_SRIGHTEQ !== 32'b00000000000000010000000000000000) $stop; if 
(gen_SSRIGHTEQ !== 32'b00000000000000010000000000000000) $stop; `endif v=0; for (i=8; i<=16; ++i) v[i] = 1'b1; if (v !== 32'b00000000000000011111111100000000) $stop; v=0; for (i=16; i>=8; --i) v[i] = 1'b1; if (v !== 32'b00000000000000011111111100000000) $stop; v=0; for (i=8; i<=16; i++) v[i] = 1'b1; if (v !== 32'b00000000000000011111111100000000) $stop; v=0; for (i=16; i>=8; i--) v[i] = 1'b1; if (v !== 32'b00000000000000011111111100000000) $stop; v=0; for (i=8; i<=16; i+=2) v[i] = 1'b1; if (v !== 32'b00000000000000010101010100000000) $stop; v=0; for (i=16; i>=8; i-=2) v[i] = 1'b1; if (v !== 32'b00000000000000010101010100000000) $stop; `ifndef verilator //UNSUPPORTED v=0; for (i=8; i<=16; i*=2) v[i] = 1'b1; if (v !== 32'b00000000000000010000000100000000) $stop; v=0; for (i=16; i>=8; i/=2) v[i] = 1'b1; if (v !== 32'b00000000000000010000000100000000) $stop; v=0; for (i=15; i>8; i%=8) v[i] = 1'b1; if (v !== 32'b00000000000000001000000000000000) $stop; v=0; for (i=7; i>4; i&=4) v[i] = 1'b1; if (v !== 32'b00000000000000000000000010000000) $stop; v=0; for (i=1; i<=1; i|=2) v[i] = 1'b1; if (v !== 32'b00000000000000000000000000000010) $stop; v=0; for (i=7; i==7; i^=2) v[i] = 1'b1; if (v !== 32'b00000000000000000000000010000000) $stop; v=0; for (i=8; i<=16; i<<=2) v[i] =1'b1; if (v !== 32'b00000000000000000000000100000000) $stop; v=0; for (i=16; i>=8; i>>=2) v[i] =1'b1; if (v !== 32'b00000000000000010000000000000000) $stop; v=0; for (i=16; i>=8; i>>>=2) v[i]=1'b1; if (v !== 32'b00000000000000010000000000000000) $stop; `endif $write("*-* All Finished *-*\n"); $finish; end end endmodule
////////////////////////////////////////////////////////////////////// //// //// //// spi_top.v //// //// //// //// This file is part of the SPI IP core project //// //// http://www.opencores.org/projects/spi/ //// //// //// //// Author(s): //// //// - Simon Srot ([email protected]) //// //// //// //// All additional information is avaliable in the Readme.txt //// //// file. //// //// //// ////////////////////////////////////////////////////////////////////// //// //// //// Copyright (C) 2002 Authors //// //// //// //// This source file may be used and distributed without //// //// restriction provided that this copyright statement is not //// //// removed from the file and that any derivative work contains //// //// the original copyright notice and the associated disclaimer. //// //// //// //// This source file is free software; you can redistribute it //// //// and/or modify it under the terms of the GNU Lesser General //// //// Public License as published by the Free Software Foundation; //// //// either version 2.1 of the License, or (at your option) any //// //// later version. //// //// //// //// This source is distributed in the hope that it will be //// //// useful, but WITHOUT ANY WARRANTY; without even the implied //// //// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR //// //// PURPOSE. See the GNU Lesser General Public License for more //// //// details. //// //// //// //// You should have received a copy of the GNU Lesser General //// //// Public License along with this source; if not, download it //// //// from http://www.opencores.org/lgpl.shtml //// //// //// ////////////////////////////////////////////////////////////////////// //// //// /* Modifications to spi_top.v */ //// /* Copyright (c) 2006 Rice University */ //// /* All Rights Reserved */ //// /* This code is covered by the Rice-WARP license */ //// /* See http://warp.rice.edu/license/ for details */ module spi_top ( // OPB signals opb_clk_i, opb_rst_i, // SPI registers reg_ctrl, reg_ss, reg_divider, reg_tx, ctrlwrite, busval, go, // SPI signals ss_pad_o, sclk_pad_o, mosi_pad_o ); parameter Tp = 1; // OPB signals input opb_clk_i; // master clock input input opb_rst_i; // synchronous active high reset // SPI registers input [13:0] reg_ctrl; input [7:0] reg_ss; input [3:0] reg_divider; input [17:0] reg_tx; input ctrlwrite; input busval; output go; // SPI signals output [8-1:0] ss_pad_o; // slave select output sclk_pad_o; // serial clock output mosi_pad_o; // master out slave in // Internal signals wire [17:0] rx; // Rx register wire rx_negedge; // miso is sampled on negative edge wire tx_negedge; // mosi is driven on negative edge wire [4:0] char_len; // char len //wire go; // go wire lsb; // lsb first on line wire ie; // interrupt enable wire ass; // automatic slave select wire spi_divider_sel; // divider register select wire spi_ctrl_sel; // ctrl register select wire [3:0] spi_tx_sel; // tx_l register select wire spi_ss_sel; // ss register select wire tip; // transfer in progress wire pos_edge; // recognize posedge of sclk wire neg_edge; // recognize negedge of sclk wire last_bit; // marks last character bit reg ctrlbitgo; assign rx_negedge = reg_ctrl[9]; assign tx_negedge = reg_ctrl[10]; assign go = ctrlbitgo; assign char_len = reg_ctrl[6:0]; assign lsb = reg_ctrl[11]; assign ie = reg_ctrl[12]; assign ass = reg_ctrl[13]; always @(posedge opb_clk_i or posedge opb_rst_i) begin if (opb_rst_i) ctrlbitgo <= #Tp 1'b0; else if(ctrlwrite && !tip) ctrlbitgo <= #Tp busval; else if(tip && last_bit && pos_edge) 
ctrlbitgo <= #Tp 1'b0; end assign ss_pad_o = ~((reg_ss & {8{tip & ass}}) | (reg_ss & {8{!ass}})); spi_clgen clgen (.clk_in(opb_clk_i), .rst(opb_rst_i), .go(go), .enable(tip), .last_clk(last_bit), .divider(reg_divider), .clk_out(sclk_pad_o), .pos_edge(pos_edge), .neg_edge(neg_edge)); spi_shift shift (.clk(opb_clk_i), .rst(opb_rst_i), .len(char_len[5-1:0]), .lsb(lsb), .go(go), .pos_edge(pos_edge), .neg_edge(neg_edge), .rx_negedge(rx_negedge), .tx_negedge(tx_negedge), .tip(tip), .last(last_bit), .p_in(reg_tx), .p_out(rx), .s_clk(sclk_pad_o), .s_out(mosi_pad_o)); endmodule
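// ---------------------------------------------------------------------------
// Illustrative sketch (not OpenCores/Rice source): how a host-side wrapper
// might pack the reg_ctrl word consumed by spi_top above, based on the
// assigns in that module (char_len = reg_ctrl[6:0], rx_negedge = reg_ctrl[9],
// tx_negedge = reg_ctrl[10], lsb = reg_ctrl[11], ie = reg_ctrl[12],
// ass = reg_ctrl[13]); bits 8:7 are not decoded, and char_len is truncated to
// 5 bits inside spi_top. The module name spi_ctrl_pack and its port names are
// hypothetical.
// ---------------------------------------------------------------------------
module spi_ctrl_pack (
   input  [6:0]  char_len,   // number of bits per transfer
   input         rx_negedge, // sample MISO on negative SCLK edge
   input         tx_negedge, // drive MOSI on negative SCLK edge
   input         lsb,        // LSB-first shifting
   input         ie,         // interrupt enable
   input         ass,        // automatic slave select
   output [13:0] reg_ctrl
);
   assign reg_ctrl = {ass, ie, lsb, tx_negedge, rx_negedge, 2'b00, char_len};
endmodule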
/*+-------------------------------------------------------------------------- Copyright (c) 2015, Microsoft Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------*/ `timescale 1ns / 1ps module RCB_FRL_CRC_gen ( D, NewCRC); input [47:0] D; output [7:0] NewCRC; assign NewCRC[0] = D[46] ^ D[42] ^ D[41] ^ D[37] ^ D[36] ^ D[35] ^ D[34] ^ D[33] ^ D[31] ^ D[30] ^ D[29] ^ D[27] ^ D[26] ^ D[24] ^ D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^ D[8] ^ D[7] ^ D[6] ^ D[3] ^ D[1] ^ D[0]; assign NewCRC[1] = D[47] ^ D[43] ^ D[42] ^ D[38] ^ D[37] ^ D[36] ^ D[35] ^ D[34] ^ D[32] ^ D[31] ^ D[30] ^ D[28] ^ D[27] ^ D[25] ^ D[21] ^ D[19] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[9] ^ D[8] ^ D[7] ^ D[4] ^ D[2] ^ D[1]; assign NewCRC[2] = D[46] ^ D[44] ^ D[43] ^ D[42] ^ D[41] ^ D[39] ^ D[38] ^ D[34] ^ D[32] ^ D[30] ^ D[28] ^ D[27] ^ D[24] ^ D[22] ^ D[19] ^ D[14] ^ D[13] ^ D[10] ^ D[9] ^ D[7] ^ D[6] ^ D[5] ^ D[2] ^ D[1] ^ D[0]; assign NewCRC[3] = D[47] ^ D[45] ^ D[44] ^ D[43] ^ D[42] ^ D[40] ^ D[39] ^ D[35] ^ D[33] ^ D[31] ^ D[29] ^ D[28] ^ D[25] ^ D[23] ^ D[20] ^ D[15] ^ D[14] ^ D[11] ^ D[10] ^ D[8] ^ D[7] ^ D[6] ^ D[3] ^ D[2] ^ D[1]; assign NewCRC[4] = D[45] ^ D[44] ^ D[43] ^ D[42] ^ D[40] ^ D[37] ^ D[35] ^ D[33] ^ D[32] ^ D[31] ^ D[27] ^ D[21] ^ D[20] ^ D[18] ^ D[17] ^ D[14] ^ D[13] ^ D[12] ^ D[11] ^ D[9] ^ D[6] ^ D[4] ^ D[2] ^ D[1] ^ D[0]; assign NewCRC[5] = D[46] ^ D[45] ^ D[44] ^ D[43] ^ D[41] ^ D[38] ^ D[36] ^ D[34] ^ D[33] ^ D[32] ^ D[28] ^ D[22] ^ D[21] ^ D[19] ^ D[18] ^ D[15] ^ D[14] ^ D[13] ^ D[12] ^ D[10] ^ D[7] ^ D[5] ^ D[3] ^ D[2] ^ D[1]; assign NewCRC[6] = D[47] ^ D[45] ^ D[44] ^ D[41] ^ D[39] ^ D[36] ^ D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[23] ^ D[22] ^ D[19] ^ D[18] ^ D[17] ^ D[11] ^ D[7] ^ D[4] ^ D[2] ^ D[1] ^ D[0]; assign NewCRC[7] = D[45] ^ D[41] ^ D[40] ^ D[36] ^ D[35] ^ D[34] ^ D[33] ^ D[32] ^ D[30] ^ D[29] ^ D[28] ^ D[26] ^ D[25] ^ D[23] ^ D[19] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^ D[12] ^ D[7] ^ D[6] ^ D[5] ^ D[2] ^ D[0]; endmodule
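// ---------------------------------------------------------------------------
// Illustrative sketch (not Microsoft source): a minimal directed test for the
// combinational CRC generator above. An all-zero input must produce an
// all-zero CRC (every output bit is an XOR of zeros); the expected value for
// D = 48'h1 was derived by noting which NewCRC equations include D[0]
// (bits 0, 2, 4, 6, 7). The testbench name is hypothetical.
// ---------------------------------------------------------------------------
`timescale 1ns / 1ps
module RCB_FRL_CRC_gen_tb;
   reg  [47:0] D;
   wire [7:0]  NewCRC;

   RCB_FRL_CRC_gen dut (.D(D), .NewCRC(NewCRC));

   initial begin
      D = 48'h0;
      #1 if (NewCRC !== 8'h00) $stop;

      D = 48'h1;  // only D[0] set: CRC bits 0, 2, 4, 6, 7 depend on D[0]
      #1 if (NewCRC !== 8'hd5) $stop;

      $display("RCB_FRL_CRC_gen_tb: checks passed, CRC(48'h1) = %h", NewCRC);
      $finish;
   end
endmodule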
//wishbone_arbiter.v /* Distributed under the MIT licesnse. Copyright (c) 2011 Dave McCoy ([email protected]) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ `timescale 1 ns/1 ps module arbiter_2_masters ( //control signals input clk, input rst, //wishbone master ports input i_m0_we, input i_m0_cyc, input i_m0_stb, input [3:0] i_m0_sel, output o_m0_ack, input [31:0] i_m0_dat, output [31:0] o_m0_dat, input [31:0] i_m0_adr, output o_m0_int, input i_m1_we, input i_m1_cyc, input i_m1_stb, input [3:0] i_m1_sel, output o_m1_ack, input [31:0] i_m1_dat, output [31:0] o_m1_dat, input [31:0] i_m1_adr, output o_m1_int, //wishbone slave signals output o_s_we, output o_s_stb, output o_s_cyc, output [3:0] o_s_sel, output [31:0] o_s_adr, output [31:0] o_s_dat, input [31:0] i_s_dat, input i_s_ack, input i_s_int ); localparam MASTER_COUNT = 2; //registers/wires //this should be parameterized reg [7:0] master_select; reg [7:0] priority_select; wire o_master_we [MASTER_COUNT - 1:0]; wire o_master_stb [MASTER_COUNT - 1:0]; wire o_master_cyc [MASTER_COUNT - 1:0]; wire [3:0] o_master_sel [MASTER_COUNT - 1:0]; wire [31:0] o_master_adr [MASTER_COUNT - 1:0]; wire [31:0] o_master_dat [MASTER_COUNT - 1:0]; //master select block localparam MASTER_NO_SEL = 8'hFF; localparam MASTER_0 = 0; localparam MASTER_1 = 1; always @ (posedge clk) begin if (rst) begin master_select <= MASTER_NO_SEL; end else begin case (master_select) MASTER_0: begin if (!i_m0_cyc && !i_s_ack) begin master_select <= MASTER_NO_SEL; end end MASTER_1: begin if (!i_m1_cyc && !i_s_ack) begin master_select <= MASTER_NO_SEL; end end default: begin //nothing selected if (i_m0_cyc) begin master_select <= MASTER_0; end else if (i_m1_cyc) begin master_select <= MASTER_1; end end endcase if ((master_select != MASTER_NO_SEL) && (priority_select < master_select) && (!o_s_stb && !i_s_ack))begin master_select <= MASTER_NO_SEL; end end end //priority select always @ (posedge clk) begin if (rst) begin priority_select <= MASTER_NO_SEL; end else begin //find the highest priority if (i_m0_cyc) begin priority_select <= MASTER_0; end else if (i_m1_cyc) begin priority_select <= MASTER_1; end else begin priority_select <= MASTER_NO_SEL; end end end //slave assignments assign o_s_we = (master_select != MASTER_NO_SEL) ? o_master_we[master_select] : 0; assign o_s_stb = (master_select != MASTER_NO_SEL) ? o_master_stb[master_select] : 0; assign o_s_cyc = (master_select != MASTER_NO_SEL) ? o_master_cyc[master_select] : 0; assign o_s_sel = (master_select != MASTER_NO_SEL) ? 
o_master_sel[master_select] : 0; assign o_s_adr = (master_select != MASTER_NO_SEL) ? o_master_adr[master_select] : 0; assign o_s_dat = (master_select != MASTER_NO_SEL) ? o_master_dat[master_select] : 0; //write select block assign o_master_we[MASTER_0] = i_m0_we; assign o_master_we[MASTER_1] = i_m1_we; //strobe select block assign o_master_stb[MASTER_0] = i_m0_stb; assign o_master_stb[MASTER_1] = i_m1_stb; //cycle select block assign o_master_cyc[MASTER_0] = i_m0_cyc; assign o_master_cyc[MASTER_1] = i_m1_cyc; //sel select block assign o_master_sel[MASTER_0] = i_m0_sel; assign o_master_sel[MASTER_1] = i_m1_sel; //address select block assign o_master_adr[MASTER_0] = i_m0_adr; assign o_master_adr[MASTER_1] = i_m1_adr; //data select block assign o_master_dat[MASTER_0] = i_m0_dat; assign o_master_dat[MASTER_1] = i_m1_dat; //master response assignments assign o_m0_ack = (master_select == MASTER_0) ? i_s_ack : 0; assign o_m0_dat = i_s_dat; assign o_m0_int = (master_select == MASTER_0) ? i_s_int : 0; assign o_m1_ack = (master_select == MASTER_1) ? i_s_ack : 0; assign o_m1_dat = i_s_dat; assign o_m1_int = (master_select == MASTER_1) ? i_s_int : 0; endmodule
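// --------------------------------------------------------------------------
// A minimal, hypothetical simulation sketch for arbiter_2_masters (not part
// of the original sources): both masters raise cyc/stb, master 0 wins first
// (it has the higher fixed priority), then master 1 takes over after master 0
// releases the bus. The slave simply acks whatever is presented, and all
// data/address values are illustrative only.
// --------------------------------------------------------------------------
`timescale 1 ns/1 ps
module arbiter_2_masters_tb;
  reg         clk = 0, rst = 1;
  reg         m0_cyc = 0, m0_stb = 0, m1_cyc = 0, m1_stb = 0;
  wire        s_cyc, s_stb, s_we;
  wire [31:0] s_adr, s_dat;
  wire [3:0]  s_sel;
  wire        m0_ack, m1_ack, m0_int, m1_int;
  wire [31:0] m0_dat_o, m1_dat_o;

  arbiter_2_masters dut (
    .clk(clk), .rst(rst),
    .i_m0_we(1'b0), .i_m0_cyc(m0_cyc), .i_m0_stb(m0_stb), .i_m0_sel(4'hF),
    .o_m0_ack(m0_ack), .i_m0_dat(32'hA0A0_A0A0), .o_m0_dat(m0_dat_o),
    .i_m0_adr(32'h0000_1000), .o_m0_int(m0_int),
    .i_m1_we(1'b0), .i_m1_cyc(m1_cyc), .i_m1_stb(m1_stb), .i_m1_sel(4'hF),
    .o_m1_ack(m1_ack), .i_m1_dat(32'hB1B1_B1B1), .o_m1_dat(m1_dat_o),
    .i_m1_adr(32'h0000_2000), .o_m1_int(m1_int),
    .o_s_we(s_we), .o_s_stb(s_stb), .o_s_cyc(s_cyc), .o_s_sel(s_sel),
    .o_s_adr(s_adr), .o_s_dat(s_dat),
    .i_s_dat(32'hDEAD_BEEF), .i_s_ack(s_stb), .i_s_int(1'b0)
  );

  always #5 clk = ~clk;                    // free-running clock

  initial begin
    repeat (2) @(posedge clk); rst <= 0;
    // both masters request; master 0 should be granted first
    m0_cyc <= 1; m0_stb <= 1; m1_cyc <= 1; m1_stb <= 1;
    repeat (4) @(posedge clk);
    m0_cyc <= 0; m0_stb <= 0;              // release master 0, master 1 takes over
    repeat (4) @(posedge clk);
    m1_cyc <= 0; m1_stb <= 0;
    repeat (2) @(posedge clk);
    $finish;
  end

  initial $monitor("t=%0t s_cyc=%b s_adr=%h m0_ack=%b m1_ack=%b",
                   $time, s_cyc, s_adr, m0_ack, m1_ack);
endmodule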
/*+-------------------------------------------------------------------------- Copyright (c) 2015, Microsoft Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------*/ `timescale 1ns / 1ps ////////////////////////////////////////////////////////////////////////////////// // Company: Microsoft Research Asia // Engineer: Jiansong Zhang // // Create Date: 12:17:59 11/12/2009 // Design Name: // Module Name: performance_counter // Project Name: Sora // Target Devices: LX50T1136-1 // Tool versions: ISE 10.02 // Description: This module measures two durations: (1) from the TX_des request sent to tx_engine until a new des // is received, and (2) from transfer start to transfer done. // A counter (125MHz or 250MHz, depending on the DMA clock) is implemented. // // Dependencies: // // Revision: // Revision 0.01 - File Created // Additional Comments: // ////////////////////////////////////////////////////////////////////////////////// module performance_counter( input clk, input rst, input transferstart_one, input rd_dma_done_one, input new_des_one, output reg [23:0] round_trip_latency, output reg [23:0] transfer_duration ); reg [39:0] counter; /// free-running 40-bit counter, wraps after more than two hours on a 125MHz clock reg [23:0] snapshot_transferstart; /// records the lower 24 bits of counter at transfer start, wraps after more than 100ms on a 125MHz clock /// counter always@(posedge clk) begin if(rst) counter <= 40'h00_0000_0000; else counter <= counter + 40'h00_0000_0001; end /// snapshot_transferstart always@(posedge clk) begin if(rst) snapshot_transferstart <= 24'h00_0000; else if (transferstart_one) snapshot_transferstart <= counter[23:0]; else snapshot_transferstart <= snapshot_transferstart; end /// round_trip_latency = counter - snapshot_transferstart (two's-complement add) always@(posedge clk) begin if (rst) round_trip_latency <= 24'h00_0000; else if (new_des_one) round_trip_latency <= counter[23:0] + (~snapshot_transferstart) + 24'h00_0001; else round_trip_latency <= round_trip_latency; end /// transfer_duration = counter - snapshot_transferstart (two's-complement add) always@(posedge clk) begin if (rst) transfer_duration <= 24'h00_0000; else if (rd_dma_done_one) transfer_duration <= counter[23:0] + (~snapshot_transferstart) + 24'h00_0001; else transfer_duration <= transfer_duration; end endmodule
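// --------------------------------------------------------------------------
// A minimal, hypothetical simulation sketch for performance_counter (not part
// of the original sources): it pulses transferstart_one, pulses new_des_one
// after an arbitrary 20-cycle gap, pulses rd_dma_done_one 10 cycles later,
// and prints the two measured durations. The gap lengths are illustration
// values only.
// --------------------------------------------------------------------------
`timescale 1ns / 1ps
module performance_counter_tb;
  reg clk = 0, rst = 1;
  reg transferstart_one = 0, rd_dma_done_one = 0, new_des_one = 0;
  wire [23:0] round_trip_latency, transfer_duration;

  performance_counter dut (
    .clk(clk), .rst(rst),
    .transferstart_one(transferstart_one),
    .rd_dma_done_one(rd_dma_done_one),
    .new_des_one(new_des_one),
    .round_trip_latency(round_trip_latency),
    .transfer_duration(transfer_duration)
  );

  always #4 clk = ~clk;                        // 125 MHz-style DMA clock

  initial begin
    repeat (4) @(posedge clk); rst <= 0;
    @(posedge clk); transferstart_one <= 1;    // one-cycle start pulse
    @(posedge clk); transferstart_one <= 0;
    repeat (20) @(posedge clk);                // simulated request round trip
    new_des_one <= 1;
    @(posedge clk); new_des_one <= 0;
    repeat (10) @(posedge clk);                // simulated remainder of the transfer
    rd_dma_done_one <= 1;
    @(posedge clk); rd_dma_done_one <= 0;
    repeat (2) @(posedge clk);
    $display("round_trip_latency=%0d cycles, transfer_duration=%0d cycles",
             round_trip_latency, transfer_duration);
    $finish;
  end
endmodule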
/*+-------------------------------------------------------------------------- Copyright (c) 2015, Microsoft Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------*/ ////////////////////////////////////////////////////////////////////////////////// // Company: Microsoft Research Asia // Engineer: Jiansong Zhang // // Create Date: 21:39:39 06/01/2009 // Design Name: // Module Name: rx_trn_data_fsm // Project Name: Sora // Target Devices: Virtex5 LX50T // Tool versions: ISE10.1.03 // Description: // Purpose: Receive TRN Data FSM. This module interfaces to the Block Plus RX // TRN. It presents the 64-bit data from the completer and forwards that // data with a data_valid signal. This block also decodes packet header info // and forwards it to the rx_trn_monitor block.
// // Dependencies: // // Revision: // Revision 0.01 - File Created // Additional Comments: // ////////////////////////////////////////////////////////////////////////////////// `timescale 1ns / 1ps module rx_trn_data_fsm( input wire clk, input wire rst, // Rx Local-Link input wire [63:0] trn_rd, input wire [7:0] trn_rrem_n, input wire trn_rsof_n, input wire trn_reof_n, input wire trn_rsrc_rdy_n, input wire trn_rsrc_dsc_n, output reg trn_rdst_rdy_n, input wire trn_rerrfwd_n, output wire trn_rnp_ok_n, input wire [6:0] trn_rbar_hit_n, input wire [11:0] trn_rfc_npd_av, input wire [7:0] trn_rfc_nph_av, input wire [11:0] trn_rfc_pd_av, input wire [7:0] trn_rfc_ph_av, input wire [11:0] trn_rfc_cpld_av, input wire [7:0] trn_rfc_cplh_av, output wire trn_rcpl_streaming_n, //DATA FIFO SIGNALS output reg [63:0] data_out, output wire [7:0] data_out_be, output reg data_valid, input wire data_fifo_status, //END DATA FIFO SIGNALS //HEADER FIELD SIGNALS //The following are registered from the header fields of the current packet //See the PCIe Base Specification for definitions of these headers output reg fourdw_n_threedw, //fourdw = 1'b1; 3dw = 1'b0; output reg payload, output reg [2:0] tc, //traffic class output reg td, //digest output reg ep, //poisoned bit output reg [1:0] attr, //attribute field output reg [9:0] dw_length, //DWORD Length //the following fields are dependent on the type of TLP being received //regs with MEM prefix are valid for memory TLPS and regs with CMP prefix //are valid for completion TLPS output reg [15:0] MEM_req_id, //requester ID for memory TLPs output reg [7:0] MEM_tag, //tag for non-posted memory read request output reg [15:0] CMP_comp_id, //completer id for completion TLPs output reg [2:0]CMP_compl_stat, //status for completion TLPs output reg CMP_bcm, //byte count modified field for completions TLPs output reg [11:0] CMP_byte_count, //remaining byte count for completion TLPs output reg [63:0] MEM_addr, //address field for memory TLPs output reg [15:0] CMP_req_id, //requester if for completions TLPs output reg [7:0] CMP_tag, //tag field for completion TLPs output reg [6:0] CMP_lower_addr, //lower address field for completion TLPs //decode of the format field output wire MRd, //Mem read output wire MWr, //Mem write output wire CplD, //Completion w/ data output wire Msg, //Message TLP output wire UR, //Unsupported request TLP i.e. IO, CPL,etc.. 
output reg [6:0] bar_hit, //valid when a BAR is hit output reg header_fields_valid//valid signal to qualify the above header fields //END HEADER FIELD SIGNALS ); //state machine states localparam IDLE = 3'b000; localparam NOT_READY = 3'b001; localparam SOF = 3'b010; localparam HEAD2 = 3'b011; localparam BODY = 3'b100; localparam EOF = 3'b101; //additional pipelines regs for RX TRN interface reg [63:0] trn_rd_d1; reg [7:0] trn_rrem_d1_n; reg trn_rsof_d1_n; reg trn_reof_d1_n; reg trn_rsrc_rdy_d1_n; reg trn_rsrc_dsc_d1_n; reg trn_rerrfwd_d1_n; reg [6:0] trn_rbar_hit_d1_n; reg [11:0] trn_rfc_npd_av_d1; reg [7:0] trn_rfc_nph_av_d1; reg [11:0] trn_rfc_pd_av_d1; reg [7:0] trn_rfc_ph_av_d1; reg [11:0] trn_rfc_cpld_av_d1; reg [7:0] trn_rfc_cplh_av_d1; //second pipeline reg [63:0] trn_rd_d2; reg [7:0] trn_rrem_d2_n; reg trn_rsof_d2_n; reg trn_reof_d2_n; reg trn_rsrc_rdy_d2_n; reg trn_rsrc_dsc_d2_n; reg trn_rerrfwd_d2_n; reg [6:0] trn_rbar_hit_d2_n; reg [11:0] trn_rfc_npd_av_d2; reg [7:0] trn_rfc_nph_av_d2; reg [11:0] trn_rfc_pd_av_d2; reg [7:0] trn_rfc_ph_av_d2; reg [11:0] trn_rfc_cpld_av_d2; reg [7:0] trn_rfc_cplh_av_d2; reg [4:0] rx_packet_type; reg [2:0] trn_state; wire [63:0] data_out_mux; wire [7:0] data_out_be_mux; reg data_valid_early; reg rst_reg; always@(posedge clk) rst_reg <= rst; // TIE constant signals here assign trn_rnp_ok_n = 1'b0; assign trn_rcpl_streaming_n = 1'b0; //use completion streaming mode //all the outputs of the endpoint should be pipelined //to help meet required timing of an 8 lane design always @ (posedge clk) begin trn_rd_d1[63:0] <= trn_rd[63:0] ; trn_rrem_d1_n[7:0] <= trn_rrem_n[7:0] ; trn_rsof_d1_n <= trn_rsof_n ; trn_reof_d1_n <= trn_reof_n ; trn_rsrc_rdy_d1_n <= trn_rsrc_rdy_n ; trn_rsrc_dsc_d1_n <= trn_rsrc_dsc_n ; trn_rerrfwd_d1_n <= trn_rerrfwd_n ; trn_rbar_hit_d1_n[6:0] <= trn_rbar_hit_n[6:0] ; trn_rfc_npd_av_d1[11:0] <= trn_rfc_npd_av[11:0] ; trn_rfc_nph_av_d1[7:0] <= trn_rfc_nph_av[7:0] ; trn_rfc_pd_av_d1[11:0] <= trn_rfc_pd_av[11:0] ; trn_rfc_ph_av_d1[7:0] <= trn_rfc_ph_av[7:0] ; trn_rfc_cpld_av_d1[11:0] <= trn_rfc_cpld_av[11:0]; trn_rfc_cplh_av_d1[7:0] <= trn_rfc_cplh_av[7:0] ; trn_rd_d2[63:0] <= trn_rd_d1[63:0] ; trn_rrem_d2_n[7:0] <= trn_rrem_d1_n[7:0] ; trn_rsof_d2_n <= trn_rsof_d1_n ; trn_reof_d2_n <= trn_reof_d1_n ; trn_rsrc_rdy_d2_n <= trn_rsrc_rdy_d1_n ; trn_rsrc_dsc_d2_n <= trn_rsrc_dsc_d1_n ; trn_rerrfwd_d2_n <= trn_rerrfwd_d1_n ; trn_rbar_hit_d2_n[6:0] <= trn_rbar_hit_d1_n[6:0] ; trn_rfc_npd_av_d2[11:0] <= trn_rfc_npd_av_d1[11:0] ; trn_rfc_nph_av_d2[7:0] <= trn_rfc_nph_av_d1[7:0] ; trn_rfc_pd_av_d2[11:0] <= trn_rfc_pd_av_d1[11:0] ; trn_rfc_ph_av_d2[7:0] <= trn_rfc_ph_av_d1[7:0] ; trn_rfc_cpld_av_d2[11:0] <= trn_rfc_cpld_av_d1[11:0]; trn_rfc_cplh_av_d2[7:0] <= trn_rfc_cplh_av_d1[7:0] ; end assign rx_sof_d1 = ~trn_rsof_d1_n & ~trn_rsrc_rdy_d1_n; // Assign packet type information about the current RX Packet // rx_packet_type is decoded in always block directly below these assigns assign MRd = rx_packet_type[4]; assign MWr = rx_packet_type[3]; assign CplD = rx_packet_type[2]; assign Msg = rx_packet_type[1]; assign UR = rx_packet_type[0]; //register the packet header fields and decode the packet type //both memory and completion TLP header fields are registered for each //received packet, however, only the fields for the incoming type will be //valid always@(posedge clk ) begin if(rst_reg)begin rx_packet_type[4:0] <= 5'b00000; fourdw_n_threedw <= 0; payload <= 0; tc[2:0] <= 0; //traffic class td <= 0; //digest ep <= 0; //poisoned bit attr[1:0] <= 0; 
dw_length[9:0] <= 0; MEM_req_id[15:0] <= 0; MEM_tag[7:0] <= 0; CMP_comp_id[15:0] <= 0; CMP_compl_stat[2:0] <= 0; CMP_bcm <= 0; CMP_byte_count[11:0] <= 0; end else begin if(rx_sof_d1)begin //these fields same for all TLPs fourdw_n_threedw <= trn_rd_d1[61]; payload <= trn_rd_d1[62]; tc[2:0] <= trn_rd_d1[54:52]; //traffic class td <= trn_rd_d1[47]; //digest ep <= trn_rd_d1[46]; //poisoned bit attr[1:0] <= trn_rd_d1[45:44]; dw_length[9:0] <= trn_rd_d1[41:32]; //also latch bar_hit bar_hit[6:0] <= ~trn_rbar_hit_d1_n[6:0]; //these following fields dependent on packet type //i.e. memory packet fields are only valid for mem packet types //and completer packet fields are only valid for completer packet type; //memory packet fields MEM_req_id[15:0] <= trn_rd_d1[31:16]; MEM_tag[7:0] <= trn_rd_d1[15:8]; //first and last byte enables not needed because plus core delivers //completer packet fields CMP_comp_id[15:0] <= trn_rd_d1[31:16]; CMP_compl_stat[2:0] <= trn_rd_d1[15:13]; CMP_bcm <= trn_rd_d1[12]; CMP_byte_count[11:0] <= trn_rd_d1[11:0]; //add message fields here if needed //decode the packet type and register in rx_packet_type casex({trn_rd_d1[62],trn_rd_d1[60:56]}) 6'b000000: begin //mem read rx_packet_type[4:0] <= 5'b10000; end 6'b100000: begin //mem write rx_packet_type[4:0] <= 5'b01000; end 6'b101010: begin //completer with data rx_packet_type[4:0] <= 5'b00100; end 6'bx10xxx: begin //message rx_packet_type[4:0] <= 5'b00010; end default: begin //all other packet types are unsupported for this design rx_packet_type[4:0] <= 5'b00001; end endcase end end end // Now do the same for the second header of the current packet always@(posedge clk )begin if(rst_reg)begin MEM_addr[63:0] <= 0; CMP_req_id[15:0] <= 0; CMP_tag[7:0] <= 0; CMP_lower_addr[6:0] <= 0; end else begin if(trn_state == SOF & ~trn_rsrc_rdy_d1_n)begin //packet is in process of //reading out second header if(fourdw_n_threedw) MEM_addr[63:0] <= trn_rd_d1[63:0]; else MEM_addr[63:0] <= {32'h00000000,trn_rd_d1[63:32]}; CMP_req_id[15:0] <= trn_rd_d1[63:48]; CMP_tag[7:0] <= trn_rd_d1[47:40]; CMP_lower_addr[6:0] <= trn_rd_d1[48:32]; end end end // generate a valid signal for the headers field always@(posedge clk)begin if(rst_reg) header_fields_valid <= 0; else header_fields_valid <= ~trn_rsrc_rdy_d2_n & trn_rsof_d1_n; end //This state machine keeps track of what state the RX TRN interface //is currently in always @ (posedge clk ) begin if(rst_reg) begin trn_state <= IDLE; trn_rdst_rdy_n <= 1'b0; end else begin case(trn_state) IDLE: begin trn_rdst_rdy_n <= 1'b0; if(rx_sof_d1) trn_state <= SOF; else trn_state <= IDLE; end /// Jiansong: notice, completion streaming here NOT_READY: begin // This state is a placeholder only - it is currently not // entered from any other state // This state could be used for throttling the PCIe // Endpoint Block Plus RX TRN interface, however, this // should not be done when using completion streaming // mode as this reference design does trn_rdst_rdy_n <= 1'b1; trn_state <= IDLE; end SOF: begin if(~trn_reof_d1_n & ~trn_rsrc_rdy_d1_n) trn_state <= EOF; else if(trn_reof_d1_n & ~trn_rsrc_rdy_d1_n) trn_state <= HEAD2; else trn_state <= SOF; end HEAD2: begin if(~trn_reof_d1_n & ~trn_rsrc_rdy_d1_n) trn_state <= EOF; else if(trn_reof_d1_n & ~trn_rsrc_rdy_d1_n) trn_state <= BODY; else trn_state <= HEAD2; end BODY: begin if(~trn_reof_d1_n & ~trn_rsrc_rdy_d1_n) trn_state <= EOF; else trn_state <= BODY; end EOF: begin if(~trn_rsof_d1_n & ~trn_rsrc_rdy_d1_n) trn_state <= SOF; else if(trn_rsof_d1_n & trn_rsrc_rdy_d1_n) trn_state 
<= IDLE; else if(~trn_reof_d1_n & ~trn_rsrc_rdy_d1_n) trn_state <= EOF; else trn_state <= IDLE; end default: begin trn_state <= IDLE; end endcase end end //data shifter logic //need to shift the data depending if we receive a four DWORD or three DWORD //TLP type - Note that completion packets will always be 3DW TLPs assign data_out_mux[63:0] = (fourdw_n_threedw) ? trn_rd_d2[63:0] : {trn_rd_d2[31:0],trn_rd_d1[63:32]}; /// Jiansong: notice, why? 64bit data? likely should be modified //swap the byte ordering to little endian //e.g. data_out = B7,B6,B5,B4,B3,B2,B1,B0 always@(posedge clk) data_out[63:0] <= {data_out_mux[7:0],data_out_mux[15:8], data_out_mux[23:16],data_out_mux[31:24], data_out_mux[39:32],data_out_mux[47:40], data_out_mux[55:48],data_out_mux[63:56]}; //Data byte enable logic: //Need to add byte enable logic for incoming memory transactions if desired //to allow memory transaction granularity smaller than DWORD. // //This design always requests data on 128 byte boundaries so for //completion TLPs the byte enables would always be asserted // //Note that the endpoint block plus uses negative logic, however, //I decided to use positive logic for the user application. assign data_out_be = 8'hff; //data_valid generation logic //Generally, data_valid should be asserted the same amount of cycles //that trn_rsrc_rdy_n is asserted (minus the cycles that sof and //eof are asserted). //There are two exceptions to this: // - 3DW TLPs with odd number of DW without Digest // In this case an extra cycle is required // - eof is used to generate this extra cycle // - 4DW TLPs with even number of DW with Digest // In this case an extra cycle needs to be removed // - the last cycle is removed // Jiansong: fix Mrd data to fifo bug always@(*)begin case({fourdw_n_threedw, dw_length[0], td}) 3'b010: data_valid_early = ~trn_rsrc_rdy_d2_n & trn_rsof_d2_n & ~trn_reof_d2_n & payload; 3'b101: data_valid_early = ~trn_rsrc_rdy_d2_n & trn_reof_d1_n & payload; default: data_valid_early = ~trn_rsrc_rdy_d2_n & trn_rsof_d2_n & trn_reof_d2_n & payload; endcase end //delay by one clock to match data_out (and presumably data_out_be) always@(posedge clk) if(rst_reg) data_valid <= 1'b0; else data_valid <= data_valid_early; endmodule
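// --------------------------------------------------------------------------
// A small, hypothetical illustration (not part of the original sources) of
// the byte-order swap applied to data_out in the module above: a reusable
// function that reverses the byte order of a 64-bit word, exercised once in
// an initial block with an arbitrary value.
// --------------------------------------------------------------------------
`timescale 1ns / 1ps
module byte_swap_demo;
  // reverse the byte order of a 64-bit word (big endian <-> little endian)
  function [63:0] swap_bytes;
    input [63:0] d;
    begin
      swap_bytes = {d[7:0],   d[15:8],  d[23:16], d[31:24],
                    d[39:32], d[47:40], d[55:48], d[63:56]};
    end
  endfunction

  initial begin
    // 0x0011223344556677 becomes 0x7766554433221100 after the swap
    $display("swapped = %h", swap_bytes(64'h0011_2233_4455_6677));
  end
endmodule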
//Legal Notice: (C)2016 Altera Corporation. All rights reserved. Your //use of Altera Corporation's design tools, logic functions and other //software and tools, and its AMPP partner logic functions, and any //output files any of the foregoing (including device programming or //simulation files), and any associated documentation or information are //expressly subject to the terms and conditions of the Altera Program //License Subscription Agreement or other applicable license agreement, //including, without limitation, that your use is for the sole purpose //of programming logic devices manufactured by Altera and sold by Altera //or its authorized distributors. Please refer to the applicable //agreement for further details. // synthesis translate_off `timescale 1ns / 1ps // synthesis translate_on // turn off superfluous verilog processor warnings // altera message_level Level1 // altera message_off 10034 10035 10036 10037 10230 10240 10030 module niosii_onchip_memory2_0 ( // inputs: address, byteenable, chipselect, clk, clken, reset, reset_req, write, writedata, // outputs: readdata ) ; parameter INIT_FILE = "niosii_onchip_memory2_0.hex"; output [ 31: 0] readdata; input [ 13: 0] address; input [ 3: 0] byteenable; input chipselect; input clk; input clken; input reset; input reset_req; input write; input [ 31: 0] writedata; wire clocken0; wire [ 31: 0] readdata; wire wren; assign wren = chipselect & write; assign clocken0 = clken & ~reset_req; altsyncram the_altsyncram ( .address_a (address), .byteena_a (byteenable), .clock0 (clk), .clocken0 (clocken0), .data_a (writedata), .q_a (readdata), .wren_a (wren) ); defparam the_altsyncram.byte_size = 8, the_altsyncram.init_file = INIT_FILE, the_altsyncram.lpm_type = "altsyncram", the_altsyncram.maximum_depth = 12000, the_altsyncram.numwords_a = 12000, the_altsyncram.operation_mode = "SINGLE_PORT", the_altsyncram.outdata_reg_a = "UNREGISTERED", the_altsyncram.ram_block_type = "AUTO", the_altsyncram.read_during_write_mode_mixed_ports = "DONT_CARE", the_altsyncram.width_a = 32, the_altsyncram.width_byteena_a = 4, the_altsyncram.widthad_a = 14; //s1, which is an e_avalon_slave //s2, which is an e_avalon_slave endmodule
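// --------------------------------------------------------------------------
// A minimal, hypothetical write-then-read sketch for niosii_onchip_memory2_0
// (not part of the original sources). Because the RAM wraps Altera's
// altsyncram primitive, running it requires the altera_mf simulation library
// and the niosii_onchip_memory2_0.hex init file; the sequence below only
// illustrates the intended handshake, and the address/data values are
// arbitrary.
// --------------------------------------------------------------------------
// synthesis translate_off
`timescale 1ns / 1ps
module niosii_onchip_memory2_0_tb;
  reg         clk = 0, reset = 1, reset_req = 0;
  reg         chipselect = 0, clken = 1, write = 0;
  reg  [13:0] address = 0;
  reg  [3:0]  byteenable = 4'hF;
  reg  [31:0] writedata = 0;
  wire [31:0] readdata;

  niosii_onchip_memory2_0 dut (
    .address(address), .byteenable(byteenable), .chipselect(chipselect),
    .clk(clk), .clken(clken), .reset(reset), .reset_req(reset_req),
    .write(write), .writedata(writedata), .readdata(readdata)
  );

  always #10 clk = ~clk;   // 50 MHz-style clock

  initial begin
    repeat (2) @(posedge clk); reset <= 0;
    // write one word
    @(posedge clk); chipselect <= 1; write <= 1;
                    address <= 14'd16; writedata <= 32'hCAFE_F00D;
    @(posedge clk); write <= 0;
    // read it back (unregistered q_a: data valid the cycle after the address is clocked in)
    @(posedge clk); address <= 14'd16;
    @(posedge clk); #1 $display("readdata = %h", readdata);
    $finish;
  end
endmodule
// synthesis translate_on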
module ghrd_10m50da_top ( //Clock and Reset input wire clk_50, input wire fpga_reset_n, //QSPI // output wire qspi_clk, // inout wire[3:0] qspi_io, // output wire qspi_csn, //16550 UART input wire uart_rx, output wire uart_tx, output wire [4:0] user_led ); //Heart-beat counter reg [25:0] heart_beat_cnt; // SoC sub-system module ghrd_10m50da ghrd_10m50da_inst ( .clk_clk (clk_50), .reset_reset_n (fpga_reset_n), // .ext_flash_flash_dataout_conduit_dataout (qspi_io), // .ext_flash_flash_dclk_out_conduit_dclk_out (qspi_clk), // .ext_flash_flash_ncs_conduit_ncs (qspi_csn), //16550 UART .a_16550_uart_0_rs_232_serial_sin (uart_rx), // a_16550_uart_0_rs_232_serial.sin .a_16550_uart_0_rs_232_serial_sout (uart_tx), // .sout .a_16550_uart_0_rs_232_serial_sout_oe () // .sout_oe ); //Heart beat by 50MHz clock always @(posedge clk_50 or negedge fpga_reset_n) if (!fpga_reset_n) heart_beat_cnt <= 26'h0; //0x3FFFFFF else heart_beat_cnt <= heart_beat_cnt + 1'b1; assign user_led = {4'hf,heart_beat_cnt[25]}; endmodule
`timescale 1ns / 1ps ////////////////////////////////////////////////////////////////////////////////// // Company: // Engineer: // // Create Date: 11:05:40 09/06/2015 // Design Name: // Module Name: Deco_Round_Mult // Project Name: // Target Devices: // Tool versions: // Description: // // Dependencies: // // Revision: // Revision 0.01 - File Created // Additional Comments: // ////////////////////////////////////////////////////////////////////////////////// module Deco_Round_Mult( input wire [1:0] round_mode, input wire or_info, //OR of the 23 least significant bits of the product of significands input wire xor_info, //sign of the operation (XOR of the operand signs) output reg ctrl //control signal mux --- control of the rounded significand ); always @* case ({xor_info,or_info,round_mode}) // ctrl encoding: //1'b0: pass the significand through without rounding //1'b1: pass the rounded significand //Round towards - infinity //0: positive number ; 01: Round towards - infinity ; XX rounding bits //Positive Number //xor, or, round 4'b0101: ctrl <= 1'b0; //Negative Number 4'b1101: ctrl <= 1'b1; //Round towards + infinity //Positive Number 4'b0110: ctrl <= 1'b1; //Negative Number 4'b1110: ctrl <= 1'b0; default: ctrl <= 1'b0; //Truncation endcase endmodule
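// --------------------------------------------------------------------------
// A minimal, hypothetical simulation sketch for Deco_Round_Mult (not part of
// the original sources): it sweeps the result sign (xor_info) and the two
// directed rounding modes handled by the decoder (01 = toward -infinity,
// 10 = toward +infinity) with the sticky information set, and prints the
// resulting ctrl selection.
// --------------------------------------------------------------------------
`timescale 1ns / 1ps
module Deco_Round_Mult_tb;
  reg  [1:0] round_mode;
  reg        or_info, xor_info;
  wire       ctrl;

  Deco_Round_Mult dut (
    .round_mode(round_mode),
    .or_info(or_info),
    .xor_info(xor_info),
    .ctrl(ctrl)
  );

  integer sign, mode;
  initial begin
    or_info = 1'b1;                           // inexact product: the rounding decision matters
    for (sign = 0; sign < 2; sign = sign + 1)
      for (mode = 1; mode < 3; mode = mode + 1) begin
        xor_info   = sign;                    // truncates to 1 bit: result sign
        round_mode = mode;                    // truncates to 2 bits: 01 = -inf, 10 = +inf
        #1 $display("sign=%b round_mode=%b -> ctrl=%b", xor_info, round_mode, ctrl);
      end
    $finish;
  end
endmodule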