// ========== Copyright Header Begin ==========================================
// OpenSPARC T2 Processor File: niu_rdmc_rcr_manager.v
// Copyright (C) 1995-2007 Sun Microsystems, Inc. All Rights Reserved
// 4150 Network Circle, Santa Clara, California 95054, U.S.A.
// * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; version 2 of the License.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
// For the avoidance of doubt, and except that if any non-GPL license
// choice is available it will apply instead, Sun elects to use only
// the General Public License version 2 (GPLv2) at this time for any
// software where a choice of GPL license versions is made
// available with the language indicating that GPLv2 or any later version
// may be used, or where a choice of which version of the GPL is applied is
// otherwise unspecified.
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
// CA 95054 USA or visit www.sun.com if you need additional information or
// ========== Copyright Header End ============================================
/*************************************************************************
* File Name : niu_rdmc_rcr_manager.v
* Author Name : Jeanne Cai
* Date Created : 07/18/2004
* Copyright (c) 2001, Sun Microsystems, Inc.
* Sun Proprietary and Confidential
*************************************************************************/
module niu_rdmc_rcr_manager (
rx_dma_ctl_stat_reg_bit47,
rx_dma_ctl_stat_reg_wenu,
rx_dma_ctl_stat_reg_wenl,
chnl_sel_buf_en_r, //from niu_rdmc_buf_manager.v
pref_buf_used_num, //up to three for jumbo packets
update_rcr_shadw, //from niu_rdmc_wr_sched.v, need chnl id
input[15:0] clk_div_value;
input[4:0] dma_chnl_grp_id;
input[7:0] shadw_start_addr;
input[7:0] shadw_rd_end_addr;
input[7:0] shadw_wr_end_addr;
input[19:0] rx_log_page_hdl_reg;
input[53:0] rcr_cfig_a_reg;
input[22:0] rcr_cfig_b_reg;
input[31:0] rx_dma_ctl_stat_reg;
input rx_dma_ctl_stat_reg_bit47;
input rcr_cfig_a_reg_wenu;
input rcr_cfig_a_reg_wenl;
input rx_dma_ctl_stat_reg_wenu;
input rx_dma_ctl_stat_reg_wenl;
input[1:0] pref_buf_used_num;
input wr_transfer_comp_int;
input[4:0] rdmc_wr_data_dma_num;
input[3:0] rcr_wrbk_pkt_num;
input rdmc_rcr_ack_valid;
input[4:0] rdmc_rcr_ack_dma_num;
output[7:0] shadw_wr_ptr;
output[7:0] shadw_rd_ptr;
output[63:0] rcr_wrbk_addr;
output[3:0] rcr_wrbk_numb;
output rcr_wrbk_data_type;
output[15:0] rcr_curr_qlen;
output[7:0] shadw_curr_space_cnt;
output[2:0] rcr_ctl_stat_word;
output[15:0] rcr_curr_addr;
output[15:0] rcr_status_a;
output rcr_addr_not_valid;
output mbox_addr_not_valid;
output rcr_addr_overflow;
output rcr_curr_cnt_overflow;
output rcr_curr_cnt_underflow;
output rcr_pkt_cnt_underflow;
output chnl_shadw_parity_err;
reg rcr_cfig_a_reg_wenu_dly2;
reg rcr_cfig_a_reg_wenu_dly;
reg rcr_cfig_a_reg_wenl_dly2;
reg rcr_cfig_a_reg_wenl_dly;
reg rx_dma_ctl_stat_reg_wenu_dly;
reg rx_dma_ctl_stat_reg_wenl_dly;
reg[15:0] rcr_curr_addr_tmp;
reg[16:0] rcr_curr_cnt; //bit[16] is overflow bit
reg[3:0] rcr_wrbk_act_num;
reg[16:0] rcr_curr_pkt_cnt;
reg[3:0] rcr_wrbk_pkt_num_sav;
reg[3:0] rcr_wrbk_pkt_act_num;
reg[3:0] rcr_wrbk_pkt_num_r;
reg[43:0] rcr_relo_addr_r;
reg rcr_curr_cnt_overflow_r;
reg rcr_wrbk_ack_done_sm;
reg[5:0] shadw_act_curr_cnt;
reg[7:0] shadw_curr_space_cnt;
reg rcr_timer_done_r_dly;
reg mbox_update_done_dly;
wire[2:0] rcr_ctl_stat_word;
wire[15:0] rcr_curr_qlen = rcr_curr_cnt[15:0];
//wire[17:0] rcr_status_a = {rcr_curr_cnt[16], rcr_curr_pkt_cnt[16:0]};
wire[15:0] rcr_status_a = rcr_curr_pkt_cnt[15:0];
wire rcr_ack_err = rdmc_rcr_ack_valid & rdmc_rcr_ack_err;
/**********************************************/
//PIO programmed parameters
/**********************************************/
reg[16:0] rcr_end_addr_r;
wire[15:0] rcr_max_len = rcr_cfig_a_reg[53:38];
wire[24:0] rcr_base_addr = rcr_cfig_a_reg[37:13];
wire[15:0] rcr_start_addr = {rcr_cfig_a_reg[12:0], 3'b0};
wire[15:0] rcr_end_addr = rcr_end_addr_r[15:0];
wire rcr_addr_overflow = rcr_end_addr_r[16] & dma_en;
wire[15:0] pkt_thresh = rcr_cfig_b_reg[22:7];
wire timeout_en = rcr_cfig_b_reg[6];
wire[5:0] timeout_value = rcr_cfig_b_reg[5:0];
wire[19:0] page_handle = rx_log_page_hdl_reg;
wire m_bit = rx_dma_ctl_stat_reg_bit47;
//wire rcr_thresh_bit = rx_dma_ctl_stat_reg[46];
//wire rcrto_bit = rx_dma_ctl_stat_reg[45];
wire[15:0] cpu_ptr_rd_num = rx_dma_ctl_stat_reg[31:16];
wire[15:0] cpu_pkt_rd_num = rx_dma_ctl_stat_reg[15:0];
/****************************/
/****************************/
rcr_cfig_a_reg_wenu_dly <= 1'b0;
rcr_cfig_a_reg_wenu_dly <= 1'b0;
else if (rcr_cfig_a_reg_wenu)
rcr_cfig_a_reg_wenu_dly <= 1'b1;
rcr_cfig_a_reg_wenu_dly <= 1'b0;
rcr_cfig_a_reg_wenu_dly2 <= 1'b0;
rcr_cfig_a_reg_wenu_dly2 <= rcr_cfig_a_reg_wenu_dly;
rcr_cfig_a_reg_wenl_dly <= 1'b0;
rcr_cfig_a_reg_wenl_dly <= 1'b0;
else if (rcr_cfig_a_reg_wenl)
rcr_cfig_a_reg_wenl_dly <= 1'b1;
rcr_cfig_a_reg_wenl_dly <= 1'b0;
rcr_cfig_a_reg_wenl_dly2 <= 1'b0;
rcr_cfig_a_reg_wenl_dly2 <= rcr_cfig_a_reg_wenl_dly;
rx_dma_ctl_stat_reg_wenu_dly <= 1'b0;
rx_dma_ctl_stat_reg_wenu_dly <= 1'b0;
rx_dma_ctl_stat_reg_wenu_dly <= rx_dma_ctl_stat_reg_wenu;
rx_dma_ctl_stat_reg_wenl_dly <= 1'b0;
rx_dma_ctl_stat_reg_wenl_dly <= 1'b0;
rx_dma_ctl_stat_reg_wenl_dly <= rx_dma_ctl_stat_reg_wenl;
wire[15:0] rcr_end_addr_sub = rcr_max_len[15:0] - 16'd1;
else if (rcr_cfig_a_reg_wenu_dly & (|rcr_max_len))
rcr_end_addr_r <= {1'b0, rcr_end_addr_sub};
else if (rcr_cfig_a_reg_wenl_dly2 | rcr_cfig_a_reg_wenu_dly2)
rcr_end_addr_r <= {1'b0, rcr_start_addr[15:0]} + rcr_end_addr_r;
rcr_end_addr_r <= rcr_end_addr_r;
addr_cnt <= rcr_wrbk_act_num; //each write back is one cache line for 8 addresses
else if (addr_cnt != 4'b0)
addr_cnt <= addr_cnt - 4'd1;
rcr_curr_addr_tmp <= 16'b0;
else if ((addr_cnt != 4'b0) & (rcr_curr_addr_tmp == rcr_end_addr))
rcr_curr_addr_tmp <= rcr_start_addr;
else if (addr_cnt != 4'b0)
rcr_curr_addr_tmp <= rcr_curr_addr_tmp + 16'd1;
rcr_curr_addr_tmp <= rcr_curr_addr[15:0];
rcr_curr_addr_tmp <= rcr_curr_addr_tmp;
else if (rcr_cfig_a_reg_wenl_dly)
rcr_curr_addr <= rcr_start_addr;
else if (rcr_wrbk_ack_done & !shadw_parity_err_r)
rcr_curr_addr <= rcr_curr_addr_tmp;
rcr_curr_addr <= rcr_curr_addr;
dec_rcr_curr_cnt <= 1'b0;
else if (rx_dma_ctl_stat_reg_wenl & !rcr_wrbk_ack_done_sm | rx_dma_ctl_stat_reg_wenl_dly & !dec_rcr_curr_cnt)
dec_rcr_curr_cnt <= 1'b1;
dec_rcr_curr_cnt <= 1'b0;
rcr_curr_cnt <= 17'b0; //pio read only, this is rcr addr cnt
else if (rcr_curr_cnt[16] | rcr_curr_cnt_overflow_r)
rcr_curr_cnt <= rcr_curr_cnt;
else if (dec_rcr_curr_cnt)
rcr_curr_cnt <= rcr_curr_cnt - {1'b0, cpu_ptr_rd_num[15:0]}; // bit[16] is overflow bit
else if (rcr_wrbk_ack_done & !shadw_parity_err_r)
rcr_curr_cnt <= rcr_curr_cnt + {13'b0, rcr_wrbk_act_num[3:0]};
rcr_curr_cnt <= rcr_curr_cnt;
rcr_wrbk_pkt_num_sav <= 4'b0;
rcr_wrbk_pkt_num_sav <= 4'b0;
else if (rcr_wrbk_ack_done & not_in_cacheline & !shadw_parity_err_r)
rcr_wrbk_pkt_num_sav <= rcr_wrbk_pkt_num_r;
else if (rcr_wrbk_ack_done & !shadw_parity_err_r)
rcr_wrbk_pkt_num_sav <= 4'b0;
rcr_wrbk_pkt_num_sav <= rcr_wrbk_pkt_num_sav;
rcr_wrbk_pkt_act_num <= 4'b0;
rcr_wrbk_pkt_act_num <= 4'b0;
else if (rcr_wrbk_ack_done_sm & !shadw_parity_err_r)
rcr_wrbk_pkt_act_num <= (rcr_wrbk_pkt_num_r - rcr_wrbk_pkt_num_sav);
rcr_wrbk_pkt_act_num <= rcr_wrbk_pkt_act_num;
rcr_curr_pkt_cnt <= 17'b0;
rcr_curr_pkt_cnt <= 17'b0;
else if (rcr_curr_pkt_cnt[16] | rcr_curr_cnt_overflow_r)
rcr_curr_pkt_cnt <= rcr_curr_pkt_cnt;
else if (dec_rcr_curr_cnt)
rcr_curr_pkt_cnt <= rcr_curr_pkt_cnt - {1'b0, cpu_pkt_rd_num[15:0]};
else if (rcr_wrbk_ack_done & !shadw_parity_err_r)
rcr_curr_pkt_cnt <= rcr_curr_pkt_cnt + {13'b0, rcr_wrbk_pkt_act_num[3:0]};
rcr_curr_pkt_cnt <= rcr_curr_pkt_cnt;
/******************************/
/******************************/
//cannot detect real packet count overflow
wire rcr_curr_cnt_overflow = rcr_wrbk_ack_done_r & (rcr_curr_cnt > {1'b0, rcr_max_len[15:0]}) & dma_en;
wire rcr_curr_cnt_underflow = dec_rcr_curr_cnt_r & rcr_curr_cnt[16];
wire rcr_pkt_cnt_underflow = dec_rcr_curr_cnt_r & rcr_curr_pkt_cnt[16];
dec_rcr_curr_cnt_r <= 1'b0;
dec_rcr_curr_cnt_r <= dec_rcr_curr_cnt;
rcr_curr_cnt_overflow_r <= 1'b0;
rcr_curr_cnt_overflow_r <= 1'b0;
else if (rcr_curr_cnt_overflow)
rcr_curr_cnt_overflow_r <= 1'b1;
rcr_curr_cnt_overflow_r <= rcr_curr_cnt_overflow_r;
wire chnl_shadw_parity_err = shadw_parity_err & rcr_wrbk_sched;
shadw_parity_err_r <= 1'b0;
else if (rcr_wrbk_ack_done)
shadw_parity_err_r <= 1'b0;
else if (chnl_shadw_parity_err | rcr_ack_err)
shadw_parity_err_r <= 1'b1;
shadw_parity_err_r <= shadw_parity_err_r;
/*********************************/
//request completion write back
/*********************************/
wire shadw_buf_not_empty = (|shadw_act_curr_cnt);
wire shadw_buf_empty = !shadw_buf_not_empty;
//wire mbox_update_true = (rcrto_bit | rcr_thresh_bit) & m_bit;
wire timer_mbox_update = timer_m_bit & (|rcr_curr_pkt_cnt);
wire thresh_mbox_update = thresh_m_bit & rcr_thresh_reg;
wire mbox_update_true = timer_mbox_update | thresh_mbox_update;
wire rcr_wrbk_trig = (|shadw_curr_cnt[7:3]) |
(rcr_timer_done_r | rcr_flush_reg | thresh_mbox_update) & shadw_buf_not_empty;
wire[3:0] rcr_wrbk_min_num = (|shadw_curr_cnt[7:3]) ? 4'd8 : shadw_curr_cnt[3:0];
wire[3:0] rcr_wrbk_act_num_tmp = not_in_cacheline ? (rcr_wrbk_min_num - {1'b0, rcr_curr_addr[2:0]}) : rcr_wrbk_min_num;
wire[43:0] rcr_full_addr = {rcr_base_addr[24:0], rcr_curr_addr[15:3], 6'b0}; //always start in cacheline
wire rcr_addr_valid0 = ((rcr_full_addr[43:12] & addr_mask0) == (comp_value0 & addr_mask0)) & page_valid0;
wire rcr_addr_valid1 = ((rcr_full_addr[43:12] & addr_mask1) == (comp_value1 & addr_mask1)) & page_valid1;
wire rcr_addr_valid = rcr_addr_valid0 | rcr_addr_valid1;
wire[31:0] rcr_relo_addr0 = (rcr_full_addr[43:12] & ~addr_mask0) | (relo_value0 & addr_mask0);
wire[31:0] rcr_relo_addr1 = (rcr_full_addr[43:12] & ~addr_mask1) | (relo_value1 & addr_mask1);
wire[31:0] rcr_relo_addr = rcr_addr_valid0 ? rcr_relo_addr0 : rcr_relo_addr1;
wire mbox_addr_valid0= ((mbox_addr[43:12] & addr_mask0) == (comp_value0 & addr_mask0)) & page_valid0;
wire mbox_addr_valid1= ((mbox_addr[43:12] & addr_mask1) == (comp_value1 & addr_mask1)) & page_valid1;
wire mbox_addr_valid = mbox_addr_valid0 | mbox_addr_valid1;
wire[31:0] mbox_relo_addr0 = (mbox_addr[43:12] & ~addr_mask0) | (relo_value0 & addr_mask0);
wire[31:0] mbox_relo_addr1 = (mbox_addr[43:12] & ~addr_mask1) | (relo_value1 & addr_mask1);
wire[31:0] mbox_relo_addr = mbox_addr_valid0 ? mbox_relo_addr0 : mbox_relo_addr1;
wire[43:0] relo_addr_tmp = mbox_req_sm ? {mbox_relo_addr, mbox_addr[11:0]} :
{rcr_relo_addr, rcr_full_addr[11:0]};
wire[63:0] rcr_wrbk_addr = {page_handle[19:0], rcr_relo_addr_r[43:0]};
wire rcr_addr_not_valid = !rcr_addr_valid & dma_en;
wire mbox_addr_not_valid = !mbox_addr_valid & dma_en;
cal_addr_en <= rcr_wrbk_req_sm;
rcr_relo_addr_r <= 44'b0;
else if (rcr_wrbk_req_sm | mbox_req_sm)
rcr_relo_addr_r <= relo_addr_tmp;
rcr_relo_addr_r <= rcr_relo_addr_r;
rcr_wrbk_act_num <= 4'b0;
rcr_wrbk_act_num <= 4'b0;
else if (rcr_wrbk_req_sm)
rcr_wrbk_act_num <= rcr_wrbk_act_num_tmp;
rcr_wrbk_act_num <= rcr_wrbk_act_num;
else if (rcr_wrbk_req_sm)
rcr_wrbk_numb <= rcr_wrbk_min_num;
rcr_wrbk_numb <= rcr_wrbk_numb;
not_in_cacheline <= 1'b0;
not_in_cacheline <= 1'b0;
else if (rcr_wrbk_req_sm)
not_in_cacheline <= !rcr_wrbk_min_num[3];
not_in_cacheline <= not_in_cacheline;
rcr_wrbk_data_type <= 1'b0;
rcr_wrbk_data_type <= 1'b0;
else if (rcr_wrbk_req_sm)
rcr_wrbk_data_type <= 1'b1;
rcr_wrbk_data_type <= 1'b0;
rcr_wrbk_data_type <= rcr_wrbk_data_type;
else if (rcr_wrbk_req_sm | mbox_req_sm)
rcr_wrbk_req <= rcr_wrbk_req;
else if (rcr_wrbk_done_r)
rcr_wrbk_sched <= rcr_wrbk_sched;
rcr_wrbk_done_r1 <= 1'b0;
rcr_wrbk_done_r <= rcr_wrbk_sched & rcr_wrbk_done;
rcr_wrbk_done_r1 <= rcr_wrbk_done_r;
rcr_wrbk_pkt_num_r <= 4'b0;
rcr_wrbk_pkt_num_r <= 4'b0;
else if (rcr_wrbk_done_r1)
rcr_wrbk_pkt_num_r <= rcr_wrbk_pkt_num;
rcr_wrbk_pkt_num_r <= rcr_wrbk_pkt_num_r;
rcr_wrbk_ack_done <= 1'b0;
rcr_wrbk_ack_done_r <= 1'b0;
mbox_update_done <= 1'b0;
rcr_wrbk_ack_done <= rcr_wrbk_ack_done_sm;
rcr_wrbk_ack_done_r <= rcr_wrbk_ack_done;
mbox_update_done <= mbox_update_done_sm;
rcr_ack_accept <= rcr_ack_accept_sm;
/************************************/
//Completion write back FSM
/************************************/
wire rdmc_rcr_ack = rdmc_rcr_ack_valid & (rdmc_rcr_ack_dma_num == dma_chnl_grp_id);
WRBK_ACK_DONE_WAIT1 = 4'd4,
WRBK_ACK_DONE_WAIT2 = 4'd5,
WAIT_MBOX_WR_DONE = 4'd8,
always @ (state or rcr_addr_valid or mbox_addr_valid or
rcr_wrbk_trig or mbox_update_true or
rcr_wrbk_sched or rcr_wrbk_done_r or
rdmc_rcr_ack or rdmc_rcr_ack_err or
dma_fatal_err or shadw_parity_err_r or
rcr_wrbk_ack_done_sm = 1'b0;
mbox_update_done_sm = 1'b0;
rcr_ack_accept_sm = 1'b0;
case (state) //synopsys parallel_case full_case
if ((rcr_addr_valid | mbox_addr_valid) & !dma_fatal_err)
else if (mbox_update_true)
else if (rcr_timer_done_r)
next_state = WAIT_WRBK_DONE;
next_state = WAIT_WRBK_ACK;
rcr_wrbk_ack_done_sm = 1'b1;
rcr_ack_accept_sm = 1'b1;
if (shadw_parity_err_r | rdmc_rcr_ack_err)
next_state = WRBK_ACK_DONE_WAIT1;
next_state = WRBK_ACK_DONE_WAIT2;
next_state = WRBK_ACK_DONE;
else if (rcr_timer_done_r)
next_state = WAIT_MBOX_WR_DONE;
next_state = WAIT_MBOX_ACK;
mbox_update_done_sm = 1'b1;
rcr_ack_accept_sm = 1'b1;
/*********************************/
// shadow memory manager
/*********************************/
wire shadw_wr_even = shadw_curr_cnt[0];
wire last_rd_addr = (shadw_rd_ptr == shadw_rd_end_addr);
wire last_wr_addr = (shadw_wr_ptr == shadw_wr_end_addr);
shadw_wr_en <= update_rcr_shadw & (rdmc_wr_data_dma_num == dma_chnl_grp_id);
shadw_wr_ptr <= shadw_start_addr;
shadw_wr_ptr <= shadw_start_addr;
else if (shadw_wr_en & shadw_curr_cnt[0] & last_wr_addr)
shadw_wr_ptr <= shadw_start_addr;
else if (shadw_wr_en & shadw_curr_cnt[0])
shadw_wr_ptr <= shadw_wr_ptr + 8'd1;
shadw_wr_ptr <= shadw_wr_ptr;
shadw_rd_ptr <= shadw_start_addr;
shadw_rd_ptr <= shadw_start_addr;
else if (rcr_wrbk_ack_done & rcr_wrbk_numb[3] & last_rd_addr)
shadw_rd_ptr <= shadw_start_addr;
else if (rcr_wrbk_ack_done & rcr_wrbk_numb[3])
shadw_rd_ptr <= shadw_rd_ptr + 8'd4;
shadw_rd_ptr <= shadw_rd_ptr;
inc_shadw_curr_cnt <= 1'b0;
inc_shadw_curr_cnt <= 1'b0;
else if (shadw_wr_en & rcr_wrbk_ack_done)
inc_shadw_curr_cnt <= 1'b1;
inc_shadw_curr_cnt <= 1'b0;
else if (shadw_wr_en & !rcr_wrbk_ack_done | inc_shadw_curr_cnt)
shadw_curr_cnt <= shadw_curr_cnt + 8'd1;
else if (rcr_wrbk_ack_done & rcr_wrbk_numb[3])
shadw_curr_cnt <= shadw_curr_cnt - 8'd8;
shadw_curr_cnt <= shadw_curr_cnt;
shadw_act_curr_cnt <= 6'b0; //support 4 cache line only
shadw_act_curr_cnt <= 6'b0;
else if (shadw_wr_en & !rcr_wrbk_ack_done | inc_shadw_curr_cnt)
shadw_act_curr_cnt <= shadw_act_curr_cnt + 6'd1;
else if (rcr_wrbk_ack_done)
shadw_act_curr_cnt <= shadw_act_curr_cnt - {2'b0, rcr_wrbk_act_num[3:0]};
shadw_act_curr_cnt <= shadw_act_curr_cnt;
dec_shadw_space_cnt <= 1'b0;
else if (chnl_sel_buf_en_r & rcr_wrbk_ack_done)
dec_shadw_space_cnt <= 1'b1;
dec_shadw_space_cnt <= 1'b0;
shadw_curr_space_cnt <= `SHADW_MAX_ADDR_CNT;
shadw_curr_space_cnt <= `SHADW_MAX_ADDR_CNT;
else if (chnl_sel_buf_en_r & !rcr_wrbk_ack_done | dec_shadw_space_cnt)
shadw_curr_space_cnt <= shadw_curr_space_cnt - {6'b0, pref_buf_used_num[1:0]};
else if (rcr_wrbk_ack_done & rcr_wrbk_numb[3])
shadw_curr_space_cnt <= shadw_curr_space_cnt + 8'd8;
shadw_curr_space_cnt <= shadw_curr_space_cnt;
else if (chnl_sel_buf_en_r)
else if (wr_last_pkt_data & shadw_wr_en)
chnl_has_pkt_s1 <= chnl_has_pkt_s1;
else if (wr_last_pkt_data & shadw_wr_en)
else if (wr_transfer_comp_int)
chnl_has_pkt_s2 <= chnl_has_pkt_s2;
wire chnl_has_pkt = chnl_has_pkt_s1 | chnl_has_pkt_s2;
/********************************/
/********************************/
wire m_bit_en = mbox_update_done & !rx_dma_ctl_stat_reg_wenu | mbox_update_done_dly;
wire set_rcrto_bit = mbox_update_done ? rcr_timer_done_r : rcr_timer_done_r_dly;
wire m_bit_in = !(rcr_thresh_reg | set_rcrto_bit);
assign rcr_ctl_stat_word= {m_bit_in, rcr_thresh_reg, set_rcrto_bit};
rcr_thresh_reg <= (rcr_curr_pkt_cnt[15:0] > pkt_thresh);
else if (rcr_idle_cycle & !rx_dma_ctl_stat_reg_wenu_dly)
thresh_m_bit <= thresh_m_bit;
rcr_timer_done_r_dly <= 1'b0;
mbox_update_done_dly <= 1'b0;
rcr_timer_done_r_dly <= mbox_update_done & rcr_timer_done_r;
mbox_update_done_dly <= mbox_update_done & rx_dma_ctl_stat_reg_wenu;
/********************************/
/********************************/
wire clk_cnt_done = (clk_cnt == clk_div_value);
wire rcr_timer_done = (timeout_cnt == timeout_value) & timeout_en;
wire pio_wr_reset = rx_dma_ctl_stat_reg_wenu_dly & m_bit & !rcr_timer_done_r;
else if (pio_wr_reset | reset_rcr_timer | m_bit_en)
timer_m_bit <= timer_m_bit;
clk_cnt <= clk_cnt + 16'd1;
else if (pio_wr_reset | reset_rcr_timer)
timeout_cnt <= timeout_cnt;
else if (clk_cnt_done & timeout_en)
timeout_cnt <= timeout_cnt + 6'd1;
timeout_cnt <= timeout_cnt;
rcr_timer_done_r <= 1'b0;
else if (pio_wr_reset | reset_rcr_timer)
rcr_timer_done_r <= 1'b0;
rcr_timer_done_r <= 1'b1;
rcr_timer_done_r <= rcr_timer_done_r;
/********************************/
/********************************/
rcr_flush_reg_dly <= 1'b0;
rcr_flush_reg_dly <= rcr_flush_reg;
wire rcr_flush_pulse = rcr_flush_reg & !rcr_flush_reg_dly;
wire reset_rcr_flush = rcr_flush_pulse & shadw_buf_empty | rcr_wrbk_ack_done;