| 1 | // ========== Copyright Header Begin ========================================== |
| 2 | // |
| 3 | // OpenSPARC T2 Processor File: niu_tdmc_dmacontext.v |
| 4 | // Copyright (C) 1995-2007 Sun Microsystems, Inc. All Rights Reserved |
| 5 | // 4150 Network Circle, Santa Clara, California 95054, U.S.A. |
| 6 | // |
| 7 | // * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 8 | // |
| 9 | // This program is free software; you can redistribute it and/or modify |
| 10 | // it under the terms of the GNU General Public License as published by |
| 11 | // the Free Software Foundation; version 2 of the License. |
| 12 | // |
| 13 | // This program is distributed in the hope that it will be useful, |
| 14 | // but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 15 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 16 | // GNU General Public License for more details. |
| 17 | // |
| 18 | // You should have received a copy of the GNU General Public License |
| 19 | // along with this program; if not, write to the Free Software |
| 20 | // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| 21 | // |
| 22 | // For the avoidance of doubt, and except that if any non-GPL license |
| 23 | // choice is available it will apply instead, Sun elects to use only |
| 24 | // the General Public License version 2 (GPLv2) at this time for any |
| 25 | // software where a choice of GPL license versions is made |
| 26 | // available with the language indicating that GPLv2 or any later version |
| 27 | // may be used, or where a choice of which version of the GPL is applied is |
| 28 | // otherwise unspecified. |
| 29 | // |
| 30 | // Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
| 31 | // CA 95054 USA or visit www.sun.com if you need additional information or |
| 32 | // have any questions. |
| 33 | // |
| 34 | // ========== Copyright Header End ============================================ |
| 35 | /********************************************************************* |
| 36 | * |
| 37 | * |
 * Original Author(s): Arvind Srinivasan
| 39 | * Modifier(s): |
| 40 | * Project(s): Neptune |
| 41 | * |
| 42 | * Copyright (c) 2004 Sun Microsystems, Inc. |
| 43 | * |
| 44 | * All Rights Reserved. |
| 45 | * |
| 46 | * This verilog model is the confidential and proprietary property of |
| 47 | * Sun Microsystems, Inc., and the possession or use of this model |
| 48 | * requires a written license from Sun Microsystems, Inc. |
| 49 | * |
| 50 | **********************************************************************/ |
| 51 | |
| 52 | `include "txc_defines.h" |
| 53 | `include "niu_dmc_reg_defines.h" |
| 54 | |
//----------------------------------------------------------------------
// niu_tdmc_dmacontext
//
// Per-DMA-channel context block for the TX DMA controller (TDMC)
// descriptor cache.  From the logic below, this module:
//   * translates the ring start address through a two-entry page table
//   * tracks the descriptor ring pointers (shadow fetch pointer,
//     current consume pointer, software-visible head pointer)
//   * manages the 8-entry descriptor cache read/write pointers, each
//     carrying a wrap bit in bit [3]
//   * detects ring-overflow and page-translation configuration errors
//   * implements the DMA reset/stop state machine
// NOTE(review): signal-level intent inferred from this file only;
// confirm against the Neptune/NIU TDMC specification.
//----------------------------------------------------------------------
module niu_tdmc_dmacontext ( /*AUTOARG*/
   // Outputs
   ShadowRingCurrentPtr_DMA, DMA_Address, DMA_Ring_Wrapped,
   DMA_RingLength, DMACacheEntryValid, dmc_txc_dma_active,
   dmc_txc_dma_eoflist, DMA_EmptySpace, tx_rng_head_dma,
   DMA_AvailableFor_Fetch, DMA_ReqPending, DMA_EntriesValid,
   DMA_CacheEmpty, DMA_CacheReadPtr, DMA_CacheWritePtrReOrder,
   dma_reset_scheduled, dma_clear_reset,
   set_conf_part_error_dma, set_tx_ring_oflow_dma,
   meta_entries_requested_dma, tx_dma_cfg_dma_stop_state,
   dma_debug_port,
   // Inputs
   inc_DMAHeadShadow, updateCacheWritePtrs, ResetDMARdPtr,
   meta_resp_address, tx_rng_cfg_dma_staddr,
   meta_req_address, DMA_UpdateAddress,
   DMC_TxCache_SMX_Req_Length, tx_rng_cfg_dma_len, tx_rng_tail_dma,
   tx_dma_cfg_dma_stall, NoOfCacheWritesDispatched, DMANumToReq,
   dmc_txc_dma_cacheready, meta_resp_dma_num, IncrDMARdPtr,
   updateCacheContext, NoOfValidEntries, page0_mask_dma,
   page0_value_dma, page0_reloc_dma, page0_valid_dma, page1_mask_dma,
   page1_value_dma, page1_reloc_dma, page1_valid_dma,
   dmc_txc_dma_page_handle, txc_dmc_dma_inc_head,
   dmc_txc_dma_partial, receivedErrorResp,
   dma_reset_done_hold, tx_dma_cfg_dma_rst,
   tx_dma_cfg_dma_stop, SysClk, Reset_L
   );


   // ---- Ring-pointer / address outputs -------------------------------
   output [`PTR_WIDTH -1 :0] ShadowRingCurrentPtr_DMA; // shadow (fetch) ring pointer
   output [63:0]             DMA_Address;              // {page_handle, translated address}
   output                    DMA_Ring_Wrapped;         // shadow wrap != tail wrap
   output [`PTR_WIDTH -1 :0] DMA_RingLength;           // ring length, {cfg_len,3'h0}
   output                    DMACacheEntryValid;       // current ptr != tail, and DMA active
   output                    dmc_txc_dma_active;       // ~stall
   output                    dmc_txc_dma_eoflist;      // registered ~DMACacheEntryValid
   output [3:0]              DMA_EmptySpace ;          // free descriptor-cache slots
   output [`PTR_WIDTH :0]    tx_rng_head_dma;          // S/W head pointer ({wrap, ptr})
   // ---- Fetch / cache status outputs ---------------------------------
   output                    DMA_AvailableFor_Fetch;   // OK to launch a descriptor fetch
   output                    DMA_ReqPending;           // fetch request outstanding
   output [4:0]              DMA_EntriesValid;         // descriptors currently in cache
   output                    DMA_CacheEmpty;
   output [3:0]              DMA_CacheReadPtr;         // {wrap, index[2:0]}
   output [3:0]              DMA_CacheWritePtrReOrder; // write ptr adjusted for resp reorder
   // ---- Reset / error outputs ----------------------------------------
   output                    dma_reset_scheduled;
   output                    dma_clear_reset;
   output                    set_conf_part_error_dma;  // page-table config error pulse
   output                    set_tx_ring_oflow_dma;    // ring-overflow error pulse
   output [4:0]              meta_entries_requested_dma; // entries in last meta request
   // ---- Pointer-update inputs ----------------------------------------
   input                     inc_DMAHeadShadow;
   input                     updateCacheWritePtrs;
   input                     ResetDMARdPtr;
   input [2:0]               meta_resp_address;
   input [37:0]              tx_rng_cfg_dma_staddr;    // ring base, 64B units
   input [7:0]               meta_req_address;
   input [43:0]              DMA_UpdateAddress;
   input [13:0]              DMC_TxCache_SMX_Req_Length;
   input [`RNG_LENGTH_WIDTH - 1 :0] tx_rng_cfg_dma_len;
   input [`PTR_WIDTH :0]     tx_rng_tail_dma;          // S/W tail pointer ({wrap, ptr})
   input                     tx_dma_cfg_dma_stall;
   input [3:0]               NoOfCacheWritesDispatched;
   input [4:0]               DMANumToReq;
   input                     dmc_txc_dma_cacheready;
   input [4:0]               meta_resp_dma_num;
   input                     IncrDMARdPtr;
   input                     updateCacheContext;
   input [4:0]               NoOfValidEntries;

   // ---- Page-table translation inputs --------------------------------
   input [31:0]              page0_mask_dma;
   input [31:0]              page0_value_dma;
   input [31:0]              page0_reloc_dma;
   input                     page0_valid_dma;
   input [31:0]              page1_mask_dma;
   input [31:0]              page1_value_dma;
   input [31:0]              page1_reloc_dma;
   input                     page1_valid_dma;
   input [19:0]              dmc_txc_dma_page_handle;
   input                     dmc_txc_dma_partial;
   // ---- Reset / stop control inputs ----------------------------------
   input                     dma_reset_done_hold;
   input                     receivedErrorResp;
   input                     tx_dma_cfg_dma_rst;
   input                     tx_dma_cfg_dma_stop;
   output                    tx_dma_cfg_dma_stop_state; // 1 = DMA is in stopped state
   input                     SysClk;
   input                     Reset_L;                   // active-low synchronous reset
   input                     txc_dmc_dma_inc_head; // Needs to come from txc
   output [31:0]             dma_debug_port;            // debug visibility bundle
   // ---- Registered/wire forms of the module outputs ------------------
   reg [`PTR_WIDTH -1 :0]  ShadowRingCurrentPtr_DMA;
   reg [63:0]              DMA_Address;
   wire                    DMA_Ring_Wrapped;
   reg [`PTR_WIDTH -1 :0]  DMA_RingLength;
   wire                    DMACacheEntryValid;
   wire                    FetchMoreDMA ;          // shadow ptr != tail ptr
   wire                    dmc_txc_dma_active;
   wire                    dmc_txc_dma_eoflist;
   wire                    DMAActive;              // ~stall
   wire [3:0]              DMA_EmptySpace ;
   reg                     ReceivedDMAKick;        // sticky: stall deasserted (kick)
   reg [`PTR_WIDTH :0]     tx_rng_head_dma;
   wire                    DMA_AvailableFor_Fetch;
   reg                     DMA_ReqPending;
   reg                     DMA_ReqPending_d;       // 1-cycle delayed ReqPending
   reg [3:0]               DMA_Anchor_Address;     // meta_req_address[7:4] of last request
   wire                    DMA_CacheEmpty;
   reg [3:0]               DMA_CacheWritePtr;      // committed write ptr {wrap,idx}
   reg [3:0]               DMA_CacheShadowWritePtr;// speculative write ptr {wrap,idx}
   reg [3:0]               DMA_CacheReadPtr;
   reg [3:0]               DMA_CacheWritePtrReOrder;
   reg [4:0]               DMA_EntriesValid;
   wire                    DMA_EntryUpdateCollision; // inc and write-update same cycle
   reg                     ld_TxCacheAddress_dma;  // pulse: load/translate ring base


   reg [`PTR_WIDTH -1 :0]  DMA_RingCurrentPtr;     // consume-side ring pointer
   reg                     tx_dma_cfg_dma_stall_d; // delayed stall, for edge detect
   reg                     DMA_EofList;
   reg [3:0]               DMA_CacheReOrderOffset; // resp_address - anchor (mod 8)
   wire                    ClearDMAKick;

   reg                     ShadowRingCurrentPtrWrap_DMA; // wrap bit of shadow ptr
   reg                     DMA_RingCurrentPtrWrap;       // wrap bit of current ptr
   wire                    DMA_RingTailWrap;             // wrap bit of S/W tail ptr
   wire                    DMA_Cache_SpaceAvailable;

   reg [4:0]               meta_entries_requested_dma;

   // Logic for Pagetable translation
   reg                     xlate_done;             // pulse: translation complete
   reg [43:0]              xlate_start_addr;       // translated ring base address
   reg [3:0]               page_xlate_state;
   reg                     page0_match;
   reg                     page1_match;
   reg [31:0]              page0_reloc_addr;       // relocated address via page0
   reg [31:0]              page1_reloc_addr;       // relocated address via page1
   reg                     set_conf_part_error_dma;
   reg                     page_xlate_error;       // sticky until reset_asserted
   reg                     xlate_valid;            // translation result is usable

   /* signals for reset logic */
   reg [3:0]               DMA_ResetSM;
   reg                     dma_reset_scheduled;
   reg                     stop_fetch_descriptors;
   reg                     dma_clear_reset;
   reg                     flush_dma_cache;        // pulse: clear cache pointers/counts
   wire                    reset_asserted;
   reg                     stop_asserted;
   reg                     tx_dma_cfg_dma_stop_d;
   reg                     restart_asserted;
   reg                     set_stop_state;
   reg                     dma_stopped;
   wire                    pkt_counts_equal;       // current ptr caught up with head
   /* registers for status registers and mailbox */


   reg                     set_tx_ring_oflow_dma;
   reg                     oflow_error;            // sticky until dma_clear_reset
   reg                     updateCacheContext_d;
   /* Debug Port */
   wire [31:0]             dma_debug_port;
| 215 | |
| 216 | |
   /*--------------------------------------------------------------*/
   // Parameters and Defines
   /*--------------------------------------------------------------*/
   // Page-translation state machine encodings.
   parameter PAGE_XLATE_IDLE   = 4'h0,
             CHECK_PAGE_STATUS = 4'h1,
             PAGE_XLATE_ERROR  = 4'h2;

   // Reset/stop state machine encodings.
   parameter RESET_IDLE        = 4'h0,
             WAIT_FOR_RESP     = 4'h1,
             WAIT_FOR_DONE     = 4'h2,
             FLUSH_STATES      = 4'h3,
             WAIT_FOR_PKT_DONE = 4'h4;

   // Simulation-only ASCII decodes of the state registers for waveform
   // viewing; excluded from synthesis and coverage.
   //VCS coverage off
   // synopsys translate_off
   reg [192:1] PAGE_XLATE_STATE;

   always @(page_xlate_state)
     begin
        case(page_xlate_state)
          PAGE_XLATE_IDLE  : PAGE_XLATE_STATE = "PAGE_XLATE_IDLE";
          CHECK_PAGE_STATUS: PAGE_XLATE_STATE = "CHECK_PAGE_STATUS";
          PAGE_XLATE_ERROR : PAGE_XLATE_STATE = "PAGE_XLATE_ERROR";
          default          : PAGE_XLATE_STATE = "UNKNOWN";
        endcase
     end

   reg [192:1] RESET_STATE;
   always @(DMA_ResetSM)
     begin
        case(DMA_ResetSM)
          RESET_IDLE        : RESET_STATE = "RESET_IDLE";
          WAIT_FOR_RESP     : RESET_STATE = "WAIT_FOR_RESP";
          WAIT_FOR_DONE     : RESET_STATE = "WAIT_FOR_DONE";
          FLUSH_STATES      : RESET_STATE = "FLUSH_STATES";
          WAIT_FOR_PKT_DONE : RESET_STATE = "WAIT_FOR_PKT_DONE";
          default           : RESET_STATE = "UNKNOWN";
        endcase
     end
   // synopsys translate_on
   //VCS coverage on
| 258 | |
| 259 | |
| 260 | function [3:0] IncrTxCachePtr; |
| 261 | input [3:0] CurrentPtr; |
| 262 | input [3:0] ValueToAdd; |
| 263 | reg [3:0] tmp_result; |
| 264 | begin |
| 265 | tmp_result = {1'b0,CurrentPtr[2:0]} + { ValueToAdd[3:0]}; |
| 266 | IncrTxCachePtr[3] = CurrentPtr[3] ^ tmp_result[3]; |
| 267 | IncrTxCachePtr[2:0] = tmp_result[2:0]; |
| 268 | end |
| 269 | endfunction // IncrTxCachePtr |
| 270 | |
| 271 | |
   // Untranslated ring base address: the 38-bit configured start
   // address is in 64-byte units, so append 6 zero bits.
   wire [43:0] dma_start_addr = {tx_rng_cfg_dma_staddr,6'h0};


   // Sticky "translation result is valid" flag: set on xlate_done,
   // held until the reset state machine pulses dma_clear_reset.
   always@(posedge SysClk) begin
      if (!Reset_L) begin
         xlate_valid <= 1'b0;
      end else begin
         xlate_valid <= xlate_done | ( xlate_valid & ~ dma_clear_reset) ;
      end
   end
   // Page-translation state machine.  On ld_TxCacheAddress_dma it
   // relocates dma_start_addr[43:12] through whichever of the two page
   // table entries matches (mask/value compare), keeping the 12-bit
   // page offset.  No valid entry or no match raises
   // set_conf_part_error_dma and parks in PAGE_XLATE_ERROR until a
   // DMA reset.  (See the "algorithm for page table translation"
   // comment at the end of this file.)
   always@(posedge SysClk) begin
      if (!Reset_L) begin
         page_xlate_state <= PAGE_XLATE_IDLE;
         xlate_done <= 1'b0;
         page0_match <= 1'b0;
         page1_match <= 1'b0;
         set_conf_part_error_dma <= 1'b0;
         page_xlate_error <= 1'b0;
         xlate_start_addr <= 44'h0;
      end else begin
         case(page_xlate_state) // synopsys full_case parallel_case
           PAGE_XLATE_IDLE: begin
              xlate_done <= 1'b0;
              set_conf_part_error_dma <= 1'b0;
              page_xlate_error <= 1'b0;
              if(ld_TxCacheAddress_dma ) begin
                 xlate_start_addr <= {tx_rng_cfg_dma_staddr,6'h0};
                 if( ~page0_valid_dma & ~page1_valid_dma ) begin
                    // Neither page entry valid: configuration/partition error.
                    set_conf_part_error_dma <= 1'b1;
                    page_xlate_state <= PAGE_XLATE_ERROR;
                 end else begin
                    // Precompute both relocated addresses and match flags;
                    // the winner is selected next cycle in CHECK_PAGE_STATUS.
                    page_xlate_state <= CHECK_PAGE_STATUS;
                    page0_reloc_addr <= ((dma_start_addr[43:12] & ~page0_mask_dma) |
                                         ( page0_reloc_dma & page0_mask_dma)) ;
                    page0_match <= page0_valid_dma &
                                   ((page0_mask_dma & dma_start_addr [43:12] ) == page0_value_dma );

                    page1_reloc_addr <= ((dma_start_addr[43:12] & ~page1_mask_dma) |
                                         ( page1_reloc_dma & page1_mask_dma)) ;

                    page1_match <= page1_valid_dma &
                                   ((page1_mask_dma & dma_start_addr [43:12] ) == page1_value_dma );

                 end // else: !if( ~page0_valid_dma & ~page1_valid_dma )
              end // if (ld_TxCacheAddress_dma )
           end // case: PAGE_XLATE_IDLE
           CHECK_PAGE_STATUS: begin
              // Page0 takes priority over page1 when both match.
              if(page0_match) begin
                 xlate_done <= 1'b1;
                 xlate_start_addr <= {page0_reloc_addr,xlate_start_addr[11:0]};
                 page_xlate_state <= PAGE_XLATE_IDLE;
              end else if(page1_match) begin
                 xlate_done <= 1'b1;
                 xlate_start_addr <= {page1_reloc_addr,xlate_start_addr[11:0]};
                 page_xlate_state <= PAGE_XLATE_IDLE;
              end else begin
                 // Valid entries exist but none matched the base address.
                 set_conf_part_error_dma <= 1'b1;
                 page_xlate_state <= PAGE_XLATE_ERROR;
              end
           end // case: CHECK_PAGE_STATUS
           PAGE_XLATE_ERROR: begin
              // Hold the sticky error; only a DMA reset releases us.
              set_conf_part_error_dma <= 1'b0;
              page_xlate_error <= 1'b1;
              // xlate_done <= 1'b1; // This is to be removed !!!
              if(reset_asserted) begin
                 page_xlate_state <= PAGE_XLATE_IDLE;
              end else begin
                 page_xlate_state <= PAGE_XLATE_ERROR;
              end
              // synopsys translate_off
              // $display(" %m: Warning-- Page translation failure Time - %t",$time);
              // synopsys translate_on
           end
           default: begin
              // Recover from an illegal state encoding.
              page_xlate_state <= PAGE_XLATE_IDLE;
              xlate_done <= 1'b0;
              page0_match <= 1'b0;
              page1_match <= 1'b0;
              set_conf_part_error_dma <= 1'b0;
              page_xlate_error <= 1'b0;
              xlate_start_addr <= 44'h0;
           end
         endcase // case(page_xlate_state)
      end // else: !if(!Reset_L)
   end // always@ (posedge SysClk)
| 359 | |
| 360 | |
| 361 | |
| 362 | |
   // A pending kick is consumed when the cache context is updated.
   assign ClearDMAKick = updateCacheContext & ReceivedDMAKick;

   always@(posedge SysClk )
     if (!Reset_L) begin
        updateCacheContext_d <= 1'b0;
     end else begin
        updateCacheContext_d <= updateCacheContext;
     end
   // DMA
   // Shadow (fetch-side) ring pointer and fetch address.  On each
   // cache-context update the wrap decision is computed first
   // (updateCacheContext cycle) and applied one cycle later
   // (updateCacheContext_d cycle).  Request length is in bytes;
   // bits [8:3] give the descriptor count (8 bytes/descriptor).
   reg ring_wraped_or_not;
   always@(posedge SysClk )
     if (!Reset_L) begin
        ShadowRingCurrentPtr_DMA <= `PTR_WIDTH'h0;
        ShadowRingCurrentPtrWrap_DMA <= 1'b0;
        DMA_Address <= 64'h0;
        ring_wraped_or_not <= 1'b0;
     end else begin // if (!Reset_L)
        if(xlate_done | dma_clear_reset ) begin
           // New translation (or reset): restart at the ring base.
           ShadowRingCurrentPtr_DMA <= `PTR_WIDTH'h0;
           // DMA_Address <= {20'h0,tx_rng_cfg_dma_staddr,6'h0};
           DMA_Address <= {dmc_txc_dma_page_handle,xlate_start_addr};
           ShadowRingCurrentPtrWrap_DMA <= 1'b0;
           ring_wraped_or_not <= 1'b0;
        end else begin
           if(updateCacheContext) begin
              // Would this fetch reach/pass the end of the ring?
              ring_wraped_or_not <= ( ( ShadowRingCurrentPtr_DMA + {10'h0,DMC_TxCache_SMX_Req_Length[8:3]}) >= (DMA_RingLength )); 
           end else if(updateCacheContext_d ) begin
              if(ring_wraped_or_not) begin
                 // Wrap: back to ring base, toggle the wrap bit.
                 // ShadowRingCurrentPtr_DMA <= {13'h0,DMC_TxCache_SMX_Req_Length[8:3]};
                 ShadowRingCurrentPtr_DMA <= `PTR_WIDTH'h0;
                 ShadowRingCurrentPtrWrap_DMA <= ~ ShadowRingCurrentPtrWrap_DMA;
                 // DMA_Address <= {20'h0,tx_rng_cfg_dma_staddr,6'h0};
                 DMA_Address <= {dmc_txc_dma_page_handle,xlate_start_addr};
              end else begin
                 // No wrap: advance pointer and take the updated address.
                 ShadowRingCurrentPtr_DMA <= ShadowRingCurrentPtr_DMA + {10'h0,DMC_TxCache_SMX_Req_Length[8:3]};
                 ShadowRingCurrentPtrWrap_DMA <= ShadowRingCurrentPtrWrap_DMA;
                 DMA_Address <= {dmc_txc_dma_page_handle,DMA_UpdateAddress};
              end // else: !if(( ShadowRingCurrentPtr_DMA + {13'h0,DMC_TxCache_SMX_Req_Length[8:3]}) > (DMA_RingLength ) )
           end // if (updateCacheContext & (DMANumToReq== DMA_CHANNEL_NUMBER) )
        end // else: !if(xlate_done)
     end // else: !if(!Reset_L)

   // Shadow pointer and tail pointer are on opposite wraps of the ring.
   assign DMA_Ring_Wrapped = ShadowRingCurrentPtrWrap_DMA ^ DMA_RingTailWrap ;
| 406 | |
   // Number of descriptor entries in the most recent meta request
   // (request length in bytes / 8 bytes per descriptor).
   always@(posedge SysClk )
     if (!Reset_L) begin
        meta_entries_requested_dma <= 5'h0;
     end else if(updateCacheContext) begin
        meta_entries_requested_dma <= DMC_TxCache_SMX_Req_Length[7:3];
     end

   // Ring length in descriptors, captured when translation completes.
   // Configured length is in units of 8 descriptors ({len,3'h0}).
   always@(posedge SysClk )
     if (!Reset_L) begin
        DMA_RingLength <= `PTR_WIDTH'h0;
     end else if(xlate_done) begin
        DMA_RingLength <= {tx_rng_cfg_dma_len,3'h0};
     end // if (xlate_done)
| 420 | |
| 421 | // Head Pointer for S/W |
| 422 | |
| 423 | |
   // Head Pointer for S/W

   // Software-visible head pointer {wrap, ptr}.  Cleared on a new
   // translation or reset; incremented (with wrap-bit toggle at the
   // end of the ring) on each txc_dmc_dma_inc_head pulse from TXC.
   always@(posedge SysClk )
     if (!Reset_L) begin
        tx_rng_head_dma <= `PTR_WIDTH_PLUS1'h0;
     end else if(xlate_done | dma_clear_reset) begin
        tx_rng_head_dma <= `PTR_WIDTH_PLUS1'h0;
     end else if( txc_dmc_dma_inc_head) begin// if (xlate_done)
        if( tx_rng_head_dma[`PTR_WIDTH -1 :0] == (DMA_RingLength - `PTR_WIDTH'b1) ) begin
           // Last entry of the ring: wrap to zero and flip the wrap bit.
           tx_rng_head_dma[`PTR_WIDTH] <= ~tx_rng_head_dma[`PTR_WIDTH];
           tx_rng_head_dma[`PTR_WIDTH -1 :0] <= `PTR_WIDTH'h0;
        end else begin // if ( tx_rng_head_dma[`PTR_WIDTH -1 :0] == (DMA_RingLength - `PTR_WIDTH'b1) )
           tx_rng_head_dma[`PTR_WIDTH -1 :0] <= tx_rng_head_dma[`PTR_WIDTH -1 :0] + `PTR_WIDTH'h1 ;
        end // else: !if( tx_rng_head_dma[`PTR_WIDTH -1 :0] == (DMA_RingLength - `PTR_WIDTH'b1) )
     end // if ( txc_dmc_inc_head)
| 437 | |
| 438 | |
   // Ring-overflow detection: once the translation is valid, flag an
   // error if software's tail pointer is outside the ring, or if it is
   // on the wrong side of the shadow pointer for the current wrap
   // relationship.  set_tx_ring_oflow_dma pulses on the rising edge of
   // the condition; oflow_error is sticky until dma_clear_reset.
   reg ring_oflow;
   reg ring_oflow_d;
   always@(posedge SysClk) begin
      if(!Reset_L) begin
         set_tx_ring_oflow_dma <= 1'b0;
         oflow_error <= 1'b0;
         ring_oflow <= 1'b0;
         ring_oflow_d <= 1'b0;
      end else begin
         ring_oflow <= xlate_valid & ( (tx_rng_tail_dma[`PTR_WIDTH -1 :0] > DMA_RingLength)
                                       | ( ~DMA_Ring_Wrapped & ( ShadowRingCurrentPtr_DMA > tx_rng_tail_dma[`PTR_WIDTH -1 :0]) )
                                       | ( DMA_Ring_Wrapped & ( tx_rng_tail_dma[`PTR_WIDTH -1 :0] > ShadowRingCurrentPtr_DMA ) )
                                       );
         ring_oflow_d <= ring_oflow;

         if(dma_clear_reset) begin
            set_tx_ring_oflow_dma <= 1'b0;
            oflow_error <= 1'b0;
         end else if(ring_oflow &~ring_oflow_d) begin
            // Rising edge of the overflow condition: one-cycle pulse
            // plus the sticky error flag.
            set_tx_ring_oflow_dma <= 1'b1;
            oflow_error <= 1'b1;
         end else set_tx_ring_oflow_dma <= 1'b0;
      end // else: !if(!Reset_L)
   end // always@ (posedge SysClk)
| 463 | |
| 464 | // This logic can be taken out-- TOADS This is used by TxCif files |
| 465 | |
   // This logic can be taken out-- TOADS This is used by TxCif files

   // Consume-side ring pointer {wrap, ptr}: advances one descriptor per
   // inc_DMAHeadShadow, wrapping (and toggling the wrap bit) at the end
   // of the ring.
   always@(posedge SysClk )
     if (!Reset_L) begin
        DMA_RingCurrentPtr <= `PTR_WIDTH'h0;
        DMA_RingCurrentPtrWrap <= 1'b0;
     end else if(xlate_done | dma_clear_reset ) begin
        DMA_RingCurrentPtr <= `PTR_WIDTH'h0;
        DMA_RingCurrentPtrWrap <= 1'b0;
     end else if(inc_DMAHeadShadow) begin // if (xlate_done)
        // Take care of wrap around cases
        if(DMA_RingCurrentPtr == (DMA_RingLength - `PTR_WIDTH'b1) ) begin
           DMA_RingCurrentPtr <= `PTR_WIDTH'h0;
           DMA_RingCurrentPtrWrap <= ~DMA_RingCurrentPtrWrap;
        end else begin
           DMA_RingCurrentPtr <= DMA_RingCurrentPtr + `PTR_WIDTH'b1;
           DMA_RingCurrentPtrWrap <= DMA_RingCurrentPtrWrap;
        end // else: !if(DMA_RingCurrentPtr == DMA_RingLength )
     end // if (inc_DMAHeadShadow)

   assign DMA_RingTailWrap = tx_rng_tail_dma[`PTR_WIDTH];


   // A cache entry is valid while the consume pointer has not caught up
   // with software's tail pointer (same value AND same wrap = empty),
   // gated by the DMA being active.
   assign DMACacheEntryValid = ( ~ ( ( DMA_RingCurrentPtrWrap == DMA_RingTailWrap ) &
                                     ( DMA_RingCurrentPtr == tx_rng_tail_dma[`PTR_WIDTH -1 :0] ) ) ) & DMAActive ; // Check exact width
| 489 | |
| 490 | |
| 491 | |
   // Speculative cache write pointer: advanced by the number of cache
   // writes dispatched when a request is issued; rolled back to zero on
   // a cache flush.
   always@(posedge SysClk )
     if (!Reset_L) begin
        DMA_CacheShadowWritePtr <=4'h0;
     end else if(flush_dma_cache) begin
        DMA_CacheShadowWritePtr <=4'h0;
     end else if(updateCacheContext ) begin
        DMA_CacheShadowWritePtr <= IncrTxCachePtr(DMA_CacheShadowWritePtr,NoOfCacheWritesDispatched);
     end // if (updateCacheContext)


   // More descriptors remain to fetch while the shadow pointer has not
   // caught up with software's tail pointer (value+wrap compare).
   assign FetchMoreDMA = ~ ( ( ShadowRingCurrentPtrWrap_DMA == DMA_RingTailWrap ) &
                             (ShadowRingCurrentPtr_DMA == tx_rng_tail_dma[`PTR_WIDTH -1 :0] ) );

   assign dmc_txc_dma_active = DMAActive ;

   // End-of-list to TXC: registered inverse of entry-valid.
   always@(posedge SysClk )
     if(!Reset_L)begin
        DMA_EofList <= 1'b1;
     end else begin
        DMA_EofList <= ~DMACacheEntryValid;
     end // else: !if(!Reset_L)

   assign dmc_txc_dma_eoflist =DMA_EofList;
   assign DMAActive = ~tx_dma_cfg_dma_stall;
   // Free slots in the 8-entry cache, from shadow-write vs read pointer
   // (wrap bits differ => pointers are on different passes).
   assign DMA_EmptySpace= (DMA_CacheShadowWritePtr[3]^DMA_CacheReadPtr[3]) ?
                          ( {1'b0,DMA_CacheReadPtr[2:0]} - {1'b0,DMA_CacheShadowWritePtr[2:0]}) :
                          ( 4'h8 - {1'b0,DMA_CacheShadowWritePtr[2:0]} + {1'b0,DMA_CacheReadPtr[2:0]}) ;
   // Room for a full (4-entry) request, or any room at all when TXC
   // signals a partial fetch.
   assign DMA_Cache_SpaceAvailable = ( (DMA_EmptySpace >4'h3) ? 1'b1: 1'b0 ) |
                                     ( (dmc_txc_dma_partial & (DMA_EmptySpace >4'h0)) ? 1'b1:1'b0 );
   // Kick detection: the falling edge of the stall bit (software
   // un-stalling the DMA) is treated as a kick, which also triggers a
   // reload/translation of the ring base (ld_TxCacheAddress_dma pulse).
   // ReceivedDMAKick is sticky until consumed by ClearDMAKick.
   always@(posedge SysClk )
     if(!Reset_L) begin
        ReceivedDMAKick <= 1'b0;
        tx_dma_cfg_dma_stall_d <= 1'b0;
        ld_TxCacheAddress_dma <= 1'b0;
        DMA_ReqPending_d <= 1'b0;
     end else begin
        tx_dma_cfg_dma_stall_d <= tx_dma_cfg_dma_stall;
        ReceivedDMAKick <= (!tx_dma_cfg_dma_stall & tx_dma_cfg_dma_stall_d) | ( ReceivedDMAKick & ~ClearDMAKick);
        ld_TxCacheAddress_dma <= ~tx_dma_cfg_dma_stall & tx_dma_cfg_dma_stall_d;
        DMA_ReqPending_d <= DMA_ReqPending;
     end // else: !if(!Reset_L)



   // A new descriptor fetch may be launched when work remains and cache
   // space exists, the DMA is active and translated, and no reset/stop,
   // translation error, pending request, or overflow error blocks it.
   // NOTE(review): the first OR term (FetchMoreDMA & ReceivedDMAKick &
   // DMA_Cache_SpaceAvailable) is subsumed by the second term and is
   // logically redundant; left as-is to preserve the proven netlist.
   assign DMA_AvailableFor_Fetch = ( ( FetchMoreDMA & ReceivedDMAKick & DMA_Cache_SpaceAvailable ) | ( FetchMoreDMA & DMA_Cache_SpaceAvailable))
          & DMAActive & ~stop_fetch_descriptors & ~page_xlate_error & ~dma_stopped & xlate_valid & ~DMA_ReqPending_d & ~oflow_error;
| 538 | |
| 539 | // DMA |
   // DMA
   // Outstanding-request flag: set when a request is issued (capturing
   // the anchor address used for response reordering), cleared when the
   // write pointers are updated or an error response is received.
   always@(posedge SysClk )
     if (!Reset_L) begin
        DMA_ReqPending <=1'h0;
        DMA_Anchor_Address <= 4'h0;
     end else if(updateCacheContext ) begin
        DMA_ReqPending <= 1'b1;
        DMA_Anchor_Address <= meta_req_address[7:4];
     end else if(updateCacheWritePtrs | receivedErrorResp ) begin
        /* This should be write confirm signal based upon transaction complete */
        DMA_ReqPending <= 1'b0;
     end // if (updateCacheWritePtrs & (meta_resp_dma_num ==DMA_CHANNEL_NUMBER) )

   // A read-side decrement and a write-side credit arriving together.
   assign DMA_EntryUpdateCollision = inc_DMAHeadShadow & updateCacheWritePtrs;
| 553 | |
   // Count of valid descriptors in the cache: credited with
   // NoOfValidEntries on a write-pointer update, debited by one per
   // inc_DMAHeadShadow, with the simultaneous case handled explicitly.
   // Cleared on a cache flush.
   always@(posedge SysClk )
     if (!Reset_L) begin
        DMA_EntriesValid <= 5'h0;
     end else begin // if (!Reset_L)
        if(flush_dma_cache) begin
           DMA_EntriesValid <= 5'h0;
        end else if(~DMA_EntryUpdateCollision & updateCacheWritePtrs) begin
           DMA_EntriesValid <= DMA_EntriesValid + NoOfValidEntries;
        end // if (~DMA_EntryUpdateCollision & (updateCacheWritePtrs & (meta_resp_dma_num ==DMA_CHANNEL_NUMBER) ) begin...
        else if( ~DMA_EntryUpdateCollision & inc_DMAHeadShadow ) begin
           DMA_EntriesValid <= DMA_EntriesValid - 5'h1;
        end // if ( ~DMA_EntryUpdateCollision & IncrDMARdPtr)
        else if(DMA_EntryUpdateCollision) begin
           // Credit and debit in the same cycle: net update.
           DMA_EntriesValid <= DMA_EntriesValid + NoOfValidEntries - 5'h1;
        end else begin
           DMA_EntriesValid <= DMA_EntriesValid;
        end // else: !if(DMA_EntryUpdateCollision)
     end // else: !if(!Reset_L)
| 572 | |
| 573 | |
| 574 | // DMA |
   // DMA
   // Cache empty when read and write pointers match in both index and
   // wrap bit (differing wrap bits with equal indices would mean full).
   // assign DMA_CacheFull = (DMA_CacheReadPtr[2:0] == DMA_CacheWritePtr[2:0]) & (DMA_CacheReadPtr[3]^DMA_CacheWritePtr[3]) ;
   assign DMA_CacheEmpty = (DMA_CacheReadPtr[2:0] == DMA_CacheWritePtr[2:0]) &~ (DMA_CacheReadPtr[3]^DMA_CacheWritePtr[3]);

   // Committed write pointer: adopts the speculative (shadow) value
   // only once the writes are confirmed via updateCacheWritePtrs.
   always@(posedge SysClk )
     if (!Reset_L) begin
        DMA_CacheWritePtr <=4'h0;
     end else begin
        if(flush_dma_cache) begin
           DMA_CacheWritePtr <=4'h0;
        end else if(updateCacheWritePtrs) begin
           DMA_CacheWritePtr <= DMA_CacheShadowWritePtr;
        end // if (updateCacheWritePtrs)
     end // else: !if(!Reset_L)

   // Read pointer: advanced one entry per IncrDMARdPtr; a flush zeroes
   // it, and ResetDMARdPtr re-synchronizes it to the committed write
   // pointer (note IncrDMARdPtr has priority over both).
   always@(posedge SysClk )
     if (!Reset_L) begin
        DMA_CacheReadPtr <=4'h0;
     end else begin // if (!Reset_L)
        if(IncrDMARdPtr) begin
           DMA_CacheReadPtr <= IncrTxCachePtr(DMA_CacheReadPtr,4'h1);
        end else if(flush_dma_cache ) begin // if (IncrDMARdPtr)
           DMA_CacheReadPtr <=4'h0;
        end else if(ResetDMARdPtr ) begin // if (IncrDMARdPtr)
           DMA_CacheReadPtr <= DMA_CacheWritePtr;
        end // if (ResetDMARdPtr)
     end // else: !if(!Reset_L)




   // DMA_31 Offset Calculations
   // Responses may return out of order: compute the modulo-8 distance
   // of this response from the request's anchor address, and offset the
   // committed write pointer by it to find where the data lands.
   always@( meta_resp_address or DMA_Anchor_Address or DMA_CacheWritePtr) begin
      DMA_CacheReOrderOffset = ( meta_resp_address[2:0] >= DMA_Anchor_Address[2:0] ) ?
                               ( {1'b0,meta_resp_address[2:0]} - {1'b0,DMA_Anchor_Address[2:0]} ) :
                               ( 4'h8 + {1'b0,meta_resp_address[2:0]} - {1'b0,DMA_Anchor_Address[2:0]} );
      DMA_CacheWritePtrReOrder = IncrTxCachePtr(DMA_CacheWritePtr, DMA_CacheReOrderOffset);
   end
| 612 | |
| 613 | |
   /* DMA Reset logic - */

   //TODO-
   // PIO Write Collisions
   // Knowing what is the state of pending scheduling info
   //
   assign reset_asserted = tx_dma_cfg_dma_rst;
   assign tx_dma_cfg_dma_stop_state = dma_stopped;
   // All in-flight packets drained: consume pointer (with wrap) has
   // caught up with the software head pointer.
   assign pkt_counts_equal = ( {DMA_RingCurrentPtrWrap,DMA_RingCurrentPtr} == tx_rng_head_dma[`PTR_WIDTH:0] );

   // Stop-bit edge handling: stop_asserted latches the rising edge of
   // the stop config bit (held until the state machine acts on it via
   // set_stop_state); restart_asserted pulses on its falling edge and
   // releases the sticky dma_stopped flag.
   always@(posedge SysClk) begin
      if(!Reset_L) begin
         dma_stopped <= 1'b0;
         tx_dma_cfg_dma_stop_d <= 1'b0;
         restart_asserted <= 1'b0;
         stop_asserted <= 1'b0;
      end else begin
         tx_dma_cfg_dma_stop_d <= tx_dma_cfg_dma_stop;
         stop_asserted <= (tx_dma_cfg_dma_stop & ~tx_dma_cfg_dma_stop_d) | ( stop_asserted & ~set_stop_state);
         restart_asserted <= ~tx_dma_cfg_dma_stop & tx_dma_cfg_dma_stop_d;
         dma_stopped <= set_stop_state | (dma_stopped & ~restart_asserted);
      end
   end // always@ (posedge SysClk)
| 637 | |
| 638 | |
| 639 | |
| 640 | |
   // DMA reset/stop state machine.  Sequence for an active DMA:
   //   RESET_IDLE -> WAIT_FOR_DONE   (schedule reset, wait for done hold)
   //              -> WAIT_FOR_RESP   (stop fetches, drain pending request)
   //              -> WAIT_FOR_PKT_DONE (wait for in-flight packets)
   //              -> FLUSH_STATES    (flush cache / clear reset / set stop)
   //              -> RESET_IDLE
   // An inactive DMA skips straight to WAIT_FOR_PKT_DONE (reset) or
   // sets the stop state immediately (stop).
   always@(posedge SysClk) begin
      if(!Reset_L) begin
         DMA_ResetSM <= RESET_IDLE;
         dma_reset_scheduled <= 1'b0;
         stop_fetch_descriptors <= 1'b0;
         dma_clear_reset <= 1'b0;
         flush_dma_cache <= 1'b0;
         set_stop_state <= 1'b0;
      end else begin
         case(DMA_ResetSM)
           RESET_IDLE: begin
              dma_reset_scheduled <= 1'b0;
              flush_dma_cache <= 1'b0;
              stop_fetch_descriptors <= 1'b0;
              dma_clear_reset <= 1'b0;
              set_stop_state <= 1'b0;
              if(reset_asserted | stop_asserted ) begin
                 if(!DMAActive) begin
                    // DMA already stalled: no request to drain.
                    if(reset_asserted) begin
                       DMA_ResetSM <= WAIT_FOR_PKT_DONE;
                    end else begin
                       DMA_ResetSM <= RESET_IDLE;
                       set_stop_state <= 1'b1;
                    end
                 end else begin
                    DMA_ResetSM <= WAIT_FOR_DONE;
                    dma_reset_scheduled <= 1'b1;
                 end // else: !if(!DMAActive)
              end // if (reset_asserted | stop_asserted )
           end // case: RESET_IDLE
           WAIT_FOR_DONE: begin
              if(dma_reset_done_hold) begin
                 DMA_ResetSM <= WAIT_FOR_RESP;
                 stop_fetch_descriptors <= 1'b1;
              end else begin
                 DMA_ResetSM <= WAIT_FOR_DONE;
              end // else: !if(!dma_reset_done)
           end // case: WAIT_FOR_DONE
           WAIT_FOR_RESP: begin
              // Drain any outstanding descriptor-fetch request.
              if(!DMA_ReqPending) begin
                 DMA_ResetSM <= WAIT_FOR_PKT_DONE;
              end else begin
                 DMA_ResetSM <= WAIT_FOR_RESP;
              end
           end
           WAIT_FOR_PKT_DONE: begin
              // Wait until all accepted descriptors have been consumed.
              if(pkt_counts_equal) begin
                 DMA_ResetSM <= FLUSH_STATES;
                 dma_reset_scheduled <= 1'b0;
                 if(reset_asserted) begin
                    flush_dma_cache <= 1'b1;
                    dma_clear_reset <= 1'b1;
                 end // if (reset_asserted)
                 if(stop_asserted)
                   set_stop_state <= 1'b1;
              end else begin
                 DMA_ResetSM <= WAIT_FOR_PKT_DONE;
              end // else: !if(pkt_counts_equal)
           end // case: WAIT_FOR_PKT_DONE
           FLUSH_STATES: begin
              // clear caches, clear reset bit etc
              flush_dma_cache <= 1'b0;
              dma_clear_reset <= 1'b0;
              set_stop_state <= 1'b0;
              dma_reset_scheduled <= 1'b0;
              DMA_ResetSM <= RESET_IDLE;
           end
         endcase // case(DMA_ResetSM)
      end // else: !if(!Reset_L)
   end
| 711 | |
| 712 | |
| 713 | |
   // Debug visibility bundle (32 bits); field meanings listed below.
   assign dma_debug_port = {3'h0,dmc_txc_dma_cacheready,dmc_txc_dma_partial,stop_fetch_descriptors,FetchMoreDMA,DMA_AvailableFor_Fetch,DMA_Cache_SpaceAvailable,
                            DMA_ReqPending,DMACacheEntryValid,DMA_EmptySpace,DMA_EntriesValid,DMA_ResetSM,DMA_CacheReadPtr,DMA_CacheWritePtr};
   // DMA_CacheWritePtr         Cache Write Pointer. Updated only after transfer_complete is received; bit 3 is used for wrap-arounds
   // DMA_CacheReadPtr          Cache Read Pointer. Updated based upon number of getNextDesc signals from TXC; bit 3 is used for wrap-arounds
   // DMA_ResetSM               DMA Reset/Stop State Machine
   // DMA_EntriesValid          Number of descriptors currently within the cache - Max of 16
   // DMA_EmptySpace            Indicates how much space is available within the cache. Computed based upon (ShadowWritePtr - ReadPtr)
   // DMACacheEntryValid        Compares current local ring ptr with software tail pointer
   // DMA_ReqPending            Pending Request Flag
   // DMA_Cache_SpaceAvailable  Is space available for new requests?
   // DMA_AvailableFor_Fetch    This triggers a new descriptor fetch; function of new kicks, space available and any errors
   // FetchMoreDMA              ShadowHead Pointer and tail pointer difference indicator.
   // stop_fetch_descriptors    Function of reset/stop issued by Software. This needs to be cleared for any new fetch requests
   // dmc_txc_dma_partial       Partial signal to TXC

endmodule
| 730 | |
| 731 | /* -- algorithm for page table translation |
| 732 | if( ~page0_valid & ~page1_valid ){ |
| 733 | set_error(); |
| 734 | } |
| 735 | |
| 736 | if(page0_valid) { // check page0 match |
| 737 | mask0 = page0_mask; |
| 738 | value0 = page0_value; |
| 739 | reloc0 = page0_reloc; |
| 740 | if( (mask0 & address[43:12] ) == (value0)) { |
| 741 | page0_match = 1; |
| 742 | page1_match = 0; |
| 743 | |
| 744 | } |
| 745 | } |
| 746 | if(page1_valid & ~page0_match) {// check page1 match |
| 747 | mask1 = page1_mask; |
| 748 | value1 = page1_value; |
| 749 | reloc1 = page1_reloc; |
| 750 | if( (mask1 & address[43:12] ) == (value1)) { |
| 751 | page1_match = 1; |
| 752 | page0_match = 0; |
| 753 | |
| 754 | } |
| 755 | } |
| 756 | |
| 757 | // Calculate new address |
| 758 | if(page0_match) { |
| 759 | new_address[43:12] = (address[43:12] & ~mask0) | ( reloc0 & mask0) ; |
| 760 | new_address[11:0] = address[11:0]; |
| 761 | } else if(page1_match) { |
| 762 | new_address[43:12] = (address[43:12] & ~mask1) | ( reloc1 & mask1) ; |
| 763 | new_address[11:0] = address[11:0]; |
| 764 | } else { |
| 765 | new_address = address; // no change |
| 766 | } |
| 767 | |
| 768 | */ |
| 769 | |
| 770 | |
| 771 | |
| 772 | |
| 773 | |
| 774 | |
| 775 | |