1// ========== Copyright Header Begin ==========================================
2//
3// OpenSPARC T2 Processor File: niu_rx_descp.vr
4// Copyright (C) 1995-2007 Sun Microsystems, Inc. All Rights Reserved
5// 4150 Network Circle, Santa Clara, California 95054, U.S.A.
6//
7// * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8//
9// This program is free software; you can redistribute it and/or modify
10// it under the terms of the GNU General Public License as published by
11// the Free Software Foundation; version 2 of the License.
12//
13// This program is distributed in the hope that it will be useful,
14// but WITHOUT ANY WARRANTY; without even the implied warranty of
15// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16// GNU General Public License for more details.
17//
18// You should have received a copy of the GNU General Public License
19// along with this program; if not, write to the Free Software
20// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21//
22// For the avoidance of doubt, and except that if any non-GPL license
23// choice is available it will apply instead, Sun elects to use only
24// the General Public License version 2 (GPLv2) at this time for any
25// software where a choice of GPL license versions is made
26// available with the language indicating that GPLv2 or any later version
27// may be used, or where a choice of which version of the GPL is applied is
28// otherwise unspecified.
29//
30// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
31// CA 95054 USA or visit www.sun.com if you need additional information or
32// have any questions.
33//
34// ========== Copyright Header End ============================================
35#include <vera_defines.vrh>
36#include <ListMacros.vrh>
37#include "niu_mem.vrh"
38#include "pcg_defines.vri"
39#include "pcg_types.vri"
40#include "pg_top_pp.vrh"
41#include "pc_top_pp.vrh"
42#include "pcg_token.vrh"
43#include "niu_rxtoken.vrh"
44#include "niu_dma.vrh"
45#include "niu_rx_descp_sch.vrh"
46#include "niu_rx_descp_cr.vrh"
47#include "niu_rx_crentry.vrh"
48#include "dmc_memory_map.vri"
49#include "zcp_memory_map.vri"
50
51#define RBR_MAX_RING_LEN 65535
52#define RCR_MAX_RING_LEN 65535
53#define RBR_PAGE_ALIGNMENT 65536
54#define RCR_PAGE_ALIGNMENT 65536
55
56#ifdef N2_FC
57#include "fc_niu_ev2a.vrh"
58#endif
59
60extern CSparseMem SparseMem;
61extern niu_gen_pio gen_pio_drv;
62extern mbox_class mbox_id;
63extern pg pack_gen[16];
64extern integer RX_TEST_REACHED_END;
65extern CNiuDMABind NiuDMABind;
66
67class CRxBufferPool {
68 bit [63:0] address;
69 integer page_id;
70 integer bufsz;
71 integer status;
72 integer no_of_expected_packets;
73 integer no_of_packets_received;
74 task new() { }
75 function integer self_destroy() {
76 if(no_of_packets_received>=no_of_expected_packets) {
77 printf("debug: Pkt address to be deleted - %x \n",address);
78 status = SparseMem.free_addr(address,1,page_id);
79 printf("debug: xlate address to be deleted - %x %d \n",address,status);
80 self_destroy = 1;
81 } else self_destroy = 0;
82 }
83}
84
85class rxdma_ctl_stat_reg {
86
87 bit MEX_bit;
88
89 bit status_DC_FIFO_ERR;
90 bit status_RCRTHRES;
91 bit status_RCRTO;
92 bit status_PORT_DROP_PKT;
93 bit status_WRED_DROP;
94 bit status_RBR_PRE_EMPTY;
95 bit status_RCR_SHADOW_FULL;
96 bit status_RBR_EMPTY;
97
98 bit [15:0] update_PTRREAD;
99 bit [15:0] update_PKTREAD;
100
101 task new() {
102 status_DC_FIFO_ERR = 0;
103 MEX_bit = 0;
104 status_RCRTHRES = 0;
105 status_RCRTO = 0;
106 status_PORT_DROP_PKT = 0;
107 status_WRED_DROP = 0;
108 status_RBR_PRE_EMPTY = 0;
109 status_RCR_SHADOW_FULL = 0;
110 status_RBR_EMPTY = 0;
111 update_PTRREAD = 0;
112 update_PKTREAD = 0;
113 }
114
115}
116
117class RxDMAChannel extends CDMA {
118
119 integer id; // dmaid
120 string type; // Tx or Rx
121 integer active;
122 integer total_packet_checked=0;
123 integer total_packets_to_defaultdma=0;
124 integer total_packets_to_nondefaultdma=0;
125
126 integer desc_ring_head_ptr;
127 bit [63:0] ring_start_addr;
128 bit [63:0] ring_current_addr;
129 integer ring_size;
130 integer page0_id;
131 integer page1_id;
132 integer xlate_on;
133 bit RBR_page;
134 bit RCR_page;
135 bit MailBox_page;
136 bit PktBuffers_page;
137 bit page0_valid;
138 bit page1_valid;
139 integer RBR_page_id;
140 integer RCR_page_id;
141 integer MailBox_page_id;
142 integer PktBuffers_page_id;
143 bit random_page_alloc;
144 bit pkts_in_alternate_pages;
145 integer ctrl_hdr_len;
146 integer buffer_offset;
147 bit [31:0] page_mask0, page_value0, page_reloc0;
148 bit [31:0] page_mask1, page_value1, page_reloc1;
149 bit [15:0] dma_block_size;
150 bit [15:0] rcr_ring_len, rbr_ring_len;
151 integer dis_pio_virt=0;
152
153 bit[63:0] CR_rcr_start_addr, CR_rcr_tail_l, CR_rcr_tail_h;
154 bit[63:0] CR_last_rcr_tail_l, CR_last_rcr_tail_h;
155 integer CR_ring_length;
156 integer poll_cr_active=0;
157 integer poll_cr_done=0;
158
159 bit dring_addr_error=0;
160 bit cring_addr_error=0;
161 integer descr_addr_error_pkt_num=0;
162 integer curr_rbr_desc_kicked_cnt = 0;
163 bit fun_no_has_been_set = 0;
164 bit [1:0] function_no;
165
166 integer rxdma_ctl_stat_update_done = -1;
167
168 CRxdescpScheduler descpScheduler;
169 CCompletionRing completionring;
170 CRxBufferPool buf_pool[];
171
172
173 // descriptor_ring -- -
174 CRxDescrRing desc_ring;
175
176 task new(integer i,string t,(integer dev_id = 0) ){
177
178 super.new(i,1,dev_id);
179 id = i;
180 xlate_on =0;
181 page_mask0=0; page_value0=0; page_reloc0=0;
182 page_mask1=0; page_value1=0; page_reloc1=0;
183 // if(xlate_on) {
184 page0_id = 2*i + 64;
185 page1_id = page0_id + 1;
186 // } else {
187 // page0_id = 0;
188 // page1_id = 0;
189 // }
190
191 total_packet_checked=0;
192 total_packets_to_defaultdma=0;
193 total_packets_to_nondefaultdma=0;
194
195 address_incr = 4;
196 type = t;
197 active = 0;
198 desc_ring = new();
199 dis_pio_virt=1;
200 descpScheduler = new(i);
201 completionring = new();
202 completionring.dma_num = id;
203 printf(" DMA Channel %d -newed\n",i);
204 RBR_page = 0;
205 RCR_page = 0;
206 MailBox_page = 0;
207 PktBuffers_page = 0;
208 page0_valid = 1;
209 page1_valid = 1;
210 RBR_page_id = page0_id;
211 RCR_page_id = page0_id;
212 MailBox_page_id = page0_id;
213 PktBuffers_page_id = page0_id;
214 random_page_alloc = 0;
215 pkts_in_alternate_pages = 0;
216 ctrl_hdr_len = 2;
217 buffer_offset = 0;
218 dma_block_size = 4096;
219 rcr_ring_len = 0;
220 rbr_ring_len = 0;
221
222 auto_periodic_kick();
223 rxdma_ctl_stat_update_done = alloc(SEMAPHORE, 0, 1, 1);
224 if(get_plus_arg( CHECK, "RXTX_PIO_STRESS_BINDING=")) {
225 function_no = get_plus_arg( NUM, "RXTX_PIO_STRESS_BINDING=");
226 function_no = function_no % 4;
227 } else function_no = id/4;
228
229 }
230
231
232 // local task add_descriptor(CRxdescriptor desc);
233 local function integer incr_ptr( integer ptr){
 234 incr_ptr = (ptr + 1) % ring_size; // return the incremented pointer, wrapped at ring_size
235 }
236
237 local task create_descriptor(var CRxdescriptor desc, bit [31:0] blk_addr, (integer pkt_page_id = 0) );
238 local task set_descriptor(integer no_of_desc);
239
240 function integer CheckDMAStatus( CRxToken RxToken) ;
241 function integer getPacketAddress( CRxToken RxToken) ;
242 function integer getPageId(bit [43:0] virt_address);
243
244 function CRxdescriptor getNextDesc() {
245 getNextDesc = desc_ring.front();
246 desc_ring.pop_front();
247 }
248 function integer get_curr_ring_size() {
249 get_curr_ring_size=descpScheduler.desc_ring.desc_ring.size();
250 }
251
252 local task UpdateCompletionRing(CRxToken RxToken);
253 // functions to update RxDMA Rx Kick Registers
254
255 task SetCurrentPtrs() ;
256 task CheckCR_Entries();
257
258 task pollCRPtr( (integer poll_interval=1000) );
259 task reclaim_buffers( (integer reclaim=1) );
260 task setRxDmaCfig_1(bit [63:0] data,(bit read_back = 1'b0),(bit read_only = 1'b0));
261 task setRxLogPgVld(bit [63:0] data,(bit read_back = 1'b0));
262 task setRbrConfig_A(bit [63:0] data,(bit read_back = 1'b0),(integer ring_page_id =0));
263 task setRbrConfig_B(bit [63:0] data,(bit read_back = 1'b0));
264 task setRxRingKick(bit [63:0] data);
265 task readRxRingHead(var bit [63:0] data);
266 task setZcpRdc(bit [63:0] data,(bit read_back = 1'b0));
267 task setRcrConfig_A(bit [63:0] data,(bit read_back = 1'b0),(integer ring_page_id =0));
268 local task updateHeaderLength(CRxToken RxToken);
269 task SetPage0Registers(bit [31:0] mask,bit [31:0] value, bit [31:0] reloc);
270 task SetPage1Registers(bit [31:0] mask,bit [31:0] value, bit [31:0] reloc);
271 task InitDMAChan(integer dma_chnl, integer desc_ring_length, integer compl_ring_len, bit [63:0] rbr_config_B_data, bit [15:0] initial_kick, integer xlation);
272 task CheckMailboxData(bit [63:0] ctl_data_mask);
273 task UpdateRCRStat();
274
275 // functions to check data
276 task CheckCRData (integer no_of_entries , (integer update_ptrs = 0) );
277 task free_memory(CRxToken RxToken);
278 task flush_rcr((integer wait_for_done=0) );
279 task reset_bind_to_group( (integer dummy_g=0));
280 task bind_to_group( integer g);
281 task enableWRED(bit [15:0] red_ran_init,
282 bit [11:0] TCP_SYN_THR,
283 bit [3:0] TCP_SYN_WIN,
284 bit [11:0] THR,
285 bit [3:0] WIN);
286
287 task resetRxDma();
288
289 task incDefPktCnt () {
290 total_packet_checked++;
291 total_packets_to_defaultdma++;
292 }
293
294 task incPktCnt () {
295 total_packet_checked++;
296 total_packets_to_nondefaultdma++;
297 }
298 //task RXDMA_CFIG1_pio_wr(bit [63:0] address, bit [63:0] data);
299 //task RXDMA_CFIG1_pio_rd(bit [63:0] address, var bit [63:0] data);
300 //task RXDMA_CFIG2_pio_wr(bit [63:0] address, bit [63:0] data);
301 //task RXDMA_CFIG2_pio_rd(bit [63:0] address, var bit [63:0] data);
302
303 task init_descr_cache();
304 task init_compl_cache();
305
306 task periodic_kick((integer interval = 3000), (integer num_desc = 256), (integer threshold = 256));
307 task auto_periodic_kick();
308
309 task pio_wr_RXDMA_CFIG1(bit [63:0] rd_data);
310 task pio_rd_RXDMA_CFIG1(var bit [63:0] rd_data);
311 task pio_wr_RXDMA_CFIG2(bit [63:0] rd_data);
312 task pio_rd_RXDMA_CFIG2(var bit [63:0] rd_data);
313 task pio_rd_RCR_CFIG_A(var bit [63:0] rd_data);
314 task pio_rd_RX_MISC_DROP(var bit [63:0] rd_data);
315 task pio_rd_RED_DISC(var bit [63:0] rd_data);
316 task pio_wr_RDC_RED_PARA(bit [63:0] rd_data);
317 task pio_rd_RDC_RED_PARA(var bit [63:0] rd_data);
318
319 task pio_wr_RX_DMA_CTL_STAT_START(bit [63:0] rd_data);
320 task pio_rd_RX_DMA_CTL_STAT_START(var bit [63:0] rd_data);
321 task pio_wr_RX_DMA_ENT_MSK_START(bit [63:0] rd_data);
322 task pio_rd_RX_DMA_ENT_MSK_START(var bit [63:0] rd_data);
323 task pio_wr_RCR_CFIG_B_START(bit [63:0] rd_data);
324 task pio_rd_RCR_CFIG_B_START(var bit [63:0] rd_data);
325 task pio_wr_RX_DMA_INTR_DEBUG_START(bit [63:0] rd_data);
326 task pio_rd_RX_DMA_INTR_DEBUG_START(var bit [63:0] rd_data);
327 task rxdma_ctl_stat_update(integer field, rxdma_ctl_stat_reg ctl_stat_reg);
328}
329
330task RxDMAChannel :: rxdma_ctl_stat_update(integer field, rxdma_ctl_stat_reg ctl_stat_reg) {
331bit [63:0] rd_data;
332
333 // first wait for the SEMAPHORE for mutual exclusiveness of multiple calls to this function
334 printf ("RxDMAChannel::rxdma_ctl_stat_update dma=%0d field %0d\n", id, field);
335 semaphore_get(WAIT, rxdma_ctl_stat_update_done, 1);
336 case(field) {
337 0: { // updating the MEX bit with given value. Write Status_bits=0, RCR_Update=0.
338 pio_rd_RX_DMA_CTL_STAT_START(rd_data);
339 rd_data[RX_DMA_CTL_STAT_MEX] = ctl_stat_reg.MEX_bit; // bit 47
340 rd_data[RX_DMA_CTL_STAT_RBR_TMOUT:RX_DMA_CTL_STAT_DC_FIFO_ERR] = 0; // bits 53:48
341 rd_data[RX_DMA_CTL_STAT_RCRTHRES:RX_DMA_CTL_STAT_CFIGLOGPAGE] = 0; // bits 46:32
342 rd_data[RX_DMA_CTL_STAT_PTRREAD] = 0; // bits 31:16
343 rd_data[RX_DMA_CTL_STAT_PKTREAD] = 0; // bits 15:0
344 printf ("RxDMAChannel::rxdma_ctl_stat_update field=0 dma=%0d, updating MEX<=%0d\n", id, ctl_stat_reg.MEX_bit);
345 pio_wr_RX_DMA_CTL_STAT_START(rd_data);
346 }
347 1: { // clearing the non-fatal status bits, Write MEX=MEX-read, RCR_Update=0.
348 pio_rd_RX_DMA_CTL_STAT_START(rd_data);
349 rd_data[RX_DMA_CTL_STAT_DC_FIFO_ERR] =
350 ctl_stat_reg.status_DC_FIFO_ERR; // bit 48
351 rd_data[RX_DMA_CTL_STAT_RCRTHRES] =
352 ctl_stat_reg.status_RCRTHRES; // bit 46
353 rd_data[RX_DMA_CTL_STAT_RCRTO] =
354 ctl_stat_reg.status_RCRTO; // bit 45
355 rd_data[RX_DMA_CTL_STAT_PORT_DROP_PKT] =
356 ctl_stat_reg.status_PORT_DROP_PKT; // bit 42
357 rd_data[RX_DMA_CTL_STAT_WRED_DROP] =
358 ctl_stat_reg.status_WRED_DROP; // bit 41
359 rd_data[RX_DMA_CTL_STAT_RBR_PRE_EMTY] =
360 ctl_stat_reg.status_RBR_PRE_EMPTY; // bit 40
361 rd_data[RX_DMA_CTL_STAT_RCR_SHADOW_FULL] =
362 ctl_stat_reg.status_RCR_SHADOW_FULL; // bit 39
363 rd_data[RX_DMA_CTL_STAT_RBR_EMPTY] =
364 ctl_stat_reg.status_RBR_EMPTY; // bit 35
365 rd_data[RX_DMA_CTL_STAT_PTRREAD] = 0;
366 rd_data[RX_DMA_CTL_STAT_PKTREAD] = 0;
367 printf ("RxDMAChannel::rxdma_ctl_stat_update field=1 dma=%0d, clear Status bits wr_data = 0x%h\n", rd_data);
368 pio_wr_RX_DMA_CTL_STAT_START(rd_data);
369 }
370 2: { // updating the RCR status from software, Write MEX=MEX-read, Status_bits=0.
371 pio_rd_RX_DMA_CTL_STAT_START(rd_data);
372 rd_data[RX_DMA_CTL_STAT_RBR_TMOUT:RX_DMA_CTL_STAT_DC_FIFO_ERR] = 0; // bits 53:48
373 rd_data[RX_DMA_CTL_STAT_RCRTHRES:RX_DMA_CTL_STAT_CFIGLOGPAGE] = 0; // bits 46:32
374 rd_data[RX_DMA_CTL_STAT_PTRREAD] = ctl_stat_reg.update_PTRREAD; // bits 31:16
375 rd_data[RX_DMA_CTL_STAT_PKTREAD] = ctl_stat_reg.update_PKTREAD; // bits 15:0
376 printf ("RxDMAChannel::rxdma_ctl_stat_update field=2 dma=%0d, RCR Update. wr_data = 0x%h\n",id, rd_data);
377 pio_wr_RX_DMA_CTL_STAT_START(rd_data);
378 }
379 default: {
380 }
381 }
382
383 // Now, to allow any other pending call to this function, put key back into the SEMAPHORE bucket
384 semaphore_put(rxdma_ctl_stat_update_done, 1);
385
386}
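// Illustrative usage sketch (not part of this file's flow; the rx_dma handle and the
// counts below are assumptions). After software has consumed, say, 4 packets spread
// over 6 RCR entries, a caller could acknowledge them via field 2 (RCR update):
//
//   rxdma_ctl_stat_reg ack;
//   ack = new();
//   ack.update_PKTREAD = 4;   // packets read by software
//   ack.update_PTRREAD = 6;   // RCR entries (including scatters) read by software
//   rx_dma.rxdma_ctl_stat_update(2, ack);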
387
388task RxDMAChannel :: init_descr_cache() {
389 bit [63:0] rd_data;
390 integer ii;
391
392 printf("RxDMAChannel::init_descr_cache() Initializing the descriptor prefetch cache to 0 for DMA - %0d\n",id);
393
394 // Enable the RAM debug read/write mode
395 gen_pio_drv.pio_rd(getPIOAddress(RX_ADDR_MD, dis_pio_virt),rd_data);
396 rd_data[RX_ADDR_MD_RAM_ACC]=1;
397 gen_pio_drv.pio_wr(getPIOAddress(RX_ADDR_MD, dis_pio_virt),rd_data);
398
399 //WRITE all entries to 0 (for Neptune, this is a must. Otherwise, PCIE link layer will hang due to pio_rd_data=X)
400 for(ii=0; ii<8; ii++) { //dma0:0-7 dma1:8-15 (descr)
401 gen_pio_drv.pio_rd(getPIOAddress(RDMC_MEM_ADDR, dis_pio_virt), rd_data);
402 rd_data[RDMC_MEM_ADDR_PRE_SHAD]=0; //0=prefetch(descr) 1=shadow(cring)
403 rd_data[RDMC_MEM_ADDR_PRE_ADDR]=(id*8)+ii;
404 gen_pio_drv.pio_wr(getPIOAddress(RDMC_MEM_ADDR, dis_pio_virt), rd_data);
405
406 gen_pio_drv.pio_wr(RDMC_MEM_DAT3, 0);
407 gen_pio_drv.pio_wr(RDMC_MEM_DAT2, 0);
408 gen_pio_drv.pio_wr(RDMC_MEM_DAT1, 0);
409 gen_pio_drv.pio_wr(RDMC_MEM_DAT0, 0);
410 gen_pio_drv.pio_wr(RDMC_MEM_DAT4, 0);
411 }
412
413 // Turn back off the RAM debug read/write mode
414 gen_pio_drv.pio_rd(getPIOAddress(RX_ADDR_MD, dis_pio_virt),rd_data);
415 rd_data[RX_ADDR_MD_RAM_ACC]=0;
416 gen_pio_drv.pio_wr(getPIOAddress(RX_ADDR_MD, dis_pio_virt),rd_data);
417
418}
419
420task RxDMAChannel :: init_compl_cache() {
421 bit [63:0] rd_data;
422 integer ii;
423
424 printf("RxDMAChannel::init_compl_cache() Initializing the compl shadow cache to 0 for DMA - %0d\n",id);
425
426 // Enable the RAM debug read/write mode
427 gen_pio_drv.pio_rd(getPIOAddress(RX_ADDR_MD, dis_pio_virt),rd_data);
428 rd_data[RX_ADDR_MD_RAM_ACC]=1;
429 gen_pio_drv.pio_wr(getPIOAddress(RX_ADDR_MD, dis_pio_virt),rd_data);
430
431 //WRITE all entries to 0 (for Neptune, this is a must. Otherwise, PCIE link layer will hang due to pio_rd_data=X)
 432 for(ii=0; ii<8; ii++) { //dma0:0-7 dma1:8-15 (shadow)
433 gen_pio_drv.pio_rd(getPIOAddress(RDMC_MEM_ADDR, dis_pio_virt), rd_data);
434 rd_data[RDMC_MEM_ADDR_PRE_SHAD]=1; //0=prefetch(descr) 1=shadow(cring)
435 rd_data[RDMC_MEM_ADDR_PRE_ADDR]=(id*8)+ii;
436 gen_pio_drv.pio_wr(getPIOAddress(RDMC_MEM_ADDR, dis_pio_virt), rd_data);
437
438 gen_pio_drv.pio_wr(RDMC_MEM_DAT3, 0);
439 gen_pio_drv.pio_wr(RDMC_MEM_DAT2, 0);
440 gen_pio_drv.pio_wr(RDMC_MEM_DAT1, 0);
441 gen_pio_drv.pio_wr(RDMC_MEM_DAT0, 0);
442 gen_pio_drv.pio_wr(RDMC_MEM_DAT4, 0);
443 }
444
445 // Turn back off the RAM debug read/write mode
446 gen_pio_drv.pio_rd(getPIOAddress(RX_ADDR_MD, dis_pio_virt),rd_data);
447 rd_data[RX_ADDR_MD_RAM_ACC]=0;
448 gen_pio_drv.pio_wr(getPIOAddress(RX_ADDR_MD, dis_pio_virt),rd_data);
449
450}
451
452task RxDMAChannel :: enableWRED(bit [15:0] red_ran_init,
453 bit [11:0] TCP_SYN_THR,
454 bit [3:0] TCP_SYN_WIN,
455 bit [11:0] THR,
456 bit [3:0] WIN) {
457
458 // Enable WRED and set the parameters red_ran_init,TCP_SYN_THR,TCP_SYN_WIN,THR, and WIN
 459 gen_pio_drv.pio_wr(getPIOAddress(RED_RAN_INIT, dis_pio_virt), {45'h0, 1'b1, red_ran_init});
460 gen_pio_drv.pio_wr(getPIOAddress(RDC_RED_PARA_START + id*RDC_RED_PARA_STEP, dis_pio_virt), {32'h0,TCP_SYN_THR,TCP_SYN_WIN,THR,WIN});
461 printf ("RxDMAChannel.enableWRED red_ran_init 0x%h TCP_SYN_THR 0x%h TCP_SYN_WIN 0x%h THR 0x%h WIN 0x%h\n",
462 red_ran_init,TCP_SYN_THR,TCP_SYN_WIN,THR,WIN);
463}
464
465task RxDMAChannel :: CheckCR_Entries( ) {
466
467 integer no_of_entries;
468 Ccr_update cr_update;
469 bit [63:0] rd_data;
470
471 gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_C_START + RXDMA_STEP*id, dis_pio_virt), CR_rcr_tail_l);
472 printf("RxDMAChannel :: CheckCR_Entries rcr_tail_l - %x last_rcr_tail_l - %x \n",CR_rcr_tail_l,CR_last_rcr_tail_l);
473 if(CR_last_rcr_tail_l>CR_rcr_tail_l) { //wrap around case
474 no_of_entries=((CR_rcr_tail_l-CR_rcr_start_addr[31:0])+((CR_rcr_start_addr[31:0]+CR_ring_length)-CR_last_rcr_tail_l))/8;
475 printf("pollCR dma=%0d l>c last_rcr_tail_l=%x rcr_tail_l=%x no_of_entries=%0d\n", id, CR_last_rcr_tail_l, CR_rcr_tail_l, no_of_entries);
476 CR_last_rcr_tail_l = CR_rcr_tail_l;
477 } else if(CR_last_rcr_tail_l<CR_rcr_tail_l) {
478 no_of_entries=(CR_rcr_tail_l - CR_last_rcr_tail_l)/8;
479 printf("pollCR dma=%0d l<c last_rcr_tail_l=%x rcr_tail_l=%x no_of_entries=%0d\n", id, CR_last_rcr_tail_l, CR_rcr_tail_l, no_of_entries);
480 CR_last_rcr_tail_l = CR_rcr_tail_l;
481 } else {
482 no_of_entries=0;
483 }
484
485 if(no_of_entries>0) {
486 cr_update = new();
487 cr_update.dma_no = this.id;
488 cr_update.no_of_entries = no_of_entries;
489 if(get_plus_arg (CHECK, "RX_DROP_PKT_CHECK"))
490 mailbox_put(mbox_id.niu_rxpath_cr, cr_update);
491
492 // ######## updating PTRREAD/PKTREAD fields to RX_DMA_CTL_STAT has to be done in 1 place: checker #######
493 //gen_pio_drv.pio_rd(getPIOAddress(RX_DMA_CTL_STAT_START + RXDMA_STEP*id, dis_pio_virt), rd_data, 1'b0);
494 //rd_data[31:16] = no_of_entries;
495 //gen_pio_drv.pio_wr(getPIOAddress(RX_DMA_CTL_STAT_START + RXDMA_STEP*id, dis_pio_virt), rd_data);
496 }
497
498}
499
500task RxDMAChannel :: SetCurrentPtrs( ) {
501
502 CR_ring_length = completionring.ring_length;
503 gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_B_START + RXDMA_STEP*id, dis_pio_virt), CR_last_rcr_tail_h);
504 gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_C_START + RXDMA_STEP*id, dis_pio_virt), CR_last_rcr_tail_l);
505 gen_pio_drv.pio_rd(getPIOAddress(RCR_CFIG_A_START + RXDMA_STEP*id, dis_pio_virt), CR_rcr_start_addr);
506 printf("SetCurrentPtrs:dma=%0d cring_len=0x%0h start_addr=0x%0h starting at time=%d ptr - %x \n", id, CR_ring_length, CR_rcr_start_addr, {get_time(HI), get_time(LO)},CR_last_rcr_tail_l);
507}
508
509task RxDMAChannel :: reset_bind_to_group( (integer dummy_g=0)) {
510 if(NiuDMABind.rx_dma_func_bind[id] != -1) {
511 ResetDMAGroupBind(NiuDMABind.rx_dma_func_bind[id]);
512 }
513}
514
515task RxDMAChannel :: bind_to_group( integer g) {
516 bit [63:0] address;
517 bit [63:0] rdata;
518 dis_pio_virt = 0;
519 address = RX_LOG_PAGE_VLD + id*40'h40;
520 gen_pio_drv.pio_rd(getPIOAddress(address, dis_pio_virt),rdata);
521 rdata[3:2] = g/16;
522 fun_no_has_been_set = 1;
523 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),rdata);
524 printf("<%0d> RxDMAChannel :: bind_to_group : RX_LOG_PAGE_VLD : addr:%h data:%h, g:%0d, id:%0d\n",
525 get_time(LO), address, rdata, g, id);
526 SetDMAGroupBind(g);
527}
528
529task RxDMAChannel::flush_rcr((integer wait_for_done=0) ) {
530 bit[63:0] rdata;
531 integer count;
532 count = 0;
533 gen_pio_drv.pio_wr(getPIOAddress(RCR_FLUSH_START + 12'h200*id, dis_pio_virt), 64'h1);
534 if(wait_for_done) {
535 rdata= 1;
536 while(rdata!=0) {
537 gen_pio_drv.pio_rd(getPIOAddress(RCR_FLUSH_START + 12'h200*id, dis_pio_virt), rdata);
538 repeat(50)@(posedge CLOCK);
539 count++;
540 if(count>1000){
541 printf("RxDMAChannel::flush_rcr DMA- %d ERROR, RCR FLUSH NOT DONE after %d Clocks\n",id,50*count);
542 return;
543 }
544 }
545 printf("RxDMAChannel::flush_rcr DMA - %d Done with flush\n",id);
546 }
547}
548
549task RxDMAChannel::free_memory(CRxToken RxToken) {
550 integer bufsz;
551 bit[63:0] packet_address[3];
552 bit[63:0] norm_address[3];
553 integer blk_size;
554 integer status;
555 integer no_of_expected_packets;
556 integer i,bit_to_ignore;
557
558 CRxBufferPool buf_poollocal;
559
560 bufsz = RxToken.bufsz;
561 blk_size = descpScheduler.blk_size;
562 if(bufsz==3) {
563 no_of_expected_packets = 1;
564 } else if(bufsz==2) {
565 no_of_expected_packets = blk_size/descpScheduler.bufsz2;
566 } else if(bufsz==1) {
567 no_of_expected_packets = blk_size/descpScheduler.bufsz1;
568 } else if(bufsz==0) {
569 no_of_expected_packets = blk_size/descpScheduler.bufsz0;
570 }
571 bit_to_ignore = 0;
572 while(blk_size !=0) {
573 bit_to_ignore++;
574 blk_size = blk_size>>1;
575 }
576 bit_to_ignore--;
577 printf("blk_size - %d bit_to_ignore - %d no_of_expected_packets - %d \n",blk_size,bit_to_ignore,no_of_expected_packets);
578
579 for(i=0;i<RxToken.NoOfScatter;i++) {
580 packet_address[i] = RxToken.packet_start_address[i];
581 // ignore lower bits
582 norm_address[i] = packet_address[i] >> bit_to_ignore;
583 }
584
585 for(i=0;i<RxToken.NoOfScatter;i++) {
586 if(assoc_index(CHECK,buf_pool, norm_address[i])) {
587 // if this exists
588 buf_pool[norm_address[i]].no_of_packets_received++;
589 printf(" destroyed address - %x no_of_packet_sofar - %d \n",norm_address[i], buf_pool[norm_address[i]].no_of_packets_received);
590 buf_pool[norm_address[i]].page_id = getPageId(RxToken.packet_virtaddress[i]);
591 if(buf_pool[norm_address[i]].self_destroy() ) {
592 status = assoc_index(DELETE,buf_pool,norm_address[i]);
593 }
594 } else {
595 buf_poollocal = new();
596 buf_poollocal.address = packet_address[i];
597 buf_poollocal.no_of_expected_packets = no_of_expected_packets;
598 buf_poollocal.no_of_packets_received = 1;
599 buf_poollocal.page_id = getPageId(RxToken.packet_virtaddress[i]);
600 buf_pool[norm_address[i]] = new buf_poollocal;
601 if(buf_pool[norm_address[i]].self_destroy() ) {
602 status = assoc_index(DELETE,buf_pool,norm_address[i]);
603 printf("destroyed address - %x \n",norm_address[i]);
604 }
605 }
606 }
607}
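// Worked example for the bookkeeping above (values assumed): with descpScheduler.blk_size
// = 4096 and descpScheduler.bufsz1 = 1024, a packet placed in a size-1 buffer implies
// no_of_expected_packets = 4096/1024 = 4 per block. bit_to_ignore works out to 12
// (4096 = 1<<12), so every buffer address inside the same 4KB block normalizes to the
// same key (address >> 12), and the block is freed via self_destroy() once all 4
// expected packets for that block have been received.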
608
609task RxDMAChannel::reclaim_buffers(( integer reclaim =1) ) {
610 integer n;
611 integer max_number_to_claim = 65535; // Make this under test control
612 // gen_pio_drv.pio_rd(getPIOAddress(RBR_HDH_START + RXDMA_STEP*id, dis_pio_virt), get_rbr_head_ptr);
613 if(reclaim) {
 614 // calculate the delta from the previous reclaim
 615 n = descpScheduler.get_reclaim_index() - descpScheduler.last_reclaim_index;
616 printf(" DMA - %d n - %d last- %d index - %d \n",id,n, descpScheduler.last_reclaim_index, descpScheduler.reclaim_index);
617 if(n>max_number_to_claim) {
618 n= max_number_to_claim;
619 }
620 descpScheduler.last_reclaim_index = descpScheduler.last_reclaim_index + n;
621 desc_ring.reclaim_buffers(n);
622 }
623
624
625}
626
627task RxDMAChannel::updateHeaderLength(CRxToken RxToken) {
628
629 // Add code here to update the header length and buffer_offset
630 RxToken.header_length = ctrl_hdr_len + buffer_offset;
631
632}
633
634task RxDMAChannel::CheckCRData( integer no_of_entries , (integer update_ptrs = 0) ) {
635// if update_ptrs == 1
636// set RX_DMA_CTL_STAT accordingly
637bit [63:0] rcr_start_addr, rcr_tail_l, rcr_tail_h;
638integer time_cntr = 0;
639
640
641 gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_B_START + RXDMA_STEP*id, dis_pio_virt), rcr_tail_h);
642 gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_C_START + RXDMA_STEP*id, dis_pio_virt), rcr_tail_l);
643 gen_pio_drv.pio_rd(getPIOAddress(RCR_CFIG_A_START + RXDMA_STEP*id, dis_pio_virt), rcr_start_addr);
644
645 printf ("RCR_Tail = %h, RCR_Start_Addr = %h, Expected_Tail_Update = %h\n",
646 {rcr_tail_h[11:0],rcr_tail_l[31:3],3'b000},
647 {rcr_start_addr[43:19], rcr_start_addr[18:6], 6'b0}, 8*no_of_entries);
648
649 while (({rcr_tail_h[11:0],rcr_tail_l[31:3],3'b000}-{rcr_start_addr[43:6],6'b0}) < (8*no_of_entries)) {
650 repeat (100) @(posedge CLOCK);
651 printf ("RCR_Tail = %h, RCR_Start_Addr = %h, Expected_Tail_Update = %h\n",
652 {rcr_tail_h[11:0],rcr_tail_l[31:3],3'b000},
653 {rcr_start_addr[43:19], rcr_start_addr[18:6], 6'b0}, 8*no_of_entries);
654 if (time_cntr++ > 2000) {
655 printf ("ERROR: completion entries not written, timing out\n");
656 exit(0);
657 }
658 gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_B_START + RXDMA_STEP*id, dis_pio_virt), rcr_tail_h);
659 gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_C_START + RXDMA_STEP*id, dis_pio_virt), rcr_tail_l);
660 gen_pio_drv.pio_rd(getPIOAddress(RCR_CFIG_A_START + RXDMA_STEP*id, dis_pio_virt), rcr_start_addr);
661 }
662
663 repeat (50) @(posedge CLOCK);
664
665 completionring.CheckSysMem(no_of_entries);
666
667}
668
669function integer RxDMAChannel::getPageId(bit [43:0] virt_address) {
670 if( (( virt_address[43:12]&page_mask0) == page_value0) & page0_valid)
671 getPageId = page0_id;
672 else if( ((virt_address[43:12]&page_mask1) == page_value1) & page1_valid)
673 getPageId = page1_id;
674 else
675 getPageId = -1;
676 printf(" virt_address - %x page_mask0 = %x page_value0 - %x valid - %d getPageId - %d \n",virt_address,page_mask0,page_value0,page0_valid,getPageId);
677}
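// Illustrative example (register values assumed): with page_mask0 = 32'hFFFF_F000,
// page_value0 = 32'h0001_2000 and page0_valid = 1, a virtual address whose bits [43:12]
// are 32'h0001_2ABC matches page 0 (32'h0001_2ABC & 32'hFFFF_F000 == 32'h0001_2000),
// so getPageId returns page0_id; an address matching neither page returns -1.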
678///////////////////////////////////////////////////////////
679
680task RxDMAChannel::pollCRPtr( (integer poll_interval=1000) ) {
681 bit[63:0] rcr_start_addr, rcr_tail_l, rcr_tail_h;
682 bit[63:0] last_rcr_tail_l, last_rcr_tail_h;
683 integer end_reached=0;
684 integer no_of_entries, ring_length;
685 Ccr_update cr_update;
686 bit[63:0] rd_data;
687 integer not_done=1;
688 integer NO_OF_ITER = 2;
689
690 ring_length = completionring.ring_length;
691 //gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_B_START + RXDMA_STEP*id, dis_pio_virt), last_rcr_tail_h);
692 gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_C_START + RXDMA_STEP*id, dis_pio_virt), last_rcr_tail_l);
693 gen_pio_drv.pio_rd(getPIOAddress(RCR_CFIG_A_START + RXDMA_STEP*id, dis_pio_virt), rcr_start_addr);
694 printf("RxDMAChannel::pollCRPtr dma=%0d cring_len=0x%0h start_addr=0x%0h starting at time=%d\n", id, ring_length, rcr_start_addr, {get_time(HI), get_time(LO)});
695
696 poll_cr_active = 1;
697 while(not_done) {
698 if(RX_TEST_REACHED_END){
699 end_reached++ ;
700 printf("%d RxDMAChannel::pollCRPtr dma_id=%d REACHED END count - %d \n", {get_time(HI), get_time(LO)}, id,end_reached);
701 }
702 //gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_B_START + RXDMA_STEP*id, dis_pio_virt), rcr_tail_h);
703 gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_C_START + RXDMA_STEP*id, dis_pio_virt), rcr_tail_l);
704
705 if(last_rcr_tail_l>rcr_tail_l) { //wrap around case
706 no_of_entries=((rcr_tail_l-rcr_start_addr[31:0])+((rcr_start_addr[31:0]+ring_length)-last_rcr_tail_l))/8;
707 //printf("pollCR dma=%0d l>c rcr-start=0x%0h rcr+ring_len-last=0x%0h\n", id, (rcr_tail_l-rcr_start_addr[31:0]), ((rcr_start_addr[31:0 ]+ring_length)-last_rcr_tail_l));
708 printf("pollCR dma=%0d l>c last_rcr_tail_l=%x rcr_tail_l=%x no_of_entries=%0d\n", id, last_rcr_tail_l, rcr_tail_l, no_of_entries);
709 //last_rcr_tail_h = rcr_tail_h;
710 last_rcr_tail_l = rcr_tail_l;
711 }
712 else if(last_rcr_tail_l<rcr_tail_l) {
713 no_of_entries=(rcr_tail_l-last_rcr_tail_l)/8;
714 printf("pollCR dma=%0d l<c last_rcr_tail_l=%x rcr_tail_l=%x no_of_entries=%0d\n", id, last_rcr_tail_l, rcr_tail_l, no_of_entries);
715 //last_rcr_tail_h = rcr_tail_h;
716 last_rcr_tail_l = rcr_tail_l;
717 }
718 else {
719 no_of_entries=0;
720 //printf("pollCR dma=%0d l=c last_rcr_tail_l=%x rcr_tail_l=%x\n", id, last_rcr_tail_l, rcr_tail_l);
721 }
722
723 if(no_of_entries>0) {
724 printf(" Sending mbox to checker DMA - %d no_of_entries - %d \n",id,no_of_entries);
725 cr_update = new();
726 cr_update.dma_no = id;
727 cr_update.no_of_entries = no_of_entries;
728 if(get_plus_arg (CHECK, "RX_DROP_PKT_CHECK"))
729 mailbox_put(mbox_id.niu_rxpath_cr, cr_update);
730
731 printf(" Done Sending mbox to checker DMA - %d no_of_entries - %d \n",id,no_of_entries);
732
733 // ######## updating PTRREAD/PKTREAD fields to RX_DMA_CTL_STAT has to be done in 1 place: checker #######
734 //gen_pio_drv.pio_rd(getPIOAddress(RX_DMA_CTL_STAT_START + RXDMA_STEP*id, dis_pio_virt), rd_data, 1'b0);
735 //rd_data[31:16] = no_of_entries;
736 //gen_pio_drv.pio_wr(getPIOAddress(RX_DMA_CTL_STAT_START + RXDMA_STEP*id, dis_pio_virt), rd_data);
737 }
738 if(end_reached>=NO_OF_ITER) {
739 not_done =0;
740 }
741
742 repeat(poll_interval) @(posedge CLOCK);
743 }
744
745 poll_cr_done= 1;
746}
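// Worked example of the wrap-around branch above (values assumed, treating ring_length
// as the byte span of the ring, which is how the arithmetic uses it): with
// rcr_start_addr[31:0] = 32'h1000, ring_length = 'h800, last_rcr_tail_l = 32'h17f0 and a
// newly read rcr_tail_l = 32'h1010, the tail has wrapped, so
// no_of_entries = ((0x1010 - 0x1000) + ((0x1000 + 0x800) - 0x17f0)) / 8 = (16 + 16)/8 = 4.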
747task RxDMAChannel::UpdateRCRStat() {
748
749 bit[63:0] rd_data;
750 rxdma_ctl_stat_reg ctlstat_reg;
751 Crcr_update rcr_update;
752 integer no_of_entries;
753 integer total_pkts, total_rcr_entries;
754 integer i;
755
756 while(1) {
757 no_of_entries = mailbox_get(WAIT,mbox_id.niu_rxpath_rcr_update[id], rcr_update);
758 ctlstat_reg = new();
759
760 total_pkts = rcr_update.pkts_checked;
761 total_rcr_entries = 0;
762 for(i=0;i<rcr_update.pkts_checked;i++) {
763 total_rcr_entries += rcr_update.scatters[i];
764 }
765
766 ctlstat_reg.update_PKTREAD = total_pkts;
767 ctlstat_reg.update_PTRREAD = total_rcr_entries;
768 rxdma_ctl_stat_update(2, ctlstat_reg);
769
770 printf("UpdateRCRStat rxdma=%0d updated RX_DMA_CTL_STAT with PTRREAD=0x%x PKTREAD=0x%x at %0d\n",
771 id, total_rcr_entries, total_pkts, get_time(LO));
772
773/*
774 for(i=0;i<rcr_update.pkts_checked;i++) {
775 ctlstat_reg.update_PKTREAD = 1;
776 ctlstat_reg.update_PTRREAD = rcr_update.scatters[i];
777 rxdma_ctl_stat_update(2, ctlstat_reg);
778 }
779
780 //update CR read pointer with num of entries read
781 gen_pio_drv.pio_rd(getPIOAddress(RX_DMA_CTL_STAT_START + RXDMA_STEP*id, dis_pio_virt), rd_data, 1'b0);
782 rd_data[RX_DMA_CTL_STAT_PKTREAD] = 1; // Choose these appropriately this cannot be more than rcr_update.pkts_checked
783 rd_data[RX_DMA_CTL_STAT_PTRREAD] = rcr_update.scatters[i];
784 gen_pio_drv.pio_wr(getPIOAddress(RX_DMA_CTL_STAT_START + RXDMA_STEP*id, dis_pio_virt), rd_data);
785 printf("UpdateRCRStat dma=%0d updated rx_dma_ctl_stat[31:0] with rcr_update 0x%h at %0d\n",
786 id, {rd_data[RX_DMA_CTL_STAT_PTRREAD],rd_data[RX_DMA_CTL_STAT_PKTREAD]}, get_time(LO));
787*/
788
789 }
790}
791///////////////////////////////////////////////////////////
792task RxDMAChannel::UpdateCompletionRing(CRxToken RxToken) {
793 // Parse through the info in the Token and format the completion ring content
794 completionring.UpdateCompletionRing(RxToken);
795}
796
797function integer RxDMAChannel::getPacketAddress(CRxToken RxToken) {
798 // This is where the scheduler kicks in --
799 // Need to get the exact address from the scheduler
800 integer status;
801 integer i;
802
803 printf ("[%0d] curr_ring_size = %0d, max_size = %0d\n", get_time(LO),
804 descpScheduler.desc_ring.desc_ring.size(), desc_ring.ring_size);
805// TO BE CHANGED - should be based upon reading the head pointer before adding more descriptors
806 if (descpScheduler.desc_ring.desc_ring.size() > desc_ring.ring_size) {
807 RxToken.pkt_type = RNGFULL_DROP_RxPKT;
808 status = -1;
809 } else {
810 // Update Header length-
811 updateHeaderLength(RxToken);
812 status = descpScheduler.getAddress(RxToken);
813 printf(" RDMC DEBUG - SCHEDULER RETURN ADDRESS - %x PktType Set to %d \n",RxToken.packet_start_address[0],RxToken.pkt_type);
814
815 // update completion ring here
816 if(RxToken.pkt_type == GOOD_RxPKT)
817 UpdateCompletionRing( RxToken );
818
 819 // The possible return statuses are:
 820
 821 // - no more descriptors available (i.e. errors / the packet gets dropped)
 822 // - success, with the buffer address set in the Token
823 }
824 getPacketAddress = status;
825}
826
827function integer RxDMAChannel::CheckDMAStatus(CRxToken RxToken) {
828
829// A generic function that checks the DMA's status and returns it.
830// The status should reflect, for example:
831
832// If the DMA is inactive -
833// Is the Ring Full/Empty?
834// Is Completion Ring Full
835// Any Errors?
836
 837 // For now let's return valid if the DMA is active and has been kicked
838
839 // Check if Completion ring ack is still pending in the host, if so
840 // check for CR shadow getting full, based upon that drop the packet
841 integer cr_status;
842
843 if(active) {
844 // cr_status = completionring.CheckPendingAckStatus();
845 // if(cr_status)
846 // else {
847 // CheckDMAStatus = 0;
848 // RxToken.pkt_type = CRCACHE_FULL_DROP_RxPKT;
849 // }
850 CheckDMAStatus = 1;
851 } else {
852 CheckDMAStatus = 0;
853 RxToken.pkt_type = RNGFULL_DROP_RxPKT;
854 }
855}
856
857task RxDMAChannel::resetRxDma() {
858 bit [39:0] address;
859 bit [63:0] data;
860
861 address = RXDMA_CFIG1 + id*40'h200;
862 data = 64'h0;
863 data[30] = 1; // reset bit
864 //gen_pio_drv.pio_wr(address,data);
865 //RXDMA_CFIG1_pio_wr(address,data);
866 pio_wr_RXDMA_CFIG1(data);
867 printf ("RxDMAChannel::resetRxDma() Time %0d, DMA - %0d was just reset\n", {get_time(HI),get_time(LO)}, id);
868}
869
870task RxDMAChannel::setRxDmaCfig_1(bit [63:0] data, (bit read_back = 1'b0), (bit read_only = 1'b0)){
871 bit [39:0] address;
872 bit [63:0] rd_data;
873 bit [63:0] data_tmp0;
874 bit [63:0] Rdata0;
875 bit rst_done = 1;
876 bit [63:0] memArray_addr;
877 bit [7:0] be = 8'hff;
878
879 if(~read_only) {
880 active = data[31];
881 address = RXDMA_CFIG1 + id*40'h200;
882 //gen_pio_drv.pio_wr(address,data);
883 //RXDMA_CFIG1_pio_wr(address, data);
884 pio_wr_RXDMA_CFIG1(data);
885
886 if(read_back) {
887 repeat(10) @(posedge CLOCK);
888 //gen_pio_drv.pio_rd(address,rd_data);
889 //RXDMA_CFIG1_pio_rd(address,rd_data);
 890 pio_rd_RXDMA_CFIG1(rd_data);
891 }
892 } else {
893 while(rst_done) {
894 repeat(100) @(posedge CLOCK);
895 address = RXDMA_CFIG1 + id*40'h200;
896 //gen_pio_drv.pio_rd(address,rd_data);
897 //RXDMA_CFIG1_pio_rd(address,rd_data);
 898 pio_rd_RXDMA_CFIG1(rd_data);
899 rst_done = rd_data[30];
900 }
901 }
902
903#ifdef N2_FC
904 Fc_Niu_Ev2a_setRxDmaCfig_1(data);
905#endif
906}
907
908task RxDMAChannel::SetPage0Registers(bit [31:0] mask,bit [31:0] value, bit [31:0] reloc) {
909 bit [39:0] address;
910 bit [63:0] memArray_addr;
911 bit [63:0] Rdata0;
912 bit [63:0] Rdata1;
913 bit [63:0] Rdata2;
914 bit [63:0] data_tmp0;
915 bit [63:0] data_tmp1;
916 bit [63:0] data_tmp2;
917 bit [7:0] be = 8'hff;
918
919 page_mask0=mask; page_value0=value; page_reloc0=reloc;
920 address = RX_LOG_MASK1_START + id*8'h40;
921 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),{32'h0,mask});
922
923 address = RX_LOG_VAL1_START + id*8'h40;
924 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),{32'h0,value});
925
926 address = RX_LOG_RELO1_START + id*8'h40;
927 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),{32'h0,reloc});
928
929#ifdef N2_FC
930 Fc_Niu_Ev2a_SetPage0Registers (mask, value, reloc);
931#endif
932
933}
934
935task RxDMAChannel::SetPage1Registers(bit [31:0] mask,bit [31:0] value, bit [31:0] reloc) {
936 bit [39:0] address;
937 bit [63:0] memArray_addr;
938 bit [7:0] be = 8'hff;
939 bit [63:0] Rdata0;
940 bit [63:0] Rdata1;
941 bit [63:0] Rdata2;
942 bit [63:0] data_tmp0;
943 bit [63:0] data_tmp1;
944 bit [63:0] data_tmp2;
945
946 page_mask1=mask; page_value1=value; page_reloc1=reloc;
947 address = RX_LOG_MASK2_START + id*8'h40;
948 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),{32'h0,mask});
949
950 address = RX_LOG_VAL2_START + id*8'h40;
951 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),{32'h0,value});
952
953 address = RX_LOG_RELO2_START + id*8'h40;
954 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),{32'h0,reloc});
955
956#ifdef N2_FC
957Fc_Niu_Ev2a_SetPage1Registers (mask, value, reloc);
958#endif
959
960}
961
962
963// Should be used from common class between Tx and Rx
964task RxDMAChannel::setRxLogPgVld(bit [63:0] data,(bit read_back = 1'b0)) {
965
966
967 bit [39:0] address;
968 bit [63:0] rd_data;
969 bit [63:0] Rdata0;
970 bit [63:0] data_tmp0;
971 bit [63:0] memArray_addr;
972 bit [7:0] be = 8'hff;
973
974 address = RX_LOG_PAGE_VLD + id*40'h40;
975 printf("Log Page Address is %x \n", address);
976
977 if(fun_no_has_been_set) {
978 gen_pio_drv.pio_rd(getPIOAddress(address, dis_pio_virt),rd_data);
979 data[3:2] = rd_data[3:2]; // retain the original function number.
980 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),data);
981 } else {
982 SetDefFunc(data[3:2]);
983 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),data);
984 }
985
986#ifdef N2_FC
987 Fc_Niu_Ev2a_setRxLogPgVld (data);
988#endif
989
990
991
992 if(read_back) {
993 repeat(10) @(posedge CLOCK);
994 gen_pio_drv.pio_rd(getPIOAddress(address, dis_pio_virt),rd_data);
995 }
996}
997
998
999task RxDMAChannel::setRbrConfig_A(bit [63:0] data, (bit read_back = 1'b0),(integer ring_page_id = 0) ){
1000
1001 bit [39:0] address;
1002 integer status;
1003 bit [63:0] config_rbr_data1;
1004 bit [63:0] rd_data;
1005 bit [63:0] Rdata0;
1006 bit [63:0] data_tmp0;
1007 bit [5:0] rand_num;
1008 bit [63:0] memArray_addr;
1009 bit [7:0] be = 8'hff;
1010
1011
1012 // RBR_CFIG_A address is a function of dma channel
1013
1014 rand_num = random()%64;
1015 ring_size = data[63:48];
1016 address = RBR_CFIG_A + id*40'h200;
1017 //config_rbr_data1 = {data[63:48],4'hf,ring_start_addr[43:6],rand_num};
1018 config_rbr_data1 = data[63:0];
1019 ring_start_addr = data[43:0];
1020 printf("RNG_STADDR %0h\n",ring_start_addr);
1021 desc_ring.initRing(ring_start_addr,ring_size,xlate_on, ring_page_id);
1022 desc_ring.xlate_on = xlate_on;
1023 // ncu_driver.write_data(address,data);
1024 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),config_rbr_data1);
1025
1026#ifdef N2_FC
1027 Fc_Niu_Ev2a_setRbrConfig_A (config_rbr_data1);
1028#endif
1029
1030 if(read_back) {
1031 repeat(10) @(posedge CLOCK);
1032 gen_pio_drv.pio_rd(getPIOAddress(address, dis_pio_virt),rd_data);
1033 }
1034
1035}
1036
1037task RxDMAChannel::setZcpRdc(bit [63:0] data, (bit read_back = 1'b0)){
1038
1039
1040 bit [39:0] address;
1041 bit [63:0] rd_data;
1042 bit [4:0] dma_chnl;
1043 dma_chnl = data[4:0];
1044
1045 address = ZCP_RDC_TBL;
1046 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),data);
1047
1048 if(read_back) {
1049 repeat(10) @(posedge CLOCK);
1050 gen_pio_drv.pio_rd(getPIOAddress(address, dis_pio_virt),rd_data);
1051 }
1052
1053
1054}
1055task RxDMAChannel::setRbrConfig_B(bit [63:0] data, (bit read_back = 1'b0)){
1056
1057
1058 bit [39:0] address;
1059 bit[1:0] blk_size; //00=4k,01=8K,10=16K,11=32K
1060 bit[1:0] buf_siz0;
1061 bit[1:0] buf_siz1;
1062 bit[1:0] buf_siz2;
1063 bit [63:0] config_rbr_data2;
1064 bit [63:0] rd_data;
1065 bit [63:0] Rdata0;
1066 bit [63:0] data_tmp0;
1067 bit valid0,valid1,valid2;
1068 bit [63:0] memArray_addr;
1069 bit [7:0] be = 8'hff;
1070
1071 //config_data2 = {38'h00_0000_0000,blk_size,valid2,5'b0_0000,buf_siz2,valid1,5'b0_0000,buf_size1,valid0,5'b0_0000,buf_size0};
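 // RBR_CFIG_B field layout as decoded below:
 //   [25:24] block size (00=4K, 01=8K, 10=16K, 11=32K)
 //   [23]    buffer size 2 valid     [17:16] buffer size 2 code
 //   [15]    buffer size 1 valid     [9:8]   buffer size 1 code
 //   [7]     buffer size 0 valid     [1:0]   buffer size 0 code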
1072
1073 valid0 = data[7];
1074 valid1 = data[15];
1075 valid2 = data[23];
1076
1077 descpScheduler.set_blk_size(data[25:24]);
1078 case(data[25:24]) {
1079 0: dma_block_size = 4096;
1080 1: dma_block_size = 8192;
1081 2: dma_block_size = 16384;
1082 3: dma_block_size = 32768;
1083 default: { printf("WARNING: Not a valid block size for the RxDMA\n"); }
1084 }
1085 descpScheduler.set_bufsz0(data[1:0],valid0);
1086 descpScheduler.set_bufsz1(data[9:8],valid1);
1087 descpScheduler.set_bufsz2(data[17:16],valid2);
1088
1089 descpScheduler.print();
1090
1091 address = RBR_CFIG_B + id*40'h200;
1092 // Add this to descriptor carving class
1093
1094
1095
1096 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),data);
1097
1098 if(read_back) {
1099 repeat(10) @(posedge CLOCK);
1100 gen_pio_drv.pio_rd(getPIOAddress(address, dis_pio_virt),rd_data);
1101 }
1102
1103#ifdef N2_FC
1104 Fc_Niu_Ev2a_setRbrConfig_B (data);
1105#endif
1106
1107}
1108task RxDMAChannel::readRxRingHead(var bit [63:0] data){
1109 bit [39:0] address;
1110 integer status;
1111
1112 // This should be called once at the config time and then hardware updates it.
1113 address = RBR_HDH + id*40'h200;
1114 gen_pio_drv.pio_rd(getPIOAddress(address, dis_pio_virt),data);
1115}
1116
1117task RxDMAChannel::setRxRingKick(bit[63:0] data){
1118 bit[39:0] address;
1119 bit[15:0] no_of_desc;
1120 bit [63:0] memArray_addr;
1121 bit [63:0] Rdata0;
1122 bit [63:0] data_tmp0;
1123 bit [7:0] be = 8'hff;
1124
1125 no_of_desc = data[15:0];
1126 printf("No of descriptors kicked is %0h\n", data);
1127 address = RBR_KICK + id*40'h200;
1128 set_descriptor(no_of_desc);
1129 curr_rbr_desc_kicked_cnt += data;
1130 curr_rbr_desc_kicked_cnt = curr_rbr_desc_kicked_cnt % (desc_ring.ring_size + 1);
1131
1132 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),data);
1133
1134#ifdef N2_FC
1135Fc_Niu_Ev2a_setRxRingKick (data);
1136#endif
1137}
1138
1139
1140task RxDMAChannel::setRcrConfig_A(bit[63:0] data, (bit read_back = 1'b0), (integer ring_page_id = 0)) {
1141 bit[39:0] address;
1142 bit[63:0] rd_data;
1143 bit[63:0] Rdata0;
1144 bit[63:0] data_tmp0;
1145 bit [63:0] memArray_addr;
1146 bit [7:0] be = 8'hff;
1147
1148 // Add this to RCR class
1149
1150 address = RCR_CFIG_A + id*40'h200;
1151 completionring.config_ring(data[43:0],data[63:48],ring_page_id);
1152
1153 gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),data);
1154
1155 if(read_back) {
1156 repeat(10) @(posedge CLOCK);
1157 gen_pio_drv.pio_rd(getPIOAddress(address, dis_pio_virt),rd_data);
1158 }
1159#ifdef N2_FC
1160 Fc_Niu_Ev2a_setRcrConfig_A (data);
1161#endif
1162}
1163
1164
1165// add transmit channel control and status reg
1166task RxDMAChannel::create_descriptor(var CRxdescriptor desc, bit[31:0] address, (integer pkt_page_id = 0) ){
1167 desc = new(0,pkt_page_id);
1168 desc.blk_addr = address;
1169 desc.valid = 1;
1170 desc.pkt_page_id = pkt_page_id;
1171}
1172
1173
1174task RxDMAChannel::set_descriptor(integer no_of_desc) {
1175 integer i;
1176 bit[43:0] address;
1177 CRxdescriptor desc;
1178
1179 for(i=0; i<no_of_desc; i++) {
1180 if(pkts_in_alternate_pages)
1181 PktBuffers_page_id = (i%2) ? page0_id : page1_id;
1182 else
1183 PktBuffers_page_id = PktBuffers_page ? page1_id : page0_id;
1184 address = SparseMem.get_address( (dma_block_size/SparseMem.get_block_size()),PktBuffers_page_id,dma_block_size);
1185
1186 if(descr_addr_error_pkt_num!=0) {
1187 if(i==descr_addr_error_pkt_num-1) { //only corrupt descr specified
1188 printf("InitRXDMA descr_addr_error_pkt_num=%0d\n", descr_addr_error_pkt_num);
1189 address = SparseMem.get_address( (dma_block_size/SparseMem.get_block_size()), PktBuffers_page_id+3, dma_block_size);
1190 }
1191 }
1192
1193 create_descriptor(desc,address[43:12],PktBuffers_page_id);
1194 desc_ring.add_descriptor(desc);
1195 descpScheduler.pushDescForSch(address[43:12],PktBuffers_page_id);
1196 }
1197}
1198
1199
1200task RxDMAChannel::InitDMAChan(integer dma_chnl, integer desc_ring_length, integer compl_ring_len, bit [63:0] rbr_config_B_data, bit [15:0] initial_kick, integer xlation) {
1201
1202bit [19:0] handle;
1203bit [15:0] RBR_LEN;
1204bit [15:0] compl_ring_length;
1205bit [39:0] ring_start_address, rcr_start_addr, mailbox_address;
1206integer status0;
1207integer status1;
1208bit [31:0] mask0, value0, reloc0;
1209bit [31:0] mask1, value1, reloc1;
1210bit full_hdr;
1211integer page_id, ret ;
1212integer byte_alignment;
1213integer blk_size, buf_siz0, buf_siz1, buf_siz2, vld0, vld1, vld2;
1214integer desired_blocks;
1215integer mailbox_size;
1216integer no_of_pages;
1217integer pkt_configurator;
1218integer sp_bsize;
1219bit rbr_addr_overflow;
1220bit [63:0] cfig2_wr_data=0;
1221bit [63:0] memArray_addr;
1222bit [7:0] be = 8'hff;
1223bit [63:0] Rdata0;
1224bit [63:0] Rdata1;
1225bit [63:0] data_tmp0;
1226bit [63:0] data_tmp1;
1227bit [1:0] func_num;
1228
1229sp_bsize = SparseMem.get_block_size();
1230
1231if (get_plus_arg (CHECK,"PKT_CONFIGURATOR"))
1232 pkt_configurator = 1;
1233else
1234 pkt_configurator = 0;
1235
1236if (!pkt_configurator) {
1237// Set the dma number in the ZCP RDC Table
1238 setZcpRdc({32'h0000_0000,dma_chnl});
1239}
1240
1241// program the control header length in the hardware/shadow, bit[0] in RXDMA_CFIG2 reg
1242printf ("buffer_offset=%0d, ctrl_hdr_len=%0d\n", buffer_offset,ctrl_hdr_len);
1243if (ctrl_hdr_len==18)
1244 cfig2_wr_data[0] = 1;
1245if (buffer_offset==64)
1246 cfig2_wr_data[1] = 1;
1247if (buffer_offset==128)
1248 cfig2_wr_data[2] = 1;
1249if (cfig2_wr_data[2:0] && (cfig2_wr_data[2:1] != 2'b11)) {
1250 //gen_pio_drv.pio_wr(RXDMA_CFIG2_START + RXDMA_STEP*dma_chnl, cfig2_wr_data);
1251 gen_pio_drv.pio_wr(getPIOAddress(RXDMA_CFIG2_START + RXDMA_STEP*dma_chnl, dis_pio_virt), cfig2_wr_data);
1252 printf ("InitRXDMA buffer offset encoding = 2'b%b, full_hdr_len = %b for dma - %0d\n", \
1253 cfig2_wr_data[2:1], cfig2_wr_data[0], dma_chnl);
1254}
1255
1256 //printf("InitRXDMA calling setRbrConfig_B rbr_config_B_data=0x%0h dma=%0d\n", rbr_config_B_data, dma_chnl);
1257 setRbrConfig_B(rbr_config_B_data,1'b0);
1258
1259#ifdef N2_FC
1260#else
1261 xlate_on = xlation;
1262#endif
1263
1264// RBR Setup. Using SparseMem model for unique start_addresses allocation across RDMA channels
1265 handle = SparseMem.get_page_handle();
1266 printf("Value of the Page handle is %h\n", handle);
1267
1268 // get mask,value and reloc for page0
1269 compl_ring_length = compl_ring_len;
1270 RBR_LEN = desc_ring_length;
1271 mailbox_size = 64;
1272 no_of_pages = (4*RBR_LEN + 8*compl_ring_length + RBR_LEN*dma_block_size + mailbox_size)/sp_bsize;
1273 //no_of_pages = (4*RBR_LEN + 8*compl_ring_length + RBR_LEN*sp_bsize + mailbox_size)/sp_bsize;
1274 if (no_of_pages < 1) no_of_pages = 1; else no_of_pages = no_of_pages + 1;
1275
1276 // to account for prefetch which is 4 wide 8 deep, we need to allocate 32 more than desired
1277 no_of_pages += 32;
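 // Sizing sketch (illustrative; sp_bsize is assumed to be 4096 here): with RBR_LEN = 256,
 // compl_ring_length = 256 and dma_block_size = 4096, the span is
 // 4*256 + 8*256 + 256*4096 + 64 = 1,051,712 bytes -> 256 blocks after the integer
 // divide, rounded up to 257, then padded by 32 for the 4-wide/8-deep prefetch: 289.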
1278
1279#ifdef N2_FC
1280 xlate_on = xlation;
1281 if(xlate_on){
1282 status0 = SparseMem.get_page_mask(no_of_pages,0,page0_id,mask0,value0,reloc0);
1283 } else {
1284 mask0 = 0;
1285 value0 = 0;
1286 reloc0 = 0;
1287 status0 = 1;
1288 }
1289#else
1290 status0 = SparseMem.get_page_mask(no_of_pages,0,page0_id,mask0,value0,reloc0);
1291 if (!xlate_on) {
1292 ret = SparseMem.force_page_contexts(page0_id, 32'h0, 32'h0, 32'h0);
1293 mask0 = 0;
1294 value0 = 0;
1295 reloc0 = 0;
1296 status0 = 1;
1297 }
1298#endif
1299
1300 if(status0 == -1) {
 1301 printf("TB_ERROR: SparseMem.get_page_mask() function call was not Successful\n");
 1302 return;
1303 } else {
1304 // add the task set page0 registers
1305 if(xlate_on) {
1306 printf("InitRXDMA: RxAddressTranslation Mask0 %h Page_id %4d for DMA-%0d\n",mask0,page0_id,dma_chnl);
1307 printf("InitRXDMA: RxAddressTranslation Value0 %h Page_id %4d for DMA-%0d\n",value0,page0_id,dma_chnl);
1308 printf("InitRXDMA: RxAddressTranslation Reloc0 %h Page_id %4d for DMA-%0d\n",reloc0,page0_id,dma_chnl);
1309 }
1310 // call task setpage0 registers
1311 if (xlate_on)
1312 SetPage0Registers( mask0,value0,reloc0);
1313 }
1314
1315 // get mask,value and reloc for page1
1316 compl_ring_length = compl_ring_len;
1317 RBR_LEN = desc_ring_length;
1318 mailbox_size = 64;
1319 no_of_pages = (4*RBR_LEN + 8*compl_ring_length + RBR_LEN*dma_block_size + mailbox_size)/sp_bsize;
1320 //no_of_pages = (4*RBR_LEN + 8*compl_ring_length + RBR_LEN*sp_bsize + mailbox_size)/sp_bsize;
1321 if (no_of_pages < 1) no_of_pages = 1; else no_of_pages = no_of_pages + 1;
1322
1323 // to account for prefetch which is 4 wide 8 deep, we need to allocate 32 more than desired
1324 no_of_pages += 32;
1325
1326#ifdef N2_FC
1327 if(xlate_on){
1328 status1 = SparseMem.get_page_mask(no_of_pages,0,page1_id,mask1,value1,reloc1);
1329 } else {
1330 mask1 = 0;
1331 value1 = 0;
1332 reloc1 = 0;
1333 status1 = 1;
1334 }
1335#else
1336 status1 = SparseMem.get_page_mask(no_of_pages,0,page1_id,mask1,value1,reloc1);
1337 if (!xlate_on) {
1338 ret = SparseMem.force_page_contexts(page1_id, 32'h0, 32'h0, 32'h0);
1339 mask1 = 0;
1340 value1 = 0;
1341 reloc1 = 0;
1342 status1 = 1;
1343 }
1344#endif
1345
1346 if(status1 == -1) {
 1347 printf("TB_ERROR: SparseMem.get_page_mask() function call was not Successful\n");
 1348 return;
1349 } else {
1350 // add the task set page1 registers
1351 if(xlate_on) {
1352 printf("InitRXDMA: RxAddressTranslation Mask1 %h Page_id %4d for DMA-%0d\n",mask1,page1_id,dma_chnl);
1353 printf("InitRXDMA: RxAddressTranslation Value1 %h Page_id %4d for DMA-%0d\n",value1,page1_id,dma_chnl);
1354 printf("InitRXDMA: RxAddressTranslation Reloc1 %h Page_id %4d for DMA-%0d\n",reloc1,page1_id,dma_chnl);
1355 }
1356 // call task setpage1 registers
1357 if (xlate_on)
1358 SetPage1Registers( mask1,value1,reloc1);
1359 }
1360
1361 // set the id
1362 id = dma_chnl;
1363
1364
1365
1366 // Enable the logical pages
1367 func_num = function_no;
1368 setRxLogPgVld({60'h0, func_num, page1_valid, page0_valid},1'b0);
1369 printf("function number assigned to rxdma%0d is %0d\n", id, func_num);
1370
1371 // page_id = (random()%2) ? page0_id : page1_id;
1372
1373
1374 if (random_page_alloc) {
1375 RBR_page_id = (random()%2) ? page0_id : page1_id;
1376 RCR_page_id = (random()%2) ? page0_id : page1_id;
1377 MailBox_page_id = (random()%2) ? page0_id : page1_id;
1378 PktBuffers_page_id = (random()%2) ? page0_id : page1_id;
1379 }
1380 else {
1381 RBR_page_id = RBR_page ? page1_id : page0_id;
1382 RCR_page_id = RCR_page ? page1_id : page0_id;
1383 MailBox_page_id = MailBox_page ? page1_id : page0_id;
1384 PktBuffers_page_id = PktBuffers_page ? page1_id : page0_id;
1385 }
1386 printf ("InitRXDMA(): random_page_alloc=%b, RBR_page=%0d, RCR_page=%0d, MailBox_page=%0d, PktBuffers_page=%0d\n",
1387 random_page_alloc,RBR_page,RCR_page,MailBox_page,PktBuffers_page);
1388 page_id = page0_id;
1389 byte_alignment = 64;
1390
1391 RBR_LEN = desc_ring_length;
1392 printf ("SparseMem.get_block_size() = %0d\n", SparseMem.get_block_size());
1393 if ((4*RBR_LEN/SparseMem.get_block_size()) < 1)
1394 desired_blocks = 1;
1395 else
1396 desired_blocks = 4*RBR_LEN/SparseMem.get_block_size() + 1;
1397
1398 //printf ("desired_blocks asking SparseMem.get_page_mask() for ring_start_address = %0d\n", desired_blocks);
1399 //To avoid rbr_addr_overflow,
1400 ring_start_address = SparseMem.get_address(desired_blocks,RBR_page_id,RBR_PAGE_ALIGNMENT);
1401
1402 if(dring_addr_error==1) {
1403 printf("InitRXDMA dring_addr_error=1 'before corrupt' Dring_start_addr=0x%0h\n", ring_start_address);
1404 ring_start_address=SparseMem.get_address(desired_blocks, RBR_page_id+3, RBR_PAGE_ALIGNMENT);
1405 }
1406 printf("InitRXDMA Dring_start_addr=0x%0h\n", ring_start_address);
1407
1408
1409 rbr_addr_overflow = (ring_start_address[17:2]+RBR_LEN) > RBR_MAX_RING_LEN;
1410
1411 while (rbr_addr_overflow) {
1412 printf ("SparseMem allocated a non-64KB-aligned addr. start_addr+RBR_LEN=%0d, MAX_RBR_PAGE_LEN=65536\n");
1413 ring_start_address = SparseMem.get_address(desired_blocks,RBR_page_id,64);
1414 rbr_addr_overflow = (ring_start_address[17:2]+RBR_LEN) > RBR_MAX_RING_LEN;
1415 }
1416
1417
1418 if(ring_start_address === 40'hzz_zzzz_zzzz) {
1419 printf("TESTBENCH ERROR. SparseMem.get_address() returned an unknown value.\n");
1420 return;
1421 }
1422 else {
1423 printf("Start Address of the RBR for dma[%0d] is %h\n", id, ring_start_address);
1424 }
1425
1426 // Now that we have start_addr, program the RBR_CONFIG_A register with LEN and STADDR
1427 RBR_LEN = desc_ring_length;
1428 rbr_ring_len = desc_ring_length; // global variable for other functions' use
1429 setRbrConfig_A({RBR_LEN, 8'h00, ring_start_address}, 1'b0,RBR_page_id);
1430 //moved above to get dma_block_size setRbrConfig_B(rbr_config_B_data,1'b0);
1431
1432 if ((8*compl_ring_length/SparseMem.get_block_size()) < 1)
1433 desired_blocks = 1;
1434 else
1435 desired_blocks = 8*compl_ring_length/SparseMem.get_block_size() + 1;
1436
1437 rcr_start_addr = SparseMem.get_address(desired_blocks,RCR_page_id,RCR_PAGE_ALIGNMENT);
1438
1439 if(cring_addr_error==1) {
1440 printf("InitRXDMA cring_addr_error=1 'before corrupt' Cring_start_addr=0x%0h\n", rcr_start_addr);
1441 rcr_start_addr=SparseMem.get_address(desired_blocks, RCR_page_id+3, RCR_PAGE_ALIGNMENT);
1442 }
1443 printf("InitRXDMA Cring_start_addr=0x%0h\n", rcr_start_addr);
1444
1445
1446 if(rcr_start_addr === 40'hzz_zzzz_zzzz) {
1447 printf("TESTBENCH ERROR. SparseMem.get_address() returned an unknown value.\n");
1448 return;
1449 }
1450 else {
1451 printf("Start Address of the RCR for dma[%0d] is %h\n", id, rcr_start_addr);
1452 }
1453
 1454 rcr_ring_len = compl_ring_length; // RCR ring length, kept for later use (e.g. WRED thresholds below)
1455 setRcrConfig_A({compl_ring_length, 8'h0, rcr_start_addr}, 1'b0,RCR_page_id);
1456
1457 // Get the mailbox address too
1458 mailbox_address = SparseMem.get_address(8,MailBox_page_id,64);
1459 printf ("MAILBOX address allocated by SparseMem for dma[%0d] is %h\n", id, mailbox_address);
1460
1461 // program the mailbox address
1462 setRxDmaCfig_1({32'h0000_0000, 24'h000000, mailbox_address[39:32]}, 1'b0);
1463 full_hdr = (ctrl_hdr_len==18);
1464 //gen_pio_drv.pio_wr(RXDMA_CFIG2_START+12'h200*id, {32'h0000_0000, mailbox_address[31:6], 3'b000, cfig2_wr_data[2:1], full_hdr});
1465 gen_pio_drv.pio_wr(getPIOAddress(RXDMA_CFIG2_START+12'h200*id,dis_pio_virt), {32'h0000_0000, mailbox_address[31:6], 3'b000, cfig2_wr_data[2:1], full_hdr});
1466
1467#ifdef N2_FC
1468 Fc_Niu_Ev2a_setRxDmaCfig_2 ({32'h0000_0000, mailbox_address[31:6], 6'h00});
1469#endif
1470 // Enable this particular DMA channel (#dma_chnl)
1471 setRxDmaCfig_1({32'h0000_0000, 24'h800000, mailbox_address[39:32]}, 1'b0);
1472#ifdef N2_FC
1473 Fc_Niu_Ev2a_setRxDmaCfig_1_1 ({32'h0000_0000, 24'h800000, mailbox_address[39:32]});
1474#endif
1475
1476 // Enable WRED by default (extremely desirable)
1477 enableWRED(16'h6512, rcr_ring_len-16'h0020, 4'h0, rcr_ring_len-16'h0020, 4'h0);
1478
1479 // Kick the initial number of blocks specified as the argument
1480 setRxRingKick({48'h0, initial_kick});
1481 repeat (50) @(posedge CLOCK);
1482
1483
1484}
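// Illustrative usage sketch (hypothetical test code; the handle name, ring lengths,
// RBR_CFIG_B value and kick count are assumptions, not taken from this file):
//
//   RxDMAChannel rx_dma;
//   rx_dma = new(0, "Rx");
//   // 256-entry RBR and RCR, RBR_CFIG_B encoded per setRbrConfig_B (4K blocks,
//   // buffer size code 0 valid), kick 128 descriptors, address translation off
//   rx_dma.InitDMAChan(0, 256, 256, 64'h0000_0000_0000_0080, 16'd128, 0);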
1485
1486
1487task RxDMAChannel::CheckMailboxData(bit [63:0] ctl_data_mask) {
1488bit [63:0] mbox_addr_h, mbox_addr_l,rx_dma_ctl_stat,rbr_stat,rbr_hdl,rbr_hdh,rcr_stat_a,rcr_stat_b,rcr_stat_c;
1489bit [63:0] mailbox_address,mem_read_data;
1490
1491 // read the mailbox address
1492 //gen_pio_drv.pio_rd(RXDMA_CFIG1_START+12'h200*id,mbox_addr_h);
1493 gen_pio_drv.pio_rd(getPIOAddress(RXDMA_CFIG1_START+12'h200*id, dis_pio_virt),mbox_addr_h);
1494 //gen_pio_drv.pio_rd(RXDMA_CFIG2_START+12'h200*id,mbox_addr_l);
1495 gen_pio_drv.pio_rd(getPIOAddress(RXDMA_CFIG2_START+12'h200*id, dis_pio_virt),mbox_addr_l);
1496
1497 printf ("RxDMAChannel::CheckMailboxData: xlate_on = %d \n", xlate_on);
1498 if (!xlate_on)
1499 mailbox_address = {20'h0,mbox_addr_h[11:0],mbox_addr_l[31:6],6'b0};
1500 else
1501 mailbox_address = { 24'h0, SparseMem.xlate_addr({20'h0,mbox_addr_h[11:0],mbox_addr_l[31:6],6'b0}, page0_id) };
1502
1503 printf ("RxDMAChannel::CheckMailboxData: actual mailbox address %h\n", mailbox_address);
1504
1505 // read all the status registers which go into the mailbox update
1506 gen_pio_drv.pio_rd(getPIOAddress(RX_DMA_CTL_STAT_START+12'h200*id, dis_pio_virt),rx_dma_ctl_stat);
1507 gen_pio_drv.pio_rd(getPIOAddress(RBR_STAT_START+12'h200*id, dis_pio_virt),rbr_stat);
1508 gen_pio_drv.pio_rd(getPIOAddress(RBR_HDL_START+12'h200*id, dis_pio_virt),rbr_hdl);
1509 gen_pio_drv.pio_rd(getPIOAddress(RBR_HDH_START+12'h200*id, dis_pio_virt),rbr_hdh);
1510 gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_C_START+12'h200*id, dis_pio_virt),rcr_stat_c);
1511 gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_B_START+12'h200*id, dis_pio_virt),rcr_stat_b);
1512 gen_pio_drv.pio_rd(getPIOAddress(RCR_STAT_A_START+12'h200*id, dis_pio_virt),rcr_stat_a);
1513
1514
1515 SparseMem.ReadMem(mailbox_address,mem_read_data,8'hff);
1516 if ((mem_read_data&ctl_data_mask) != ((rx_dma_ctl_stat|64'h0000_8000_0000_0000)&ctl_data_mask))
1517 printf( "ERROR: MailBox update WRONG in bytes 0-7. Expected %h Got %h @ Addr %h\n", rx_dma_ctl_stat, mem_read_data, mailbox_address);
1518 else
1519 printf ("MailBox update is CORRECT in bytes 0-7. Expected %h Got %h @ Addr %h\n", rx_dma_ctl_stat, mem_read_data, mailbox_address);
1520
1521 SparseMem.ReadMem(mailbox_address+8,mem_read_data,8'hff);
1522 if (mem_read_data != rbr_stat)
1523 printf( "ERROR: MailBox update WRONG in bytes 8-15. Expected %h Got %h @ Addr %h\n", rbr_stat, mem_read_data, mailbox_address+8);
1524 else
1525 printf ("MailBox update is CORRECT in bytes 8-15. Expected %h Got %h @ Addr %h\n", rbr_stat, mem_read_data, mailbox_address+8);
1526
1527 SparseMem.ReadMem(mailbox_address+16,mem_read_data,8'hff);
1528 if (mem_read_data[31:0] != rbr_hdl[31:0])
1529 printf( "ERROR: MailBox update WRONG in bytes 16-19. Expected %h Got %h @ Addr %h\n", rbr_hdl[31:0], mem_read_data, mailbox_address+16);
1530 else
1531 printf ("MailBox update is CORRECT in bytes 16-19. Expected %h Got %h @ Addr %h\n", rbr_hdl[31:0], mem_read_data, mailbox_address+16);
1532
1533 SparseMem.ReadMem(mailbox_address+20,mem_read_data,8'hff);
1534 if (mem_read_data[31:0] != rbr_hdh[31:0])
1535 printf( "ERROR: MailBox update WRONG in bytes 20-23. Expected %h Got %h @ Addr %h\n", rbr_hdh[31:0], mem_read_data, mailbox_address+20);
1536 else
1537 printf ("MailBox update is CORRECT in bytes 20-23. Expected %h Got %h @ Addr %h\n", rbr_hdh[31:0], mem_read_data, mailbox_address+20);
1538
1539 SparseMem.ReadMem(mailbox_address+32,mem_read_data,8'hff);
1540 if (mem_read_data[31:0] != rcr_stat_c[31:0])
1541 printf( "ERROR: MailBox update WRONG in bytes 32-35. Expected %h Got %h @ Addr %h\n", rcr_stat_c[31:0], mem_read_data, mailbox_address+32);
1542 else
1543 printf ("MailBox update is CORRECT in bytes 32-35. Expected %h Got %h @ Addr %h\n", rcr_stat_c[31:0], mem_read_data, mailbox_address+32);
1544
1545 SparseMem.ReadMem(mailbox_address+36,mem_read_data,8'hff);
1546 if (mem_read_data[31:0] != rcr_stat_b[31:0])
1547 printf( "ERROR: MailBox update WRONG in bytes 36-39. Expected %h Got %h @ Addr %h\n", rcr_stat_b[31:0], mem_read_data, mailbox_address+36);
1548 else
1549 printf ("MailBox update is CORRECT in bytes 36-39. Expected %h Got %h @ Addr %h\n", rcr_stat_b[31:0], mem_read_data, mailbox_address+36);
1550
1551 SparseMem.ReadMem(mailbox_address+40,mem_read_data,8'hff);
1552 if (mem_read_data != rcr_stat_a)
1553 printf( "ERROR: MailBox update WRONG in bytes 40-47. Expected %h Got %h @ Addr %h\n", rcr_stat_a, mem_read_data, mailbox_address+40);
1554 else
1555 printf ("MailBox update is CORRECT in bytes 40-47. Expected %h Got %h @ Addr %h\n", rcr_stat_a, mem_read_data, mailbox_address+40);
1556
1557}
1558
1559// obsolete; superseded by the pio_wr_*/pio_rd_* wrapper tasks below
1560//task RxDMAChannel::RXDMA_CFIG1_pio_wr(bit [63:0] address, bit [63:0] data) {
1561// gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),data);
1562//}
1563//
1564//task RxDMAChannel::RXDMA_CFIG1_pio_rd(bit [63:0] address, var bit [63:0] data) {
1565// gen_pio_drv.pio_rd(getPIOAddress(address, dis_pio_virt),data);
1566//}
1567//
1568//task RxDMAChannel::RXDMA_CFIG2_pio_wr(bit [63:0] address, bit [63:0] data) {
1569// gen_pio_drv.pio_wr(getPIOAddress(address, dis_pio_virt),data);
1570//}
1571//
1572//task RxDMAChannel::RXDMA_CFIG2_pio_rd(bit [63:0] address, var bit [63:0] data) {
1573// gen_pio_drv.pio_rd(getPIOAddress(address, dis_pio_virt),data);
1574//}
1575
1576task RxDMAChannel::periodic_kick((integer interval = 3000),
1577 (integer num_desc = -1), // -1 means kick a random number of descriptors
1578 (integer threshold = 256)) {
1579
1580 bit [31:0] num_desc_local;
1581 integer max_desc_can_be_kicked;
1582
1583 // Override the input parameters from command-line plusargs, if present
1584 if(get_plus_arg( CHECK, "RX_PERIODIC_KICK_NUM_DESC="))
1585 num_desc = get_plus_arg( NUM, "RX_PERIODIC_KICK_NUM_DESC=");
1586
1587 if(get_plus_arg( CHECK, "RX_PERIODIC_KICK_THRESHOLD="))
1588 threshold = get_plus_arg( NUM, "RX_PERIODIC_KICK_THRESHOLD=");
1589
1590 if(get_plus_arg( CHECK, "RX_PERIODIC_KICK_INTERVAL="))
1591 interval = get_plus_arg( NUM, "RX_PERIODIC_KICK_INTERVAL=");
1592
1593 printf("<%0d> RxDMAChannel::periodic_kick: interval:%0d num_desc:%0d threshold:%0d id:%0d\n",
1594 get_time(LO), interval, num_desc, threshold, id);
1595
1596 // wait for init done
1597 while(!active) { repeat (100) @(posedge CLOCK); }
1598
1599 while(1) {
1600
1601 // clamp an unreasonably small interval back to the default
1602 if (interval <= 100)
1603 interval = 3000;
1604
1605 repeat (interval) @(posedge CLOCK);
1606
1607 // always consume at least one clock so the loop cannot spin without advancing time
1608 @(posedge CLOCK);
1609
1610
1611 // compute how many more descriptors the ring can currently accept
1612 max_desc_can_be_kicked = desc_ring.ring_size - descpScheduler.desc_ring.desc_ring.size();
1613
1614 // avoid ERROR: Modulo by zero!
1615 if(max_desc_can_be_kicked == 0)
1616 max_desc_can_be_kicked =1;
1617
1618 // threshold
1619 if(threshold > desc_ring.ring_size) {
1620 threshold = desc_ring.ring_size;
1621 } else if(threshold < 2) {
1622 threshold = 2; // at least 2, to account for the large/medium/min buffer sizes
1623 }
1624
1625 // num_desc
1626 if(num_desc > max_desc_can_be_kicked) {
1627 num_desc_local = max_desc_can_be_kicked;
1628 } else if (num_desc == -1) {
1629 num_desc_local = urandom();
1630 num_desc_local = num_desc_local % (max_desc_can_be_kicked+1);
1631 } else {
1632 num_desc_local = num_desc;
1633 }
1634
1635 // Kick in multiples of 16 to accommodate an issue with the rdmc reorder mechanism.
1636 num_desc_local = num_desc_local - (num_desc_local % 16);
1637
1638 // If the kick count is approaching the ring size, also kick the remaining descriptors so the
1639 // RTL does not issue a non-64-byte-aligned address, which would result in out-of-order
1640 // responses on the SIU bus.
1641 if ((desc_ring.ring_size - curr_rbr_desc_kicked_cnt) < 16) {
1642 num_desc_local += desc_ring.ring_size - curr_rbr_desc_kicked_cnt;
1643 }
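    // Worked example (illustrative numbers): a random draw of 37 is rounded
    // down to 32 by the modulo-16 adjustment above; if only 10 descriptors
    // remain before the kick count reaches the ring size, those 10 are added
    // so the kick does not stop on a non-64-byte-aligned descriptor address.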
1644
1645
1646 num_desc_local = num_desc_local % (max_desc_can_be_kicked+1);
1647
1648 // clear the sign bit so the kick count can never go negative
1649 num_desc_local[31] = 0;
1650
1651
1652 if(RX_TEST_REACHED_END) {
1653 printf("<%0d> RxDMAChannel::periodic_kick: exiting the task since RX_TEST_REACHED_END=1\n", get_time(LO));
1654 return;
1655 }
1656
1657 // if the scheduler's descriptor count has dropped to the threshold, kick num_desc_local more descriptors
1658 if(descpScheduler.desc_ring.desc_ring.size() <= threshold) {
1659 printf("<%0d> RxDMAChannel::periodic_kick: interval:%0d num_desc:%0d threshold:%0d curr_desc_size:%0d, max_desc_size:%0d, dma_id:%0d\n",
1660 get_time(LO), interval, num_desc_local, threshold, descpScheduler.desc_ring.desc_ring.size(), desc_ring.ring_size, id);
1661 setRxRingKick(num_desc_local);
1662 }
1663 }
1664}
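// ---------------------------------------------------------------------------
// Illustrative command-line usage; the values below are examples only. The
// plusargs read at the top of periodic_kick() override its arguments:
//   +RX_PERIODIC_KICK_INTERVAL=5000   clocks between kick attempts
//   +RX_PERIODIC_KICK_NUM_DESC=64     descriptors per kick (-1 = random)
//   +RX_PERIODIC_KICK_THRESHOLD=128   kick once the scheduler's descriptor
//                                     count drops to this level
// A test would normally run the task in the background, as auto_periodic_kick()
// below does with fork ... join none.
// ---------------------------------------------------------------------------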
1665
1666task RxDMAChannel::auto_periodic_kick() {
1667 bit [31:0] dma_num;
1668
1669 if(get_plus_arg( CHECK, "RX_PERIODIC_KICK_AUTO=")) {
1670 dma_num = get_plus_arg( HNUM, "RX_PERIODIC_KICK_AUTO=");
1671 if(dma_num[id] == 1'b1 && (id < 16)) {
1672 fork
1673 periodic_kick();
1674 join none
1675 }
1676 }
1677}
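// ---------------------------------------------------------------------------
// Illustrative plusarg usage; the value is an example only. RX_PERIODIC_KICK_AUTO
// is parsed as a hexadecimal bit-mask with one bit per DMA channel (ids 0..15):
//   +RX_PERIODIC_KICK_AUTO=0005   enables background kicks on channels 0 and 2
// ---------------------------------------------------------------------------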
1678task RxDMAChannel::pio_wr_RXDMA_CFIG1(bit [63:0] rd_data) {
1679 gen_pio_drv.pio_wr(getPIOAddress(RXDMA_CFIG1 + RXDMA_STEP*id, dis_pio_virt), rd_data);
1680}
1681task RxDMAChannel::pio_rd_RXDMA_CFIG1(var bit [63:0] rd_data) {
1682 gen_pio_drv.pio_rd(getPIOAddress(RXDMA_CFIG1+RXDMA_STEP*id, dis_pio_virt),rd_data);
1683}
1684task RxDMAChannel::pio_wr_RXDMA_CFIG2(bit [63:0] rd_data) {
1685 gen_pio_drv.pio_wr(getPIOAddress(RXDMA_CFIG2 + RXDMA_STEP*id, dis_pio_virt), rd_data);
1686}
1687task RxDMAChannel::pio_rd_RXDMA_CFIG2(var bit [63:0] rd_data) {
1688 gen_pio_drv.pio_rd(getPIOAddress(RXDMA_CFIG2+RXDMA_STEP*id, dis_pio_virt),rd_data);
1689}
1690
1691task RxDMAChannel::pio_wr_RX_DMA_CTL_STAT_START(bit [63:0] rd_data) {
1692 gen_pio_drv.pio_wr(getPIOAddress(RX_DMA_CTL_STAT_START + RXDMA_STEP*id, dis_pio_virt), rd_data);
1693}
1694task RxDMAChannel::pio_rd_RX_DMA_CTL_STAT_START(var bit [63:0] rd_data) {
1695 gen_pio_drv.pio_rd(getPIOAddress(RX_DMA_CTL_STAT_START+RXDMA_STEP*id, dis_pio_virt),rd_data);
1696}
1697
1698task RxDMAChannel::pio_wr_RX_DMA_ENT_MSK_START(bit [63:0] rd_data) {
1699 gen_pio_drv.pio_wr(getPIOAddress(RX_DMA_ENT_MSK_START + RXDMA_STEP*id, dis_pio_virt), rd_data);
1700}
1701task RxDMAChannel::pio_rd_RX_DMA_ENT_MSK_START(var bit [63:0] rd_data) {
1702 gen_pio_drv.pio_rd(getPIOAddress(RX_DMA_ENT_MSK_START+RXDMA_STEP*id, dis_pio_virt),rd_data);
1703}
1704
1705task RxDMAChannel::pio_wr_RCR_CFIG_B_START(bit [63:0] rd_data) {
1706 gen_pio_drv.pio_wr(getPIOAddress(RCR_CFIG_B_START + RXDMA_STEP*id, dis_pio_virt), rd_data);
1707}
1708task RxDMAChannel::pio_rd_RCR_CFIG_B_START(var bit [63:0] rd_data) {
1709 gen_pio_drv.pio_rd(getPIOAddress(RCR_CFIG_B_START+RXDMA_STEP*id, dis_pio_virt),rd_data);
1710}
1711
1712task RxDMAChannel::pio_wr_RX_DMA_INTR_DEBUG_START(bit [63:0] rd_data) {
1713 gen_pio_drv.pio_wr(getPIOAddress(RX_DMA_INTR_DEBUG_START + RXDMA_STEP*id, dis_pio_virt), rd_data);
1714}
1715task RxDMAChannel::pio_rd_RX_DMA_INTR_DEBUG_START(var bit [63:0] rd_data) {
1716 gen_pio_drv.pio_rd(getPIOAddress(RX_DMA_INTR_DEBUG_START+RXDMA_STEP*id, dis_pio_virt),rd_data);
1717}
1718
1719task RxDMAChannel::pio_rd_RCR_CFIG_A(var bit [63:0] rd_data){
1720 gen_pio_drv.pio_rd(getPIOAddress(RCR_CFIG_A+RXDMA_STEP*id, dis_pio_virt), rd_data);
1721}
1722task RxDMAChannel::pio_rd_RX_MISC_DROP(var bit [63:0] rd_data){
1723 gen_pio_drv.pio_rd(getPIOAddress(RX_MISC_START+RXDMA_STEP*id, dis_pio_virt), rd_data);
1724}
1725task RxDMAChannel::pio_rd_RED_DISC(var bit [63:0] rd_data){
1726 gen_pio_drv.pio_rd(getPIOAddress(RED_DIS_CNT_START+RED_DIS_CNT_STEP*id, dis_pio_virt), rd_data);
1727}
1728task RxDMAChannel::pio_wr_RDC_RED_PARA(bit [63:0] rd_data) {
1729 gen_pio_drv.pio_wr(getPIOAddress(RDC_RED_PARA + 'h40*id, dis_pio_virt), rd_data);
1730}
1731task RxDMAChannel::pio_rd_RDC_RED_PARA(var bit [63:0] rd_data) {
1732 gen_pio_drv.pio_rd(getPIOAddress(RDC_RED_PARA+'h40*id, dis_pio_virt),rd_data);
1733}
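// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original environment: the wrapper pairs
// above are intended for simple read-modify-write sequences. The task name and
// the OR-mask argument are assumptions made for this example only.
// ---------------------------------------------------------------------------
task example_rmw_rdc_red_para(RxDMAChannel chan, bit [63:0] set_mask) {
  bit [63:0] val;
  chan.pio_rd_RDC_RED_PARA(val);   // read the current RED parameters for this channel
  val = val | set_mask;            // set the requested bits
  chan.pio_wr_RDC_RED_PARA(val);   // write the modified value back
}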
1734
1735
1736
1737