/*
 * Copyright 2010-2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Disclaimer: The codes contained in these modules may be specific to
 * the Intel Software Development Platform codenamed Knights Ferry,
 * and the Intel product codenamed Knights Corner, and are not backward
 * compatible with other Intel products. Additionally, Intel will NOT
 * support the codes or instruction set in future products.
 *
 * Intel offers no warranty of any kind regarding the code. This code is
 * licensed on an "AS IS" basis and Intel is not obligated to provide
 * any support, assistance, installation, training, or other services
 * of any kind. Intel is also not obligated to provide any updates,
 * enhancements or extensions. Intel specifically disclaims any warranty
 * of merchantability, non-infringement, fitness for any particular
 * purpose, and any other warranty.
 *
 * Further, Intel disclaims all liability of any kind, including but
 * not limited to liability for infringement of any proprietary rights,
 * relating to the use of the code, even if Intel is notified of the
 * possibility of such liability. Except as expressly stated in an Intel
 * license agreement provided with this code and agreed upon with Intel,
 * no license, express or implied, by estoppel or otherwise, to any
 * intellectual property rights is granted herein.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <asm/io.h>

#include <mic/micscif_smpt.h>
#include <mic/mic_dma_md.h>
#include <mic/mic_dma_api.h>

#define PR_PREFIX "DMA_LIB_MD:"

#ifdef CONFIG_ML1OM
#define MIC_DMA_AES_CHAN_NUM 7
#define is_AES_channel(n) ((n) == MIC_DMA_AES_CHAN_NUM)
#else
#define is_AES_channel(n) ((void)(n), 0)
#endif

#define DMA_CHAN_COOKIE 0xdeadc0d

#define SBOX_DCAR_IM0 (0x1 << 24)	/* APIC interrupt mask bit */
#define SBOX_DCAR_IM1 (0x1 << 25)	/* MSI-X interrupt mask bit */
#define SBOX_DCAR_IS0 (0x1 << 26)	/* Interrupt status */

#define SBOX_DRARHI_SYS_MASK (0x1 << 26)

#ifdef _MIC_SCIF_
static inline uint32_t chan_to_dcr_mask(uint32_t dcr, struct md_mic_dma_chan *chan, struct mic_dma_device *dma_dev)
{
	uint32_t chan_num = chan->ch_num;
	uint32_t owner;

	if (!is_AES_channel(chan_num))
		owner = chan->owner;
	else
		owner = chan->endianness;

	return ((dcr & ~(0x1 << (chan_num * 2))) | (owner << (chan_num * 2)));
}
#endif

static inline uint32_t drar_hi_to_ba_bits(uint32_t drar_hi)
{
	/*
	 * Setting bits 3:2 should generate a DESC_ADDR_ERR but the hardware
	 * ignores these bits currently and doesn't generate the error.
	 */
#ifdef _MIC_SCIF_
	return drar_hi & 0xf;
#else
	return drar_hi & 0x3;
#endif
}

static inline uint32_t physaddr_to_drarhi_ba(phys_addr_t phys_addr)
{
	return drar_hi_to_ba_bits((uint32_t)(phys_addr >> 32));
}

static inline uint32_t size_to_drar_hi_size(uint32_t size)
{
	return (size & 0x1ffff) << 4;
}

static inline uint32_t addr_to_drar_hi_smpt_bits(phys_addr_t mic_phys_addr)
{
	return ((mic_phys_addr >> MIC_SYSTEM_PAGE_SHIFT) & 0x1f) << 21;
}

static inline uint32_t drar_hi_to_smpt(uint32_t drar_hi, uint32_t chan_num)
{
	return ((drar_hi >> 21) & 0x1f);
}
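
/*
 * Illustrative sketch (a hypothetical helper, not part of the driver):
 * how the helpers above compose DRAR_HI for a host-owned ring. Bits 20:4
 * carry the ring size in descriptors, bits 25:21 the SMPT page, bit 26
 * the SYS flag, and bits 3:0 (1:0 on the host side) the upper bits of
 * the ring base address.
 */
#if 0
static uint32_t example_compose_drar_hi(phys_addr_t ring_pa, uint32_t num_desc)
{
	uint32_t drar_hi = size_to_drar_hi_size(num_desc);	/* bits 20:4 */

	drar_hi |= SBOX_DRARHI_SYS_MASK;		/* bit 26: system (host) memory */
	drar_hi |= addr_to_drar_hi_smpt_bits(ring_pa);	/* bits 25:21: SMPT page */
	drar_hi |= physaddr_to_drarhi_ba(ring_pa);	/* upper base-address bits */
	return drar_hi;
}
#endif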
109 | ||
110 | void md_mic_dma_enable_chan(struct mic_dma_device *dma_dev, uint32_t chan_num, bool enable); | |
111 | ||
112 | ||
113 | #ifdef _MIC_SCIF_ | |
114 | /** | |
115 | * md_mic_dma_chan_init_attr - Set channel attributes like owner and endianness | |
116 | * @chan: The DMA channel handle | |
117 | */ | |
118 | void md_mic_dma_chan_init_attr(struct mic_dma_device *dma_dev, | |
119 | struct md_mic_dma_chan *chan) | |
120 | { | |
121 | uint32_t dcr; | |
122 | ||
123 | CHECK_CHAN(chan); | |
124 | ||
125 | dcr = mic_sbox_read_mmio(dma_dev->mm_sbox, SBOX_DCR); | |
126 | dcr = chan_to_dcr_mask(dcr, chan, dma_dev); | |
127 | mic_sbox_write_mmio(dma_dev->mm_sbox, SBOX_DCR, dcr); | |
128 | } | |
129 | #endif | |
130 | ||
131 | /* One time DMA Init API */ | |
132 | void md_mic_dma_init(struct mic_dma_device *dma_dev, uint8_t *mmio_va_base) | |
133 | { | |
134 | int i; | |
135 | #ifdef _MIC_SCIF_ | |
136 | dma_dev->mm_sbox = mic_sbox_md_init(); | |
137 | #else | |
138 | dma_dev->mm_sbox = mmio_va_base; | |
139 | #endif | |
	/* pr_debug("sbox: va=%p\n", dma_dev->mm_sbox); */

	for (i = 0; i < MAX_NUM_DMA_CHAN; i++) {
		atomic_set(&(dma_dev->chan_info[i].in_use), CHAN_AVAILABLE);
		dma_dev->chan_info[i].cookie = DMA_CHAN_COOKIE;
		dma_dev->chan_info[i].dstat_wb_phys = 0;
		dma_dev->chan_info[i].dstat_wb_loc = NULL;
	}
	return;
}

/* One time DMA Uninit API */
void md_mic_dma_uninit(struct mic_dma_device *dma_dev)
{
	return;
}

/**
 * md_mic_dma_request_chan
 * @owner: DMA channel owner: MIC or Host
 *
 * Return - The DMA channel handle or NULL if failed
 *
 * Note: Allocating a Host owned channel is not allowed currently
 */
struct md_mic_dma_chan *md_mic_dma_request_chan(struct mic_dma_device *dma_dev,
						enum md_mic_dma_chan_owner owner)
{
	struct md_mic_dma_chan *tmp = NULL;
	int i;

	for (i = 0; i < MAX_NUM_DMA_CHAN; i++) {
		if (CHAN_AVAILABLE == atomic_cmpxchg(&(dma_dev->chan_info[i].in_use),
						     CHAN_AVAILABLE, CHAN_INUSE)) {
			tmp = &dma_dev->chan_info[i];
			tmp->owner = owner;
			tmp->ch_num = i;
			/*
			 * Default the endianness to MIC_LITTLE_ENDIAN in case
			 * the AES channel is used for clear transfers; the
			 * setting is a don't-care for clear transfers.
			 */
			tmp->endianness = MIC_LITTLE_ENDIAN;
#ifdef _MIC_SCIF_
			md_mic_dma_chan_init_attr(dma_dev, tmp);
#endif
			break;
		}
	}
	return tmp;
}
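
/*
 * Usage sketch (illustrative only; the caller shown is hypothetical):
 * the request/free pairing expected by this API.
 */
#if 0
static int example_use_channel(struct mic_dma_device *dma_dev)
{
	struct md_mic_dma_chan *chan;

	chan = md_mic_dma_request_chan(dma_dev, MIC_DMA_CHAN_MIC_OWNED);
	if (!chan)
		return -EBUSY;	/* all MAX_NUM_DMA_CHAN channels are in use */
	/* ... set up the descriptor ring and issue transfers ... */
	md_mic_dma_free_chan(dma_dev, chan);
	return 0;
}
#endif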

/**
 * md_mic_dma_free_chan - Frees up a DMA channel
 * @chan: The DMA channel handle
 */
void md_mic_dma_free_chan(struct mic_dma_device *dma_dev,
			  struct md_mic_dma_chan *chan)
{
	CHECK_CHAN(chan);
	atomic_set(&(chan->in_use), CHAN_AVAILABLE);
	md_mic_dma_enable_chan(dma_dev, chan->ch_num, false);
}

/**
 * md_mic_dma_enable_chan - Enable/disable the DMA channel
 * @chan_num: The DMA channel
 * @enable: enable/disable
 *
 * The desc ring must be set and the head pointer updated only
 * after the channel has been disabled.
 */
void md_mic_dma_enable_chan(struct mic_dma_device *dma_dev,
			    uint32_t chan_num, bool enable)
{
	uint32_t dcr = mic_sbox_read_mmio(dma_dev->mm_sbox, SBOX_DCR);

	/*
	 * Each channel has its own enable bit (the upper bit of its
	 * two-bit DCR field). Look up sboxDcrReg.
	 */
	if (enable) {
		dcr |= 2 << (chan_num << 1);
	} else {
		dcr &= ~(2 << (chan_num << 1));
	}
	mic_sbox_write_mmio(dma_dev->mm_sbox, SBOX_DCR, dcr);
}
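
/*
 * Worked example (derived from the accesses above and chan_to_dcr_mask):
 * each channel owns a two-bit field in the DCR, with the owner/endianness
 * bit at position (2 * n) and the enable bit at position (2 * n + 1).
 */
#if 0
/* For channel 3: */
uint32_t owner_bit  = 0x1 << (3 * 2);	/* bit 6: owner (endianness on the AES channel) */
uint32_t enable_bit = 0x2 << (3 * 2);	/* bit 7: channel enable */
#endif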
228 | ||
229 | #if 0 | |
230 | uint32_t md_mic_dma_chan_read_completion_count(struct mic_dma_device *dma_dev, struct md_mic_dma_chan *chan) | |
231 | { | |
232 | CHECK_CHAN(chan); | |
233 | ||
234 | return (md_mic_dma_read_mmio(dma_dev, chan->ch_num, REG_DSTAT) & 0xffff); | |
235 | } | |
236 | ||
237 | ||
238 | /* This function needs to be used only in error case */ | |
239 | void update_compcount_and_tail(struct mic_dma_device *dma_dev, struct md_mic_dma_chan *chan) | |
240 | { | |
241 | chan->completion_count = md_mic_dma_chan_read_completion_count(dma_dev, chan); | |
242 | chan->cached_tail = md_mic_dma_chan_read_tail(dma_dev, chan); | |
243 | } | |
244 | #endif | |
245 | void md_mic_dma_chan_set_dstat_wb(struct mic_dma_device *dma_dev, | |
246 | struct md_mic_dma_chan *chan) | |
247 | { | |
248 | uint32_t dstat_wb, dstat_wb_hi; | |
249 | CHECK_CHAN(chan); | |
250 | ||
251 | dstat_wb = (uint32_t)chan->dstat_wb_phys; | |
252 | dstat_wb_hi = chan->dstat_wb_phys >> 32; | |
253 | md_mic_dma_write_mmio(dma_dev, chan->ch_num, REG_DSTATWB_LO, dstat_wb); | |
254 | md_mic_dma_write_mmio(dma_dev, chan->ch_num, REG_DSTATWB_HI, dstat_wb_hi); | |
255 | } | |
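
/*
 * Setup sketch (illustrative; the allocation scheme is an assumption, not
 * necessarily what callers do): the write-back location must be
 * DMA-reachable memory whose physical address is recorded in the channel
 * before this setter is called.
 */
#if 0
static int example_setup_dstat_wb(struct mic_dma_device *dma_dev,
				  struct md_mic_dma_chan *chan)
{
	chan->dstat_wb_loc = kzalloc(sizeof(uint32_t), GFP_KERNEL);
	if (!chan->dstat_wb_loc)
		return -ENOMEM;
	chan->dstat_wb_phys = virt_to_phys(chan->dstat_wb_loc);
	md_mic_dma_chan_set_dstat_wb(dma_dev, chan);
	return 0;
}
#endif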
256 | ||
257 | void md_mic_dma_chan_set_dcherr_msk(struct mic_dma_device *dma_dev, | |
258 | struct md_mic_dma_chan *chan, uint32_t mask) | |
259 | { | |
260 | CHECK_CHAN(chan); | |
261 | md_mic_dma_write_mmio(dma_dev, chan->ch_num, REG_DCHERRMSK, mask); | |
262 | } | |
263 | #if 0 | |
264 | uint32_t md_mic_dma_chan_get_dcherr_msk(struct mic_dma_device *dma_dev, | |
265 | struct md_mic_dma_chan *chan) | |
266 | { | |
267 | CHECK_CHAN(chan); | |
268 | return md_mic_dma_read_mmio(dma_dev, chan->ch_num, REG_DCHERRMSK); | |
269 | } | |
270 | ||
271 | uint32_t md_mic_dma_chan_get_dcherr(struct mic_dma_device *dma_dev, | |
272 | struct md_mic_dma_chan *chan) | |
273 | { | |
274 | CHECK_CHAN(chan); | |
275 | return md_mic_dma_read_mmio(dma_dev, chan->ch_num, REG_DCHERR); | |
276 | } | |
277 | ||
278 | void md_mic_dma_chan_set_dcherr(struct mic_dma_device *dma_dev, | |
279 | struct md_mic_dma_chan *chan, uint32_t value) | |
280 | { | |
281 | CHECK_CHAN(chan); | |
282 | md_mic_dma_write_mmio(dma_dev, chan->ch_num, REG_DCHERR, value); | |
283 | printk("dcherr = %d\n", md_mic_dma_read_mmio(dma_dev, chan->ch_num, REG_DCHERR)); | |
284 | } | |
285 | #endif | |
286 | ||
/**
 * md_mic_dma_chan_set_desc_ring - Configures the DMA channel desc ring
 * @chan: The DMA channel handle
 * @desc_ring_phys_addr: Physical address of the desc ring base. Needs to be
 *                       physically contiguous, wired down memory.
 * @num_desc: Number of descriptors; must be a multiple of the cache line
 *            size. Descriptor size should be determined using
 *            sizeof(union md_mic_dma_desc). The maximum number of
 *            descriptors is defined by MIC_MAX_NUM_DESC_PER_RING.
 */
void md_mic_dma_chan_set_desc_ring(struct mic_dma_device *dma_dev,
				   struct md_mic_dma_chan *chan,
				   phys_addr_t desc_ring_phys_addr,
				   uint32_t num_desc)
{
	uint32_t chan_num;
	uint32_t drar_lo = 0;
	uint32_t drar_hi = 0;

	CHECK_CHAN(chan);
	chan_num = chan->ch_num;
	/*
	 * TODO: Maybe the 2nd condition should be different considering the
	 * size of union md_mic_dma_desc?
	 */
	KASSERT((((num_desc) <= MIC_MAX_NUM_DESC_PER_RING) &&
		 (ALIGN((num_desc - (L1_CACHE_BYTES - 1)), L1_CACHE_BYTES) == num_desc)),
		"num_desc > max or not multiple of cache line num 0x%x", num_desc);

	md_mic_dma_enable_chan(dma_dev, chan_num, false);

	drar_hi = size_to_drar_hi_size(num_desc);

	if (MIC_DMA_CHAN_HOST_OWNED == chan->owner) {
		drar_hi |= SBOX_DRARHI_SYS_MASK;
		drar_hi |= addr_to_drar_hi_smpt_bits(desc_ring_phys_addr);
	}
	drar_lo = (uint32_t)desc_ring_phys_addr;
	drar_hi |= physaddr_to_drarhi_ba(desc_ring_phys_addr);
	md_mic_dma_write_mmio(dma_dev, chan_num, REG_DRAR_LO, drar_lo);
	md_mic_dma_write_mmio(dma_dev, chan_num, REG_DRAR_HI, drar_hi);
	chan->num_desc_in_ring = num_desc;
	pr_debug("md_mic_dma_chan_set_desc_ring addr=0x%llx num=%d drar_hi.bits.pageno 0x%x\n",
		 desc_ring_phys_addr, num_desc,
		 (uint32_t)(desc_ring_phys_addr >> MIC_SYSTEM_PAGE_SHIFT));
	chan->cached_tail = md_mic_dma_chan_read_tail(dma_dev, chan);

	md_mic_dma_enable_chan(dma_dev, chan_num, true);
}
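
/*
 * Allocation sketch (illustrative; num_desc and the allocator are example
 * choices, not prescribed by this API): obtaining a physically contiguous
 * ring before programming it. kzalloc returns physically contiguous memory.
 */
#if 0
static int example_setup_ring(struct mic_dma_device *dma_dev,
			      struct md_mic_dma_chan *chan)
{
	uint32_t num_desc = 128;	/* multiple of L1_CACHE_BYTES, <= MIC_MAX_NUM_DESC_PER_RING */
	union md_mic_dma_desc *ring;

	ring = kzalloc(num_desc * sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;
	md_mic_dma_chan_set_desc_ring(dma_dev, chan, virt_to_phys(ring),
				      num_desc);
	return 0;
}
#endif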
336 | ||
337 | uint32_t md_mic_dma_chan_read_head(struct mic_dma_device *dma_dev, struct md_mic_dma_chan *chan) | |
338 | { | |
339 | CHECK_CHAN(chan); | |
340 | ||
341 | return md_mic_dma_read_mmio(dma_dev, chan->ch_num, REG_DHPR); | |
342 | } | |
343 | ||
344 | uint32_t md_mic_dma_chan_read_tail(struct mic_dma_device *dma_dev, struct md_mic_dma_chan *chan) | |
345 | { | |
346 | CHECK_CHAN(chan); | |
347 | ||
348 | return md_mic_dma_read_mmio(dma_dev, chan->ch_num, REG_DTPR); | |
349 | } | |
350 | ||
351 | /** | |
352 | * md_mic_dma_chan_intr_pending - Reads interrupt status to figure out | |
353 | * if an interrupt is pending. | |
354 | * @chan: The DMA channel handle. | |
355 | */ | |
356 | bool md_mic_dma_chan_intr_pending(struct mic_dma_device *dma_dev, struct md_mic_dma_chan *chan) | |
357 | { | |
358 | uint32_t dcar; | |
359 | CHECK_CHAN(chan); | |
360 | ||
361 | dcar = md_mic_dma_read_mmio(dma_dev, chan->ch_num, REG_DCAR); | |
362 | return (dcar >> 26) & 0x1; | |
363 | } | |
364 | ||
365 | /** | |
366 | * md_mic_dma_chan_mask_intr - Mask or disable interrupts | |
367 | * @chan: The DMA channel handle | |
368 | * | |
369 | * Masking interrupts will also acknowledge any pending | |
370 | * interrupts on the channel. | |
371 | */ | |
372 | void md_mic_dma_chan_mask_intr(struct mic_dma_device *dma_dev, struct md_mic_dma_chan *chan) | |
373 | { | |
374 | uint32_t dcar; | |
375 | uint32_t chan_num; | |
376 | CHECK_CHAN(chan); | |
377 | chan_num = chan->ch_num; | |
378 | ||
379 | dcar = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DCAR); | |
380 | ||
381 | if (MIC_DMA_CHAN_MIC_OWNED == chan->owner) | |
382 | dcar |= SBOX_DCAR_IM0; | |
383 | else | |
384 | dcar |= SBOX_DCAR_IM1; | |
385 | ||
386 | md_mic_dma_write_mmio(dma_dev, chan_num, REG_DCAR, dcar); | |
	/*
	 * This read completes only after the previous write has completed.
	 * It guarantees that the interrupt has been acknowledged to the SBOX
	 * DMA block by forcing the preceding write to be committed. This is
	 * the actual fix for HSD# 3497216, based on the hypothesis that the
	 * previous write was not truly complete: writes are reported as
	 * complete as soon as the transactions are accepted by the SBOX,
	 * not necessarily once they are committed to memory.
	 */
	dcar = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DCAR);
}

/**
 * md_mic_dma_chan_unmask_intr - Unmask or enable interrupts
 * @chan: The DMA channel handle
 */
void md_mic_dma_chan_unmask_intr(struct mic_dma_device *dma_dev, struct md_mic_dma_chan *chan)
{
	uint32_t dcar;
	uint32_t chan_num;
	CHECK_CHAN(chan);
	chan_num = chan->ch_num;

	dcar = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DCAR);

	if (MIC_DMA_CHAN_MIC_OWNED == chan->owner)
		dcar &= ~SBOX_DCAR_IM0;
	else
		dcar &= ~SBOX_DCAR_IM1;

	md_mic_dma_write_mmio(dma_dev, chan_num, REG_DCAR, dcar);
	/*
	 * Read back DCAR for the same reason as in md_mic_dma_chan_mask_intr():
	 * the read forces the preceding write to commit (fix for HSD# 3497216).
	 */
	dcar = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DCAR);
}
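
/*
 * Interrupt-flow sketch (illustrative; the real handler lives elsewhere):
 * a plausible ordering of the primitives above in a channel handler.
 */
#if 0
static void example_chan_isr(struct mic_dma_device *dma_dev,
			     struct md_mic_dma_chan *chan)
{
	if (!md_mic_dma_chan_intr_pending(dma_dev, chan))
		return;
	/* Masking also acknowledges the pending interrupt (see above). */
	md_mic_dma_chan_mask_intr(dma_dev, chan);
	/* ... reap completed descriptors via the tail pointer / DSTAT ... */
	md_mic_dma_chan_unmask_intr(dma_dev, chan);
}
#endif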

/**
 * md_mic_dma_chan_get_desc_ring_phys - Compute the value of the descriptor ring
 * base physical address from the descriptor ring attributes register.
 * @dma_dev: DMA device.
 * @chan: The DMA channel handle
 */
phys_addr_t
md_mic_dma_chan_get_desc_ring_phys(struct mic_dma_device *dma_dev, struct md_mic_dma_chan *chan)
{
	phys_addr_t phys, phys_hi;
	uint32_t phys_lo, chan_num, drar_hi;

	CHECK_CHAN(chan);
	chan_num = chan->ch_num;
	phys_lo = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DRAR_LO);
	drar_hi = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DRAR_HI);
	phys_hi = drar_hi_to_ba_bits(drar_hi);
	phys_hi |= drar_hi_to_smpt(drar_hi, chan_num) << 2;

	phys = phys_lo | (phys_hi << 32);
	return phys;
}

/**
 * md_mic_dma_chan_get_dstatwb_phys - Compute the value of the DSTAT write back
 * physical address.
 * @dma_dev: DMA device.
 * @chan: The DMA channel handle
 */
phys_addr_t md_mic_dma_chan_get_dstatwb_phys(struct mic_dma_device *dma_dev,
					     struct md_mic_dma_chan *chan)
{
	uint32_t reg, chan_num;
	phys_addr_t phys;

	CHECK_CHAN(chan);
	chan_num = chan->ch_num;
	reg = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DSTATWB_HI);
	phys = reg;
	reg = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DSTATWB_LO);

	phys = phys << 32 | reg;
	return phys;
}

/**
 * md_mic_dma_prep_nop_desc - Prepares a NOP descriptor.
 * @desc: Descriptor to be populated.
 *
 * This descriptor is used to pad a cacheline if the previous
 * descriptor does not end on a cacheline boundary.
 */
void md_mic_dma_prep_nop_desc(union md_mic_dma_desc *desc)
{
	KASSERT((desc != NULL), "NULL desc");
487 | ||
488 | desc->qwords.qw0 = 0; | |
489 | desc->qwords.qw1 = 0; | |
490 | desc->desc.nop.type = 0; | |
491 | } | |
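
/*
 * Padding sketch (illustrative; assumes sizeof(union md_mic_dma_desc) is
 * 16 bytes, i.e. the two qwords above, so four descriptors fit in a
 * 64-byte cacheline): filling the rest of a cacheline with NOPs.
 */
#if 0
static void example_pad_to_cacheline(union md_mic_dma_desc *ring,
				     uint32_t next_idx)
{
	uint32_t per_line = L1_CACHE_BYTES / sizeof(union md_mic_dma_desc);

	while (next_idx % per_line)
		md_mic_dma_prep_nop_desc(&ring[next_idx++]);
}
#endif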
492 | ||
493 | /* Only Debug Code Below */ | |
494 | ||
495 | /** | |
496 | * md_mic_dma_print_debug - Print channel debug information | |
497 | * @chan: The DMA channel handle | |
498 | * @sbuf: Print to an sbuf if not NULL else prints to console | |
499 | */ | |
500 | void md_mic_dma_print_debug(struct mic_dma_device *dma_dev, struct md_mic_dma_chan *chan) | |
501 | { | |
502 | uint32_t dcr; | |
503 | uint32_t dcar; | |
504 | uint32_t dtpr; | |
505 | uint32_t dhpr; | |
506 | uint32_t drar_lo; | |
507 | uint32_t drar_hi; | |
508 | uint32_t dstat; | |
509 | uint32_t chan_num = chan->ch_num; | |
510 | ||
511 | dcr = mic_sbox_read_mmio(dma_dev->mm_sbox, SBOX_DCR); | |
512 | dcar = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DCAR); | |
513 | dtpr = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DTPR); | |
514 | dhpr = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DHPR); | |
515 | drar_lo = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DRAR_LO); | |
516 | drar_hi = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DRAR_HI); | |
517 | dstat = md_mic_dma_read_mmio(dma_dev, chan_num, REG_DSTAT); | |
518 | pr_debug(PR_PREFIX "Chan_Num 0x%x DCR 0x%x DCAR 0x%x DTPR 0x%x" | |
519 | "DHPR 0x%x DRAR_HI 0x%x DRAR_LO 0x%x DSTAT 0x%x\n", | |
520 | chan_num, dcr, dcar, dtpr, dhpr, drar_hi, drar_lo, dstat); | |
521 | pr_debug(PR_PREFIX "DCR 0x%x\n", dcr); | |
522 | } |