/*
 * Copyright 2010-2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Disclaimer: The codes contained in these modules may be specific to
 * the Intel Software Development Platform codenamed Knights Ferry,
 * and the Intel product codenamed Knights Corner, and are not backward
 * compatible with other Intel products. Additionally, Intel will NOT
 * support the codes or instruction set in future products.
 *
 * Intel offers no warranty of any kind regarding the code. This code is
 * licensed on an "AS IS" basis and Intel is not obligated to provide
 * any support, assistance, installation, training, or other services
 * of any kind. Intel is also not obligated to provide any updates,
 * enhancements or extensions. Intel specifically disclaims any warranty
 * of merchantability, non-infringement, fitness for any particular
 * purpose, and any other warranty.
 *
 * Further, Intel disclaims all liability of any kind, including but
 * not limited to liability for infringement of any proprietary rights,
 * relating to the use of the code, even if Intel is notified of the
 * possibility of such liability. Except as expressly stated in an Intel
 * license agreement provided with this code and agreed upon with Intel,
 * no license, express or implied, by estoppel or otherwise, to any
 * intellectual property rights is granted herein.
 */

#ifndef MIC_DMA_MD_H
#define MIC_DMA_MD_H

#include <linux/types.h>        /* uint32_t, bool, dma_addr_t, phys_addr_t */
#include <linux/atomic.h>       /* atomic_t */

#include "mic_sbox_md.h"
#include "micsboxdefine.h"

#define MAX_NUM_DMA_CHAN 8
/*
 * Channels 0 through __LAST_HOST_CHAN_NUM are assumed to be owned by the
 * host; keep that in mind when changing this value.
 */
#define __LAST_HOST_CHAN_NUM 3

#ifdef _MIC_SCIF_
static inline int first_dma_chan(void)
{
        return __LAST_HOST_CHAN_NUM + 1;
}

static inline int last_dma_chan(void)
{
        return MAX_NUM_DMA_CHAN - 1;
}
#else
static inline int first_dma_chan(void)
{
        return 0;
}

static inline int last_dma_chan(void)
{
        return __LAST_HOST_CHAN_NUM;
}
#endif
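
/*
 * Example (illustrative sketch; init_chan() is a hypothetical helper):
 * because the channel range is compile-time partitioned by _MIC_SCIF_,
 * code that walks the locally owned channels looks identical on host
 * and card:
 *
 *        int i;
 *
 *        for (i = first_dma_chan(); i <= last_dma_chan(); i++)
 *                init_chan(i);
 */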

enum md_mic_dma_chan_reg {
        REG_DCAR = 0,
        REG_DHPR,
        REG_DTPR,
        REG_DAUX_HI,
        REG_DAUX_LO,
        REG_DRAR_HI,
        REG_DRAR_LO,
        REG_DITR,
        REG_DSTAT,
        REG_DSTATWB_LO,
        REG_DSTATWB_HI,
        REG_DCHERR,
        REG_DCHERRMSK,
};

/*
 * The predefined L1_CACHE_SHIFT is 6 on Red Hat and 7 on SUSE kernels;
 * force the 64-byte MIC cache line geometry here.
 */
#undef L1_CACHE_SHIFT
#define L1_CACHE_SHIFT	6
#undef L1_CACHE_BYTES
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)

enum dma_chan_flags {
        CHAN_AVAILABLE = 2,
        CHAN_INUSE = 3
};

/* Maximum DMA transfer size for a single memory copy descriptor */
#define MIC_MAX_DMA_XFER_SIZE	((1U * 1024 * 1024) - L1_CACHE_BYTES)

/*
 * TODO: I think it should be 128K - 64 (even 128K - 4 may work).
 * SIVA: Check this in the end.
 */
/*
 * The maximum number of descriptors in the DMA descriptor ring is 128K - 1,
 * but since the value needs to be a multiple of the cache line size it is
 * set to 128K - 64.
 */
#define MIC_MAX_NUM_DESC_PER_RING	((128 * 1024) - L1_CACHE_BYTES)
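
/*
 * Worked example (added for clarity): each descriptor is two 64-bit quadwords
 * (16 bytes, see union md_mic_dma_desc below), so a maximally sized ring of
 * (128 * 1024 - 64) descriptors occupies (131072 - 64) * 16 bytes, i.e.
 * 2 MB - 1 KB of physically contiguous memory.
 */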

/**
 * enum md_mic_dma_chan_owner - Memory copy DMA channels can be host- or
 * MIC-owned. The AES channel can only be MIC-owned.
 */
enum md_mic_dma_chan_owner {
        MIC_DMA_CHAN_MIC_OWNED = 0,
        MIC_DMA_CHAN_HOST_OWNED
};

/**
 * enum md_mic_dma_aes_endianness - Endianness needs to be provided
 * only for the AES channel.
 */
enum md_mic_dma_aes_endianness {
        /*
         * The following two values are the opposite of what is given in the
         * content protection HAS, but this is how it is implemented in RTL.
         */
        MIC_BIG_ENDIAN = 0,
        MIC_LITTLE_ENDIAN
};

/**
 * struct md_mic_dma_chan - DMA channel specific fields (opaque to clients).
 * @ch_num: channel number
 * @in_use: non-zero if the channel is in use, zero otherwise
 * @owner: host or MIC; required for masking/unmasking interrupts
 *         and enabling channels
 * @endianness: required for enabling the AES channel
 * @cookie: debug cookie to identify this structure
 * @num_desc_in_ring: number of descriptors in the descriptor ring
 *                    for this channel
 * @cached_tail: last tail value read from the hardware
 * @completion_count: number of descriptors completed so far
 * @dstat_wb_loc: virtual address of the DSTAT write-back location
 * @dstat_wb_phys: physical address of the DSTAT write-back location
 */
struct md_mic_dma_chan {
        int ch_num;
        atomic_t in_use;
        enum md_mic_dma_chan_owner owner;
        enum md_mic_dma_aes_endianness endianness;
        int cookie;
        uint32_t num_desc_in_ring;
        uint32_t cached_tail;
        uint32_t completion_count;
        void *dstat_wb_loc;
        dma_addr_t dstat_wb_phys;
        /* Add debug/profiling stats here */
};

/**
 * struct mic_dma_device - MIC DMA device specific structure.
 * @chan_info: static array of MIC DMA channel specific structures
 * @mm_sbox: mapped SBOX MMIO region used to access the DMA registers
 */
struct mic_dma_device {
        struct md_mic_dma_chan chan_info[MAX_NUM_DMA_CHAN];
        void *mm_sbox;
};

/**
 * union md_mic_dma_desc - Opaque data structure for the DMA descriptor format.
 */
/* TODO: Change bitfields to portable masks */
union md_mic_dma_desc {
        union {
                struct {
                        uint64_t rsvd0;
                        uint64_t rsvd1:60;
                        uint64_t type:4;
                } nop;
                struct {
                        uint64_t sap:40;
                        uint64_t index:3;
                        uint64_t rsvd0:3;
                        uint64_t length:14;
                        uint64_t rsvd1:4;
                        uint64_t dap:40;
                        uint64_t rsvd2:15;
                        uint64_t twb:1;
                        uint64_t intr:1;
                        uint64_t c:1;
                        uint64_t co:1;
                        uint64_t ecy:1;
                        uint64_t type:4;
                } memcopy;
                struct {
                        uint64_t data;
                        uint64_t dap:40;
                        uint64_t rsvd0:19;
                        uint64_t intr:1;
                        uint64_t type:4;
                } status;
                struct {
                        uint64_t data:32;
                        uint64_t rsvd0:32;
                        uint64_t dap:40;
                        uint64_t rsvd1:20;
                        uint64_t type:4;
                } general;
                struct {
                        uint64_t data;
                        uint64_t rsvd0:53;
                        uint64_t cs:1;
                        uint64_t index:3;
                        uint64_t h:1;
                        uint64_t sel:2;
                        uint64_t type:4;
                } keynoncecnt;
                struct {
                        uint64_t skap:40;
                        uint64_t ski:3;
                        uint64_t rsvd0:21;
                        uint64_t rsvd1:51;
                        uint64_t di:3;
                        uint64_t rsvd2:6;
                        uint64_t type:4;
                } key;
        } desc;
        struct {
                uint64_t qw0;
                uint64_t qw1;
        } qwords;
};
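
/*
 * Sanity-check sketch (illustrative, assuming <linux/bug.h> is available):
 * every layout above packs into exactly two 64-bit quadwords, which a
 * translation unit including this header could assert at compile time,
 * e.g. in its init path (BUILD_BUG_ON must appear inside a function):
 *
 *        BUILD_BUG_ON(sizeof(union md_mic_dma_desc) != 2 * sizeof(uint64_t));
 */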

/* Initialization functions */
void md_mic_dma_init(struct mic_dma_device *dma_dev, uint8_t *mmio_va_base);
void md_mic_dma_uninit(struct mic_dma_device *dma_dev);
void md_mic_dma_chan_init_attr(struct mic_dma_device *dma_dev,
                               struct md_mic_dma_chan *chan);
void md_mic_dma_chan_mask_intr(struct mic_dma_device *dma_dev,
                               struct md_mic_dma_chan *chan);
void md_mic_dma_chan_unmask_intr(struct mic_dma_device *dma_dev,
                                 struct md_mic_dma_chan *chan);
void md_mic_dma_chan_set_desc_ring(struct mic_dma_device *dma_dev,
                                   struct md_mic_dma_chan *chan,
                                   phys_addr_t desc_ring_phys_addr,
                                   uint32_t num_desc);
void md_mic_dma_enable_chan(struct mic_dma_device *dma_dev, uint32_t chan_num,
                            bool enable);
/* API */
struct md_mic_dma_chan *md_mic_dma_request_chan(struct mic_dma_device *dma_dev,
                                                enum md_mic_dma_chan_owner owner);
void md_mic_dma_free_chan(struct mic_dma_device *dma_dev,
                          struct md_mic_dma_chan *chan);
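
/*
 * Usage sketch (error handling abbreviated; desc_ring_phys and ring_size are
 * hypothetical caller-supplied values): a client allocates a channel, points
 * it at a descriptor ring it has allocated, and releases it when done:
 *
 *        struct md_mic_dma_chan *ch;
 *
 *        ch = md_mic_dma_request_chan(dma_dev, MIC_DMA_CHAN_HOST_OWNED);
 *        if (!ch)
 *                return -ENODEV;
 *        md_mic_dma_chan_set_desc_ring(dma_dev, ch, desc_ring_phys, ring_size);
 *        md_mic_dma_enable_chan(dma_dev, ch->ch_num, true);
 *        ...
 *        md_mic_dma_free_chan(dma_dev, ch);
 */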

static uint32_t mic_dma_reg[MAX_NUM_DMA_CHAN][13] = {
        {SBOX_DCAR_0, SBOX_DHPR_0, SBOX_DTPR_0, SBOX_DAUX_HI_0, SBOX_DAUX_LO_0,
         SBOX_DRAR_HI_0, SBOX_DRAR_LO_0, SBOX_DITR_0, SBOX_DSTAT_0,
         SBOX_DSTATWB_LO_0, SBOX_DSTATWB_HI_0, SBOX_DCHERR_0, SBOX_DCHERRMSK_0},
        {SBOX_DCAR_1, SBOX_DHPR_1, SBOX_DTPR_1, SBOX_DAUX_HI_1, SBOX_DAUX_LO_1,
         SBOX_DRAR_HI_1, SBOX_DRAR_LO_1, SBOX_DITR_1, SBOX_DSTAT_1,
         SBOX_DSTATWB_LO_1, SBOX_DSTATWB_HI_1, SBOX_DCHERR_1, SBOX_DCHERRMSK_1},
        {SBOX_DCAR_2, SBOX_DHPR_2, SBOX_DTPR_2, SBOX_DAUX_HI_2, SBOX_DAUX_LO_2,
         SBOX_DRAR_HI_2, SBOX_DRAR_LO_2, SBOX_DITR_2, SBOX_DSTAT_2,
         SBOX_DSTATWB_LO_2, SBOX_DSTATWB_HI_2, SBOX_DCHERR_2, SBOX_DCHERRMSK_2},
        {SBOX_DCAR_3, SBOX_DHPR_3, SBOX_DTPR_3, SBOX_DAUX_HI_3, SBOX_DAUX_LO_3,
         SBOX_DRAR_HI_3, SBOX_DRAR_LO_3, SBOX_DITR_3, SBOX_DSTAT_3,
         SBOX_DSTATWB_LO_3, SBOX_DSTATWB_HI_3, SBOX_DCHERR_3, SBOX_DCHERRMSK_3},
        {SBOX_DCAR_4, SBOX_DHPR_4, SBOX_DTPR_4, SBOX_DAUX_HI_4, SBOX_DAUX_LO_4,
         SBOX_DRAR_HI_4, SBOX_DRAR_LO_4, SBOX_DITR_4, SBOX_DSTAT_4,
         SBOX_DSTATWB_LO_4, SBOX_DSTATWB_HI_4, SBOX_DCHERR_4, SBOX_DCHERRMSK_4},
        {SBOX_DCAR_5, SBOX_DHPR_5, SBOX_DTPR_5, SBOX_DAUX_HI_5, SBOX_DAUX_LO_5,
         SBOX_DRAR_HI_5, SBOX_DRAR_LO_5, SBOX_DITR_5, SBOX_DSTAT_5,
         SBOX_DSTATWB_LO_5, SBOX_DSTATWB_HI_5, SBOX_DCHERR_5, SBOX_DCHERRMSK_5},
        {SBOX_DCAR_6, SBOX_DHPR_6, SBOX_DTPR_6, SBOX_DAUX_HI_6, SBOX_DAUX_LO_6,
         SBOX_DRAR_HI_6, SBOX_DRAR_LO_6, SBOX_DITR_6, SBOX_DSTAT_6,
         SBOX_DSTATWB_LO_6, SBOX_DSTATWB_HI_6, SBOX_DCHERR_6, SBOX_DCHERRMSK_6},
        {SBOX_DCAR_7, SBOX_DHPR_7, SBOX_DTPR_7, SBOX_DAUX_HI_7, SBOX_DAUX_LO_7,
         SBOX_DRAR_HI_7, SBOX_DRAR_LO_7, SBOX_DITR_7, SBOX_DSTAT_7,
         SBOX_DSTATWB_LO_7, SBOX_DSTATWB_HI_7, SBOX_DCHERR_7, SBOX_DCHERRMSK_7}
};

static __always_inline uint32_t
md_mic_dma_read_mmio(struct mic_dma_device *dma_dev,
                     int chan, enum md_mic_dma_chan_reg reg)
{
        return mic_sbox_read_mmio(dma_dev->mm_sbox, mic_dma_reg[chan][reg]);
}

static __always_inline void
md_mic_dma_write_mmio(struct mic_dma_device *dma_dev, int chan,
                      enum md_mic_dma_chan_reg reg, uint32_t value)
{
        mic_sbox_write_mmio(dma_dev->mm_sbox, mic_dma_reg[chan][reg], value);
}

#ifdef DEBUG
#ifndef KASSERT
#define KASSERT(x, y, ...)                              \
        do {                                            \
                if (!(x))                               \
                        printk(y, ##__VA_ARGS__);       \
                BUG_ON(!(x));                           \
        } while (0)
#endif
#define CHECK_CHAN(chan)                                        \
        do {                                                    \
                KASSERT((chan), "NULL DMA channel\n");          \
                KASSERT((DMA_CHAN_COOKIE == chan->cookie),      \
                        "Bad DMA channel cookie 0x%x\n", chan->cookie); \
                KASSERT(atomic_read(&(chan->in_use)), "DMA channel not in use\n"); \
        } while (0)
#else /* DEBUG */
#ifndef KASSERT
#define KASSERT(x, y, ...)                              \
        do {                                            \
                if (!(x))                               \
                        printk(y, ##__VA_ARGS__);       \
                BUG_ON(!(x));                           \
        } while (0)
#endif
#define CHECK_CHAN(chan)
#endif /* DEBUG */

struct mic_dma_ctx_t;
void md_mic_dma_chan_set_dstat_wb(struct mic_dma_device *dma_dev,
                                  struct md_mic_dma_chan *chan);

void md_mic_dma_chan_set_dcherr_msk(struct mic_dma_device *dma_dev,
                                    struct md_mic_dma_chan *chan,
                                    uint32_t mask);

static __always_inline void
md_mic_dma_chan_write_head(struct mic_dma_device *dma_dev,
                           struct md_mic_dma_chan *chan, uint32_t head)
{
        uint32_t chan_num;

        CHECK_CHAN(chan);
        chan_num = chan->ch_num;
        KASSERT((head < chan->num_desc_in_ring),
                "head 0x%x >= num_desc_in_ring 0x%x chan_num %d\n",
                head, chan->num_desc_in_ring, chan_num);
        md_mic_dma_write_mmio(dma_dev, chan_num, REG_DHPR, head);
}

uint32_t md_mic_dma_chan_read_head(struct mic_dma_device *dma_dev,
                                   struct md_mic_dma_chan *chan);
uint32_t md_mic_dma_chan_read_tail(struct mic_dma_device *dma_dev,
                                   struct md_mic_dma_chan *chan);

#define TAIL_PTR_READ_RETRIES	500000
#define HW_CMP_CNT_MASK		0x1ffff
/*
 * md_avail_desc_ring_space - returns the number of descriptor slots that can
 * safely be filled between @head and the (possibly stale) cached tail. If the
 * cached value does not show enough space, the tail is re-read from hardware
 * and the check is retried up to TAIL_PTR_READ_RETRIES times before giving up
 * and returning 0.
 */
static __always_inline uint32_t
md_avail_desc_ring_space(struct mic_dma_device *dma_dev, bool is_astep,
                         struct md_mic_dma_chan *chan, uint32_t head,
                         uint32_t required)
{
        uint32_t count = 0, max_num_retries = TAIL_PTR_READ_RETRIES;
        uint32_t num_retries = 0;
        uint32_t tail = chan->cached_tail;
retry:
        if (head > tail)
                count = tail + (chan->num_desc_in_ring - head);
        else if (tail > head)
                count = tail - head;
        else
                return chan->num_desc_in_ring - 1;

        if (count > required)
                return count - 1;

        if (is_astep)
                tail = md_mic_dma_chan_read_tail(dma_dev, chan);
        else
                tail = HW_CMP_CNT_MASK &
                       md_mic_dma_read_mmio(dma_dev, chan->ch_num, REG_DSTAT);
        chan->cached_tail = tail;
        num_retries++;
        if (num_retries == max_num_retries)
                return 0;
        cpu_relax();
        goto retry;
}
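
/*
 * Worked example (added for clarity): with num_desc_in_ring = 1024,
 * head = 1000 and cached_tail = 10, the ring wraps, so
 * count = 10 + (1024 - 1000) = 34 free slots; for any required < 34 the
 * function returns 33, keeping one slot unused so the head never catches
 * the tail. head == tail means the ring is empty and 1023 is returned.
 */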

bool md_mic_dma_chan_intr_pending(struct mic_dma_device *dma_dev,
                                  struct md_mic_dma_chan *chan);
phys_addr_t md_mic_dma_chan_get_desc_ring_phys(struct mic_dma_device *dma_dev,
                                               struct md_mic_dma_chan *chan);
phys_addr_t md_mic_dma_chan_get_dstatwb_phys(struct mic_dma_device *dma_dev,
                                             struct md_mic_dma_chan *chan);

/* Descriptor programming helpers */
void md_mic_dma_prep_nop_desc(union md_mic_dma_desc *desc);

/**
 * md_mic_dma_memcpy_desc - Prepares a memory copy descriptor
 * @desc: the descriptor to initialize
 * @src_phys: source physical address; must be cache line aligned
 * @dst_phys: destination physical address; must be cache line aligned
 * @size: size of the transfer; must be non-zero, a multiple of the cache
 *        line size, and no larger than MIC_MAX_DMA_XFER_SIZE
 */
static __always_inline void
md_mic_dma_memcpy_desc(union md_mic_dma_desc *desc,
                       uint64_t src_phys,
                       uint64_t dst_phys,
                       uint64_t size)
{
        KASSERT((desc != 0), "NULL desc\n");
        KASSERT((ALIGN(src_phys - (L1_CACHE_BYTES - 1), L1_CACHE_BYTES) == src_phys),
                "src not cache line aligned 0x%llx\n", (unsigned long long)src_phys);
        KASSERT((ALIGN(dst_phys - (L1_CACHE_BYTES - 1), L1_CACHE_BYTES) == dst_phys),
                "dst not cache line aligned 0x%llx\n", (unsigned long long)dst_phys);
        KASSERT(((size != 0) && (size <= MIC_MAX_DMA_XFER_SIZE) &&
                 (ALIGN(size - (L1_CACHE_BYTES - 1), L1_CACHE_BYTES) == size)),
                "bad size 0x%llx: zero, unaligned or > MIC_MAX_DMA_XFER_SIZE\n",
                (unsigned long long)size);

        desc->qwords.qw0 = 0;
        desc->qwords.qw1 = 0;
        desc->desc.memcopy.type = 1;
        desc->desc.memcopy.sap = src_phys;
        desc->desc.memcopy.dap = dst_phys;
        desc->desc.memcopy.length = (size >> L1_CACHE_SHIFT);
}
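
/*
 * Usage sketch (addresses are made-up examples; in real use both must be
 * cache line aligned): copy one 4 KB page from 0x100000 to 0x200000:
 *
 *        union md_mic_dma_desc d;
 *
 *        md_mic_dma_memcpy_desc(&d, 0x100000, 0x200000, 4096);
 *
 * d.desc.memcopy.length is then 4096 >> 6 = 64 cache lines.
 */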

/**
 * md_mic_dma_prep_status_desc - Prepares a status descriptor
 * @desc: the descriptor to initialize
 * @data: value the DMA engine writes to dst_phys
 * @dst_phys: destination physical address
 * @generate_intr: generate an interrupt when the DMA HW completes
 *                 processing this descriptor
 */
static __always_inline void
md_mic_dma_prep_status_desc(union md_mic_dma_desc *desc, uint64_t data,
                            uint64_t dst_phys, bool generate_intr)
{
        KASSERT((desc != 0), "NULL desc\n");

        desc->qwords.qw0 = 0;
        desc->qwords.qw1 = 0;
        desc->desc.status.type = 2;
        desc->desc.status.data = data;
        desc->desc.status.dap = dst_phys;
        if (generate_intr)
                desc->desc.status.intr = 1;
}
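
/*
 * Usage sketch (status_phys and cookie are hypothetical caller-supplied
 * values): a status descriptor is typically queued after a batch of memory
 * copy descriptors so the engine writes a completion marker to status_phys
 * and optionally raises an interrupt:
 *
 *        md_mic_dma_prep_status_desc(&d, cookie, status_phys, true);
 */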

/**
 * md_mic_dma_prep_gp_desc - Prepares a general purpose descriptor
 * @desc: the descriptor to initialize
 * @data: value the DMA engine writes to dst_phys
 * @dst_phys: destination physical address
 */
static __always_inline void
md_mic_dma_prep_gp_desc(union md_mic_dma_desc *desc, uint32_t data,
                        uint64_t dst_phys)
{
        KASSERT((desc != 0), "NULL desc\n");

        desc->qwords.qw0 = 0;
        desc->qwords.qw1 = 0;
        desc->desc.general.type = 3;
        desc->desc.general.data = data;
        desc->desc.general.dap = dst_phys;
}

/* Debug functions */
void md_mic_dma_print_debug(struct mic_dma_device *dma_dev,
                            struct md_mic_dma_chan *chan);

#endif /* MIC_DMA_MD_H */