/*
 * Copyright 2010-2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Disclaimer: The codes contained in these modules may be specific to
 * the Intel Software Development Platform codenamed Knights Ferry,
 * and the Intel product codenamed Knights Corner, and are not backward
 * compatible with other Intel products. Additionally, Intel will NOT
 * support the codes or instruction set in future products.
 *
 * Intel offers no warranty of any kind regarding the code. This code is
 * licensed on an "AS IS" basis and Intel is not obligated to provide
 * any support, assistance, installation, training, or other services
 * of any kind. Intel is also not obligated to provide any updates,
 * enhancements or extensions. Intel specifically disclaims any warranty
 * of merchantability, non-infringement, fitness for any particular
 * purpose, and any other warranty.
 *
 * Further, Intel disclaims all liability of any kind, including but
 * not limited to liability for infringement of any proprietary rights,
 * relating to the use of the code, even if Intel is notified of the
 * possibility of such liability. Except as expressly stated in an Intel
 * license agreement provided with this code and agreed upon with Intel,
 * no license, express or implied, by estoppel or otherwise, to any
 * intellectual property rights is granted herein.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include "micsboxdefine.h"
#define MAX_NUM_DMA_CHAN 8
/*
 * WE ASSUME channels 0 to __LAST_HOST_CHAN_NUM are owned by the host.
 * Keep this in mind when changing this value.
 */
#define __LAST_HOST_CHAN_NUM	3
/*
 * Card-side (SCIF) builds own the channels above __LAST_HOST_CHAN_NUM;
 * host builds own channels 0 to __LAST_HOST_CHAN_NUM. The _MIC_SCIF_
 * guard follows the MPSS convention for card-side compilation.
 */
#ifdef _MIC_SCIF_
static inline int first_dma_chan(void)
{
        return __LAST_HOST_CHAN_NUM + 1;
}

static inline int last_dma_chan(void)
{
        return MAX_NUM_DMA_CHAN - 1;
}
#else
static inline int first_dma_chan(void)
{
        return 0;
}

static inline int last_dma_chan(void)
{
        return __LAST_HOST_CHAN_NUM;
}
#endif
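
/*
 * Illustrative sketch (not part of the original API): counting the channels
 * this build owns with the helpers above. With __LAST_HOST_CHAN_NUM == 3,
 * the host build walks channels 0..3 and the card build walks 4..7, so both
 * sides own four channels. num_owned_dma_chans() is a hypothetical name.
 */
static inline int num_owned_dma_chans(void)
{
        return last_dma_chan() - first_dma_chan() + 1;
}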
/* Register index into mic_dma_reg[chan][reg]; order matches the table below */
enum md_mic_dma_chan_reg
{
        REG_DCAR = 0,
        REG_DHPR,
        REG_DTPR,
        REG_DAUX_HI,
        REG_DAUX_LO,
        REG_DRAR_HI,
        REG_DRAR_LO,
        REG_DITR,
        REG_DSTAT,
        REG_DSTATWB_LO,
        REG_DSTATWB_HI,
        REG_DCHERR,
        REG_DCHERRMSK,
};
/* Pre-defined L1_CACHE_SHIFT is 6 on RH and 7 on Suse */
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/* Maximum DMA transfer size for a single memory copy descriptor */
#define MIC_MAX_DMA_XFER_SIZE (((1U) * 1024 * 1024) - L1_CACHE_BYTES)
/*
 * TODO (SIVA): I think it should be 128K - 64 (even 128K - 4 may work);
 * check this in the end.
 *
 * The maximum number of descriptors in the DMA descriptor queue is
 * 128K - 1, but since it needs to be a multiple of cache lines it is
 * 128K - 64.
 */
#define MIC_MAX_NUM_DESC_PER_RING ((128 * 1024) - L1_CACHE_BYTES)
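
/*
 * Sketch (not in the original): compile-time sanity checks on the derived
 * constants, assuming a C11 toolchain for _Static_assert. With
 * L1_CACHE_SHIFT == 6 these check 1MB - 64 = 1048512 bytes per transfer and
 * 128K - 64 = 131008 descriptors per ring, both multiples of the cache line.
 */
_Static_assert((MIC_MAX_DMA_XFER_SIZE % L1_CACHE_BYTES) == 0,
               "max transfer size must be a whole number of cache lines");
_Static_assert((MIC_MAX_NUM_DESC_PER_RING % L1_CACHE_BYTES) == 0,
               "descriptor ring size must be a multiple of cache lines");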
/**
 * enum md_mic_dma_chan_owner - Memory copy DMA channels can be Host or MIC
 *                              owned. The AES channel can only be MIC owned.
 */
enum md_mic_dma_chan_owner
{
        MIC_DMA_CHAN_MIC_OWNED = 0,
        MIC_DMA_CHAN_HOST_OWNED
};
/**
 * enum md_mic_dma_aes_endianness - Endianness needs to be provided
 *                                  only for the AES channel
 */
enum md_mic_dma_aes_endianness
{
        /*
         * The following two bits are opposite of what is given in the
         * content protection HAS, but this is how it is implemented in RTL.
         */
        MIC_BIG_ENDIAN = 0,
        MIC_LITTLE_ENDIAN
};
/**
 * struct md_mic_dma_chan - Opaque data structure for DMA channel specific fields.
 * @ch_num - channel number used to index the register table
 * @in_use - true if the channel is in use and false otherwise
 * @owner - host or MIC required for masking/unmasking
 *          interrupts and enabling channels
 * @endianness - required for enabling AES channel
 * @cookie - Debug cookie to identify this structure
 * @num_desc_in_ring - Number of descriptors in the descriptor ring
 * @cached_tail - last tail value read back from the hardware
 * @completion_count - cached DMA completion count
 * @dstat_wb_phys - physical address of the DSTAT write-back location
 */
struct md_mic_dma_chan
{
        int ch_num;
        atomic_t in_use;
        enum md_mic_dma_chan_owner owner;
        enum md_mic_dma_aes_endianness endianness;
        uint32_t cookie;
        uint32_t num_desc_in_ring;
        uint32_t cached_tail;
        uint32_t completion_count;
        dma_addr_t dstat_wb_phys;
        /* Add debug/profiling stats here */
};
/**
 * struct mic_dma_device - MIC DMA Device specific structure
 * @chan_info - static array of MIC DMA channel specific structures
 * @mm_sbox - MMIO virtual address of the SBOX register block
 * @lock - MTX_DEF lock to synchronize allocation/deallocation of DMA channels
 */
struct mic_dma_device
{
        struct md_mic_dma_chan chan_info[MAX_NUM_DMA_CHAN];
        void *mm_sbox;
        spinlock_t lock;
};
/**
 * union md_mic_dma_desc - Opaque data structure for DMA descriptor format.
 *
 * Two 64-bit quadwords per descriptor; the 4-bit type field selects the
 * layout. The bitfield widths below follow the KNC descriptor format;
 * treat the exact widths as a best-effort reconstruction.
 */
/* TODO: Change bitfields to portable masks */
union md_mic_dma_desc {
        union {
                struct {
                        uint64_t rsvd0;
                        uint64_t rsvd1:60, type:4;
                } nop;
                struct {
                        uint64_t sap:40, index:3, rsvd0:3, length:14, rsvd1:4;
                        uint64_t dap:40, rsvd2:15, twb:1, intr:1, c:1, co:1,
                                 ecy:1, type:4;
                } memcopy;
                struct {
                        uint64_t data;
                        uint64_t dap:40, rsvd0:19, intr:1, type:4;
                } status;
                struct {
                        uint64_t data:32, rsvd0:32;
                        uint64_t dap:40, rsvd1:20, type:4;
                } general;
        } desc;
        struct {
                uint64_t qw0;
                uint64_t qw1;
        } qwords;
};
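
/*
 * Sketch of the mask-based packing the TODO above asks for (an assumption,
 * not the original implementation): with the layout above, the 4-bit type
 * field sits in the top bits of the second quadword on a little-endian
 * bitfield ABI, so a portable encoder could write it as follows.
 */
#define MIC_DMA_DESC_TYPE_SHIFT 60
#define MIC_DMA_DESC_TYPE_MASK  (0xFULL << MIC_DMA_DESC_TYPE_SHIFT)

static inline void mic_dma_desc_set_type(union md_mic_dma_desc *desc,
                                         uint64_t type)
{
        /* Clear the old type bits, then merge in the new 4-bit value */
        desc->qwords.qw1 = (desc->qwords.qw1 & ~MIC_DMA_DESC_TYPE_MASK) |
                           ((type << MIC_DMA_DESC_TYPE_SHIFT) &
                            MIC_DMA_DESC_TYPE_MASK);
}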
/* Initialization functions */
void md_mic_dma_init(struct mic_dma_device *dma_dev, uint8_t *mmio_va_base);
void md_mic_dma_uninit(struct mic_dma_device *dma_dev);
void md_mic_dma_chan_init_attr(struct mic_dma_device *dma_dev,
                               struct md_mic_dma_chan *chan);
void md_mic_dma_chan_mask_intr(struct mic_dma_device *dma_dev,
                               struct md_mic_dma_chan *chan);
void md_mic_dma_chan_unmask_intr(struct mic_dma_device *dma_dev,
                                 struct md_mic_dma_chan *chan);
void md_mic_dma_chan_set_desc_ring(struct mic_dma_device *dma_dev,
                                   struct md_mic_dma_chan *chan,
                                   phys_addr_t desc_ring_phys_addr,
                                   uint32_t num_desc);
void md_mic_dma_enable_chan(struct mic_dma_device *dma_dev, uint32_t chan_num,
                            bool enable);
struct md_mic_dma_chan *md_mic_dma_request_chan(struct mic_dma_device *dma_dev,
                                                enum md_mic_dma_chan_owner owner);
void md_mic_dma_free_chan(struct mic_dma_device *dma_dev,
                          struct md_mic_dma_chan *chan);
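
/*
 * Sketch of the expected call sequence (an assumption based on the
 * prototypes above, not code from the original source): bring up the
 * device, then request a MIC-owned channel for the card-side driver.
 */
static inline struct md_mic_dma_chan *
mic_dma_bringup_example(struct mic_dma_device *dma_dev, uint8_t *mmio_va_base)
{
        md_mic_dma_init(dma_dev, mmio_va_base);
        return md_mic_dma_request_chan(dma_dev, MIC_DMA_CHAN_MIC_OWNED);
}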
static uint32_t mic_dma_reg[8][13] = {
        {SBOX_DCAR_0, SBOX_DHPR_0, SBOX_DTPR_0, SBOX_DAUX_HI_0, SBOX_DAUX_LO_0,
         SBOX_DRAR_HI_0, SBOX_DRAR_LO_0, SBOX_DITR_0, SBOX_DSTAT_0,
         SBOX_DSTATWB_LO_0, SBOX_DSTATWB_HI_0, SBOX_DCHERR_0, SBOX_DCHERRMSK_0},
        {SBOX_DCAR_1, SBOX_DHPR_1, SBOX_DTPR_1, SBOX_DAUX_HI_1, SBOX_DAUX_LO_1,
         SBOX_DRAR_HI_1, SBOX_DRAR_LO_1, SBOX_DITR_1, SBOX_DSTAT_1,
         SBOX_DSTATWB_LO_1, SBOX_DSTATWB_HI_1, SBOX_DCHERR_1, SBOX_DCHERRMSK_1},
        {SBOX_DCAR_2, SBOX_DHPR_2, SBOX_DTPR_2, SBOX_DAUX_HI_2, SBOX_DAUX_LO_2,
         SBOX_DRAR_HI_2, SBOX_DRAR_LO_2, SBOX_DITR_2, SBOX_DSTAT_2,
         SBOX_DSTATWB_LO_2, SBOX_DSTATWB_HI_2, SBOX_DCHERR_2, SBOX_DCHERRMSK_2},
        {SBOX_DCAR_3, SBOX_DHPR_3, SBOX_DTPR_3, SBOX_DAUX_HI_3, SBOX_DAUX_LO_3,
         SBOX_DRAR_HI_3, SBOX_DRAR_LO_3, SBOX_DITR_3, SBOX_DSTAT_3,
         SBOX_DSTATWB_LO_3, SBOX_DSTATWB_HI_3, SBOX_DCHERR_3, SBOX_DCHERRMSK_3},
        {SBOX_DCAR_4, SBOX_DHPR_4, SBOX_DTPR_4, SBOX_DAUX_HI_4, SBOX_DAUX_LO_4,
         SBOX_DRAR_HI_4, SBOX_DRAR_LO_4, SBOX_DITR_4, SBOX_DSTAT_4,
         SBOX_DSTATWB_LO_4, SBOX_DSTATWB_HI_4, SBOX_DCHERR_4, SBOX_DCHERRMSK_4},
        {SBOX_DCAR_5, SBOX_DHPR_5, SBOX_DTPR_5, SBOX_DAUX_HI_5, SBOX_DAUX_LO_5,
         SBOX_DRAR_HI_5, SBOX_DRAR_LO_5, SBOX_DITR_5, SBOX_DSTAT_5,
         SBOX_DSTATWB_LO_5, SBOX_DSTATWB_HI_5, SBOX_DCHERR_5, SBOX_DCHERRMSK_5},
        {SBOX_DCAR_6, SBOX_DHPR_6, SBOX_DTPR_6, SBOX_DAUX_HI_6, SBOX_DAUX_LO_6,
         SBOX_DRAR_HI_6, SBOX_DRAR_LO_6, SBOX_DITR_6, SBOX_DSTAT_6,
         SBOX_DSTATWB_LO_6, SBOX_DSTATWB_HI_6, SBOX_DCHERR_6, SBOX_DCHERRMSK_6},
        {SBOX_DCAR_7, SBOX_DHPR_7, SBOX_DTPR_7, SBOX_DAUX_HI_7, SBOX_DAUX_LO_7,
         SBOX_DRAR_HI_7, SBOX_DRAR_LO_7, SBOX_DITR_7, SBOX_DSTAT_7,
         SBOX_DSTATWB_LO_7, SBOX_DSTATWB_HI_7, SBOX_DCHERR_7, SBOX_DCHERRMSK_7}
};
static __always_inline uint32_t
md_mic_dma_read_mmio(struct mic_dma_device *dma_dev, int chan,
                     enum md_mic_dma_chan_reg reg)
{
        return mic_sbox_read_mmio(dma_dev->mm_sbox, mic_dma_reg[chan][reg]);
}
static __always_inline void
md_mic_dma_write_mmio(struct mic_dma_device *dma_dev, int chan,
                      enum md_mic_dma_chan_reg reg, uint32_t value)
{
        mic_sbox_write_mmio(dma_dev->mm_sbox, mic_dma_reg[chan][reg], value);
}
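
/*
 * Sketch (not in the original): the accessors above index mic_dma_reg by
 * [channel][register], so reading a channel's status register is a single
 * table-driven MMIO read.
 */
static __always_inline uint32_t
mic_dma_read_dstat_example(struct mic_dma_device *dma_dev, int chan)
{
        return md_mic_dma_read_mmio(dma_dev, chan, REG_DSTAT);
}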
#define KASSERT(x, y, ...)                      \
        do {                                    \
                if (!(x))                       \
                        printk(y, ##__VA_ARGS__);\
        } while (0)

#define CHECK_CHAN(chan)                                                \
        do {                                                            \
                KASSERT((chan), "NULL DMA channel\n");                  \
                KASSERT((DMA_CHAN_COOKIE == chan->cookie),              \
                        "Bad DMA channel cookie 0x%x\n", chan->cookie); \
                KASSERT(atomic_read(&(chan->in_use)),                   \
                        "DMA Channel not in use\n");                    \
        } while (0)
void md_mic_dma_chan_set_dstat_wb(struct mic_dma_device *dma_dev,
                                  struct md_mic_dma_chan *chan);
void md_mic_dma_chan_set_dcherr_msk(struct mic_dma_device *dma_dev,
                                    struct md_mic_dma_chan *chan,
                                    uint32_t mask);
static __always_inline void
md_mic_dma_chan_write_head(struct mic_dma_device *dma_dev,
                           struct md_mic_dma_chan *chan, uint32_t head)
{
        uint32_t chan_num = chan->ch_num;

        KASSERT((head < chan->num_desc_in_ring),
                "head 0x%x >= num_desc_in_ring 0x%x chan_num %d\n",
                head, chan->num_desc_in_ring, chan_num);
        md_mic_dma_write_mmio(dma_dev, chan_num, REG_DHPR, head);
}
uint32_t md_mic_dma_chan_read_head(struct mic_dma_device *dma_dev,
                                   struct md_mic_dma_chan *chan);
uint32_t md_mic_dma_chan_read_tail(struct mic_dma_device *dma_dev,
                                   struct md_mic_dma_chan *chan);
#define TAIL_PTR_READ_RETRIES 500000
#define HW_CMP_CNT_MASK 0x1ffff
/**
 * md_avail_desc_ring_space - Space available in the descriptor ring, always
 * leaving one slot empty to distinguish a full ring from an empty one.
 * @is_astep - A-step silicon reads the tail pointer register; later
 *             steppings read the completion count from DSTAT instead.
 *
 * If the cached tail does not show @required free slots, the tail is
 * re-read from hardware up to TAIL_PTR_READ_RETRIES times.
 */
static __always_inline uint32_t
md_avail_desc_ring_space(struct mic_dma_device *dma_dev, bool is_astep,
                         struct md_mic_dma_chan *chan, uint32_t head,
                         uint32_t required)
{
        uint32_t count = 0, max_num_retries = TAIL_PTR_READ_RETRIES,
                 num_retries = 0;
        uint32_t tail = chan->cached_tail;

retry:
        if (head > tail)
                count = (tail - 0) + (chan->num_desc_in_ring - head);
        else if (tail > head)
                count = tail - head;
        else
                /* head == tail: the ring is empty */
                return (chan->num_desc_in_ring - 1);

        if (count - 1 >= required)
                return count - 1;

        /* Not enough room; refresh the cached tail from hardware and retry */
        if (is_astep)
                tail = md_mic_dma_chan_read_tail(dma_dev, chan);
        else
                tail = HW_CMP_CNT_MASK &
                       md_mic_dma_read_mmio(dma_dev, chan->ch_num, REG_DSTAT);
        chan->cached_tail = tail;
        num_retries++;
        if (num_retries == max_num_retries)
                return 0;
        goto retry;
}
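
/*
 * Sketch of a submit path built on the helpers above (an assumption; the
 * real ring-management code lives in the DMA library layer): reserve one
 * slot, fill it, then publish the new head to hardware. desc_ring is a
 * hypothetical pointer to the channel's descriptor ring.
 */
static __always_inline int
mic_dma_submit_one_example(struct mic_dma_device *dma_dev, bool is_astep,
                           struct md_mic_dma_chan *chan,
                           union md_mic_dma_desc *desc_ring, uint32_t head)
{
        if (!md_avail_desc_ring_space(dma_dev, is_astep, chan, head, 1))
                return -1;      /* ring still full after the retry loop */
        /* ... fill desc_ring[head] (memcpy/status/nop descriptor) ... */
        head = (head + 1) % chan->num_desc_in_ring;
        md_mic_dma_chan_write_head(dma_dev, chan, head);
        return 0;
}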
bool md_mic_dma_chan_intr_pending(struct mic_dma_device *dma_dev,
                                  struct md_mic_dma_chan *chan);
phys_addr_t md_mic_dma_chan_get_desc_ring_phys(struct mic_dma_device *dma_dev,
                                               struct md_mic_dma_chan *chan);
phys_addr_t md_mic_dma_chan_get_dstatwb_phys(struct mic_dma_device *dma_dev,
                                             struct md_mic_dma_chan *chan);
/* Descriptor programming helpers */
void md_mic_dma_prep_nop_desc(union md_mic_dma_desc *desc);
/**
 * md_mic_dma_memcpy_desc - Prepares a memory copy descriptor
 * @src_phys: Source physical address, must be cache line aligned
 * @dst_phys: Destination physical address, must be cache line aligned
 * @size: Size of the transfer; must not be 0 and must be a multiple
 *        of cache lines (L1_CACHE_BYTES)
 */
static __always_inline void
md_mic_dma_memcpy_desc(union md_mic_dma_desc *desc,
                       uint64_t src_phys, uint64_t dst_phys, uint64_t size)
{
        KASSERT((desc != 0), ("NULL desc"));
        KASSERT((ALIGN(src_phys - (L1_CACHE_BYTES - 1), L1_CACHE_BYTES) == src_phys),
                "src not cache line aligned 0x%llx\n",
                (unsigned long long)src_phys);
        KASSERT((ALIGN(dst_phys - (L1_CACHE_BYTES - 1), L1_CACHE_BYTES) == dst_phys),
                "dst not cache line aligned 0x%llx\n",
                (unsigned long long)dst_phys);
        KASSERT(((size != 0) && (size <= MIC_MAX_DMA_XFER_SIZE) &&
                 (ALIGN(size - (L1_CACHE_BYTES - 1), L1_CACHE_BYTES) == size)),
                "size 0x%llx is 0, > MIC_MAX_DMA_XFER_SIZE, or not cache line aligned\n",
                (unsigned long long)size);
        desc->desc.memcopy.type = 1;
        desc->desc.memcopy.sap = src_phys;
        desc->desc.memcopy.dap = dst_phys;
        desc->desc.memcopy.length = (size >> L1_CACHE_SHIFT);
}
/**
 * md_mic_dma_prep_status_desc - Prepares a status descriptor
 * @data - Value to be written by the DMA engine @ dst_phys
 * @dst_phys: Destination physical address
 * @generate_intr: Generate an interrupt when the DMA HW
 *                 completes processing this descriptor
 */
static __always_inline void
md_mic_dma_prep_status_desc(union md_mic_dma_desc *desc, uint64_t data,
                            uint64_t dst_phys, bool generate_intr)
{
        KASSERT((desc != 0), ("NULL desc"));
        desc->desc.status.type = 2;
        desc->desc.status.data = data;
        desc->desc.status.dap = dst_phys;
        if (generate_intr)
                desc->desc.status.intr = 1;
}
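
/*
 * Sketch of a common pairing (an assumption, not original code): a copy
 * descriptor followed by a status descriptor, so the engine writes a
 * completion cookie to sw_cookie_phys and raises an interrupt once the
 * copy has finished. All addresses are cache-line-aligned physical
 * addresses obtained elsewhere; wrapping head + 1 around the ring is the
 * caller's job in this sketch.
 */
static __always_inline void
mic_dma_prep_copy_with_fence_example(union md_mic_dma_desc *ring,
                                     uint32_t head, uint64_t src_phys,
                                     uint64_t dst_phys, uint64_t size,
                                     uint64_t cookie, uint64_t sw_cookie_phys)
{
        md_mic_dma_memcpy_desc(&ring[head], src_phys, dst_phys, size);
        md_mic_dma_prep_status_desc(&ring[head + 1], cookie,
                                    sw_cookie_phys, true);
}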
/**
 * md_mic_dma_prep_gp_desc - Prepares a general purpose descriptor
 * @data - Value to be written by the DMA engine @ dst_phys
 * @dst_phys: Destination physical address
 */
static __always_inline void
md_mic_dma_prep_gp_desc(union md_mic_dma_desc *desc, uint32_t data,
                        uint64_t dst_phys)
{
        KASSERT((desc != 0), ("NULL desc"));
        desc->desc.general.type = 3;
        desc->desc.general.data = data;
        desc->desc.general.dap = dst_phys;
}
void md_mic_dma_print_debug(struct mic_dma_device *dma_dev,
                            struct md_mic_dma_chan *chan);