This article collects typical usage examples of the C/C++ sg_is_last function (in practice, Linux kernel scatterlist code). If you have been struggling with questions such as what sg_is_last does, how to call it, or what real uses of it look like, the hand-picked code samples here should help.
The following presents 15 code examples of the sg_is_last function, sorted by popularity by default.
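Before the examples, here is a minimal self-contained sketch of the typical pattern (the helper name total_bytes is hypothetical, not taken from any example below): sg_is_last() tests whether a scatterlist entry carries the end marker, and is commonly used to terminate a manual walk of the list.
#include <linux/scatterlist.h>
/* Hypothetical sketch: sum the byte lengths of all entries in a
 * scatterlist, stopping at the entry marked as the last one. */
static unsigned int total_bytes(struct scatterlist *sgl)
{
	struct scatterlist *sg = sgl;
	unsigned int total = 0;
	while (sg) {
		total += sg->length;
		if (sg_is_last(sg))	/* end marker set - stop the walk */
			break;
		sg = sg_next(sg);	/* transparently follows chain entries */
	}
	return total;
}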
Example 1: cc_map_sg
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
unsigned int nbytes, int direction, u32 *nents,
u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
bool is_chained = false;
if (sg_is_last(sg)) {
/* One entry only case - set to DLLI */
if (dma_map_sg(dev, sg, 1, direction) != 1) {
dev_err(dev, "dma_map_sg() single buffer failed\n");
return -ENOMEM;
}
dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
sg->offset, sg->length);
*lbytes = nbytes;
*nents = 1;
*mapped_nents = 1;
} else { /* !sg_is_last - more than one entry */
*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
&is_chained);
if (*nents > max_sg_nents) {
*nents = 0;
dev_err(dev, "Too many fragments. current %d max %d\n",
*nents, max_sg_nents);
return -ENOMEM;
}
if (!is_chained) {
/* In case of mmu the number of mapped nents might
* be changed from the original sgl nents
*/
*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
if (*mapped_nents == 0) {
*nents = 0;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
}
} else {
/*In this case the driver maps entry by entry so it
* must have the same nents before and after map
*/
*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
direction);
if (*mapped_nents != *nents) {
*nents = *mapped_nents;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
}
}
}
return 0;
}
Example 2: BUG_ON
/**
* sg_next - return the next scatterlist entry in a list
* @sg: The current sg entry
*
* Description:
* Usually the next entry will be @sg@ + 1, but if this sg element is part
* of a chained scatterlist, it could jump to the start of a new
* scatterlist array.
*
**/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
if (sg_is_last(sg))
return NULL;
sg++;
if (unlikely(sg_is_chain(sg)))
sg = sg_chain_ptr(sg);
return sg;
}
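As a hedged illustration of the chaining behaviour described in the comment above (a sketch, assuming a kernel that provides the sg_chain() helper), two separately allocated arrays can be linked so that sg_next() hops transparently from one to the other:
struct scatterlist first[3], second[2];
sg_init_table(first, 3);	/* places the end marker on first[2] */
sg_init_table(second, 2);	/* places the end marker on second[1] */
/* Turn the last slot of 'first' into a chain entry pointing at
 * 'second'; only first[0] and first[1] still carry data. */
sg_chain(first, 3, second);
/* sg_next(&first[1]) now returns &second[0], skipping the chain
 * entry via the unlikely(sg_is_chain(sg)) branch shown above. */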
Example 3: s5p_aes_rx
static void s5p_aes_rx(struct s5p_aes_dev *dev)
{
int err;
s5p_unset_indata(dev);
if (!sg_is_last(dev->sg_src)) {
err = s5p_set_indata(dev, sg_next(dev->sg_src));
if (err) {
s5p_aes_complete(dev, err);
return;
}
s5p_set_dma_indata(dev, dev->sg_src);
}
}
Example 4: s5p_aes_tx
static void s5p_aes_tx(struct s5p_aes_dev *dev)
{
int err = 0;
s5p_unset_outdata(dev);
if (!sg_is_last(dev->sg_dst)) {
err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
if (err) {
s5p_aes_complete(dev, err);
return;
}
s5p_set_dma_outdata(dev, dev->sg_dst);
} else
s5p_aes_complete(dev, err);
}
Example 5: for_each_sg
/**
* sg_last - return the last scatterlist entry in a list
* @sgl: First entry in the scatterlist
* @nents: Number of entries in the scatterlist
*
* Description:
* Should only be used casually, it (currently) scans the entire list
* to get the last entry.
*
* Note that the @sgl@ pointer passed in need not be the first one,
* the important bit is that @nents@ denotes the number of entries that
* exist from @sgl@.
*
**/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
struct scatterlist *ret = &sgl[nents - 1];
#else
struct scatterlist *sg, *ret = NULL;
unsigned int i;
for_each_sg(sgl, sg, nents, i)
ret = sg;
#endif
#ifdef CONFIG_DEBUG_SG
BUG_ON(sgl[0].sg_magic != SG_MAGIC);
BUG_ON(!sg_is_last(ret));
#endif
return ret;
}
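For context, a short sketch (hypothetical sizes, not from the page) of how the end marker that sg_is_last() and the BUG_ON above rely on can be moved with sg_mark_end() when only part of a table is used:
struct scatterlist table[4];
sg_init_table(table, 4);	/* places the end marker on table[3] */
/* Use only the first two entries: move the end marker forward. */
sg_mark_end(&table[1]);
/* sg_is_last(&table[1]) is now true, so with nents == 2,
 * sg_last(table, 2) returns &table[1] and the BUG_ON passes. */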
Example 6: mv_cesa_req_dma_iter_next_transfer
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
struct mv_cesa_sg_dma_iter *sgiter,
unsigned int len)
{
if (!sgiter->sg)
return false;
sgiter->op_offset += len;
sgiter->offset += len;
if (sgiter->offset == sg_dma_len(sgiter->sg)) {
if (sg_is_last(sgiter->sg))
return false;
sgiter->offset = 0;
sgiter->sg = sg_next(sgiter->sg);
}
if (sgiter->op_offset == iter->op_len)
return false;
return true;
}
Example 7: dx_map_sg
static int dx_map_sg(struct device *dev, struct scatterlist *sg,
unsigned int nbytes, int direction,
uint32_t *nents, uint32_t max_sg_nents,
int *lbytes)
{
if (sg_is_last(sg)) {
/* One entry only case - set to DLLI */
if ( unlikely( dma_map_sg(dev, sg, 1, direction) != 1 ) ) {
DX_LOG_ERR("dma_map_sg() single buffer failed %s\n ",
get_dir_type(direction));
return -ENOMEM;
}
DX_LOG_DEBUG("Mapped sg: dma_address=0x%08lX "
"page_link=0x%08lX addr=0x%08lX offset=%u "
"length=%u\n",
(unsigned long)sg_dma_address(sg),
sg->page_link,
(unsigned long)sg_virt(sg),
sg->offset, sg->length);
*lbytes = nbytes;
*nents = 1;
} else { /* !sg_is_last - more than one entry */
*nents = sg_count_ents(sg, nbytes, lbytes);
if (*nents > max_sg_nents) {
DX_LOG_ERR("Too many fragments. current %d max %d\n",
*nents, max_sg_nents);
return -ENOMEM;
}
/* TODO - verify num of entries */
if ( unlikely( dma_map_sg(dev, sg, *nents, direction)
!= *nents ) ) {
DX_LOG_ERR("dma_map_sg() sg buffer failed - %s\n",
get_dir_type(direction));
return -ENOMEM;
}
}
return 0;
}
Example 8: rk_ahash_crypto_rx
static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
{
int err = 0;
dev->unload_data(dev);
if (dev->left_bytes) {
if (dev->aligned) {
if (sg_is_last(dev->sg_src)) {
dev_warn(dev->dev, "[%s:%d], Lack of data\n",
__func__, __LINE__);
err = -ENOMEM;
goto out_rx;
}
dev->sg_src = sg_next(dev->sg_src);
}
err = rk_ahash_set_data_start(dev);
} else {
dev->complete(dev, 0);
}
out_rx:
return err;
}
Example 9: example_init
static int __init example_init(void)
{
int i;
unsigned int ret;
unsigned int nents;
struct scatterlist sg[10];
printk(KERN_INFO "DMA fifo test start\n");
if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) {
printk(KERN_WARNING "error kfifo_alloc\n");
return -ENOMEM;
}
printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo));
kfifo_in(&fifo, "test", 4);
for (i = 0; i != 9; i++)
kfifo_put(&fifo, i);
/* kick away first byte */
kfifo_skip(&fifo);
printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));
/*
* Configure the kfifo buffer to receive data from DMA input.
*
* .--------------------------------------.
* | 0 | 1 | 2 | ... | 12 | 13 | ... | 31 |
* |---|------------------|---------------|
* \_/ \________________/ \_____________/
* \ \ \
* \ \_allocated data \
* \_*free space* \_*free space*
*
* We need two different SG entries: one for the free space area at the
* end of the kfifo buffer (19 bytes) and another for the first free
* byte at the beginning, after the kfifo_skip().
*/
sg_init_table(sg, ARRAY_SIZE(sg));
nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
printk(KERN_INFO "DMA sgl entries: %d\n", nents);
if (!nents) {
/* fifo is full and no sgl was created */
printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
return -EIO;
}
/* receive data */
printk(KERN_INFO "scatterlist for receive:\n");
for (i = 0; i < nents; i++) {
printk(KERN_INFO
"sg[%d] -> "
"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
i, sg[i].page_link, sg[i].offset, sg[i].length);
if (sg_is_last(&sg[i]))
break;
}
/* put here your code to set up and execute the dma operation */
/* ... */
/* example: zero bytes received */
ret = 0;
/* finish the dma operation and update the received data */
kfifo_dma_in_finish(&fifo, ret);
/* Prepare to transmit data, example: 8 bytes */
nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
printk(KERN_INFO "DMA sgl entries: %d\n", nents);
if (!nents) {
/* no data was available and no sgl was created */
printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
return -EIO;
}
printk(KERN_INFO "scatterlist for transmit:\n");
for (i = 0; i < nents; i++) {
printk(KERN_INFO
"sg[%d] -> "
"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
i, sg[i].page_link, sg[i].offset, sg[i].length);
if (sg_is_last(&sg[i]))
break;
}
/* put here your code to set up and execute the dma operation */
/* ... */
/* example: 5 bytes transmitted */
ret = 5;
/* finish the dma operation and update the transmitted data */
kfifo_dma_out_finish(&fifo, ret);
//......... part of the code omitted .........
Example 10: prep_for_next_xfer
/*
* preps Ep pointers & data counters for next packet
* (fragment of the request) xfer returns true if
* there is a next transfer, and false if all bytes in
* current request have been xfered
*/
static inline bool prep_for_next_xfer(cy_as_hal_device_tag tag, uint8_t ep)
{
if (!end_points[ep].sg_list_enabled) {
/*
* no further transfers for non storage EPs
* (like EP2 during firmware download, done
* in 64 byte chunks)
*/
if (end_points[ep].req_xfer_cnt >= end_points[ep].req_length) {
DBGPRN("<1> %s():RQ sz:%d non-_sg EP:%d completed\n",
__func__, end_points[ep].req_length, ep);
/*
* no more transfers, we are done with the request
*/
return false;
}
/*
* calculate size of the next DMA xfer, corner
* case for non-storage EPs where transfer size
* is not equal to N * HAL_DMA_PKT_SZ xfers
*/
if ((end_points[ep].req_length - end_points[ep].req_xfer_cnt)
>= HAL_DMA_PKT_SZ) {
end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
} else {
/*
* that would be the last chunk less
* than P-port max size
*/
end_points[ep].dma_xfer_sz = end_points[ep].req_length -
end_points[ep].req_xfer_cnt;
}
return true;
}
/*
* for SG_list assisted dma xfers
* are we done with current SG ?
*/
if (end_points[ep].seg_xfer_cnt == end_points[ep].sg_p->length) {
/*
* was it the Last SG segment on the list ?
*/
if (sg_is_last(end_points[ep].sg_p)) {
DBGPRN("<1> %s: EP:%d completed,"
"%d bytes xfered\n",
__func__,
ep,
end_points[ep].req_xfer_cnt
);
return false;
} else {
/*
* There are more SG segments in current
* request's sg list setup new segment
*/
end_points[ep].seg_xfer_cnt = 0;
end_points[ep].sg_p = sg_next(end_points[ep].sg_p);
/* set data pointer for next DMA sg transfer*/
end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
DBGPRN("<1> %s new SG:_va:%p\n\n",
__func__, end_points[ep].data_p);
}
}
/*
* for sg list xfers it will always be 512 or 1024
*/
end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
/*
* next transfer is required
*/
return true;
}
Example 11: xts_encrypt
//......... part of the code omitted .........
}
/* This is the Integrity Check Value (aka the authentication tag) length; it
* can be 8, 12 or 16 bytes long. */
static int rfc4106_set_authsize(struct crypto_aead *parent,
unsigned int authsize)
{
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm);
int ret;
ret = crypto_aead_setauthsize(child, authsize);
if (!ret)
crypto_aead_crt(parent)->authsize = authsize;
return ret;
}
static int __driver_rfc4106_encrypt(struct aead_request *req)
{
u8 one_entry_in_sg = 0;
u8 *src, *dst, *assoc;
__be32 counter = cpu_to_be32(1);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
u32 key_len = ctx->aes_key_expanded.key_length;
void *aes_ctx = &(ctx->aes_key_expanded);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 iv_tab[16+AESNI_ALIGN];
u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
struct scatter_walk src_sg_walk;
struct scatter_walk assoc_sg_walk;
struct scatter_walk dst_sg_walk;
unsigned int i;
/* Assuming we are supporting rfc4106 64-bit extended */
/* sequence numbers We need to have the AAD length equal */
/* to 8 or 12 bytes */
if (unlikely(req->assoclen != 8 && req->assoclen != 12))
return -EINVAL;
if (unlikely(auth_tag_len != 8 && auth_tag_len != 12 && auth_tag_len != 16))
return -EINVAL;
if (unlikely(key_len != AES_KEYSIZE_128 &&
key_len != AES_KEYSIZE_192 &&
key_len != AES_KEYSIZE_256))
return -EINVAL;
/* IV below built */
for (i = 0; i < 4; i++)
*(iv+i) = ctx->nonce[i];
for (i = 0; i < 8; i++)
*(iv+4+i) = req->iv[i];
*((__be32 *)(iv+12)) = counter;
if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
one_entry_in_sg = 1;
scatterwalk_start(&src_sg_walk, req->src);
scatterwalk_start(&assoc_sg_walk, req->assoc);
src = scatterwalk_map(&src_sg_walk);
assoc = scatterwalk_map(&assoc_sg_walk);
dst = src;
if (unlikely(req->src != req->dst)) {
scatterwalk_start(&dst_sg_walk, req->dst);
dst = scatterwalk_map(&dst_sg_walk);
}
} else {
/* Allocate memory for src, dst, assoc */
src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
GFP_ATOMIC);
if (unlikely(!src))
return -ENOMEM;
assoc = (src + req->cryptlen + auth_tag_len);
scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
scatterwalk_map_and_copy(assoc, req->assoc, 0,
req->assoclen, 0);
dst = src;
}
aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
+ ((unsigned long)req->cryptlen), auth_tag_len);
/* The authTag (aka the Integrity Check Value) needs to be written
* back to the packet. */
if (one_entry_in_sg) {
if (unlikely(req->src != req->dst)) {
scatterwalk_unmap(dst);
scatterwalk_done(&dst_sg_walk, 0, 0);
}
scatterwalk_unmap(src);
scatterwalk_unmap(assoc);
scatterwalk_done(&src_sg_walk, 0, 0);
scatterwalk_done(&assoc_sg_walk, 0, 0);
} else {
scatterwalk_map_and_copy(dst, req->dst, 0,
req->cryptlen + auth_tag_len, 1);
kfree(src);
}
return 0;
}
Example 12: xts_encrypt
//......... part of the code omitted .........
struct cryptd_aead **ctx = crypto_aead_ctx(parent);
struct cryptd_aead *cryptd_tfm = *ctx;
return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
}
static int helper_rfc4106_encrypt(struct aead_request *req)
{
u8 one_entry_in_sg = 0;
u8 *src, *dst, *assoc;
__be32 counter = cpu_to_be32(1);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
struct scatter_walk src_sg_walk;
struct scatter_walk dst_sg_walk = {};
unsigned int i;
/* Assuming we are supporting rfc4106 64-bit extended */
/* sequence numbers We need to have the AAD length equal */
/* to 16 or 20 bytes */
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
return -EINVAL;
/* IV below built */
for (i = 0; i < 4; i++)
*(iv+i) = ctx->nonce[i];
for (i = 0; i < 8; i++)
*(iv+4+i) = req->iv[i];
*((__be32 *)(iv+12)) = counter;
if (sg_is_last(req->src) &&
req->src->offset + req->src->length <= PAGE_SIZE &&
sg_is_last(req->dst) &&
req->dst->offset + req->dst->length <= PAGE_SIZE) {
one_entry_in_sg = 1;
scatterwalk_start(&src_sg_walk, req->src);
assoc = scatterwalk_map(&src_sg_walk);
src = assoc + req->assoclen;
dst = src;
if (unlikely(req->src != req->dst)) {
scatterwalk_start(&dst_sg_walk, req->dst);
dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
}
} else {
/* Allocate memory for src, dst, assoc */
assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
GFP_ATOMIC);
if (unlikely(!assoc))
return -ENOMEM;
scatterwalk_map_and_copy(assoc, req->src, 0,
req->assoclen + req->cryptlen, 0);
src = assoc + req->assoclen;
dst = src;
}
kernel_fpu_begin();
aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
ctx->hash_subkey, assoc, req->assoclen - 8,
dst + req->cryptlen, auth_tag_len);
kernel_fpu_end();
/* The authTag (aka the Integrity Check Value) needs to be written
* back to the packet. */
Example 13: mtk_aes_process_sg
int mtk_aes_process_sg(struct scatterlist* sg_src,
struct scatterlist* sg_dst,
struct mcrypto_ctx *ctx,
unsigned int nbytes,
unsigned int mode)
{
struct scatterlist *next_dst, *next_src;
struct AES_txdesc* txdesc;
struct AES_rxdesc* rxdesc;
u32 aes_txd_info4;
u32 aes_size_total, aes_size_chunk, aes_free_desc;
u32 aes_tx_scatter = 0;
u32 aes_rx_gather = 0;
u32 i = 1, j = 1;
unsigned long flags = 0;
next_src = sg_src;
next_dst = sg_dst;
while (sg_dma_len(next_src) == 0) {
if (sg_is_last(next_src))
return -EINVAL;
next_src = sg_next(next_src);
}
while (sg_dma_len(next_dst) == 0) {
if (sg_is_last(next_dst))
return -EINVAL;
next_dst = sg_next(next_dst);
}
if (ctx->keylen == AES_KEYSIZE_256)
aes_txd_info4 = TX4_DMA_AES_256;
else if (ctx->keylen == AES_KEYSIZE_192)
aes_txd_info4 = TX4_DMA_AES_192;
else
aes_txd_info4 = TX4_DMA_AES_128;
if (mode & MCRYPTO_MODE_ENC)
aes_txd_info4 |= TX4_DMA_ENC;
if (mode & MCRYPTO_MODE_CBC)
aes_txd_info4 |= TX4_DMA_CBC | TX4_DMA_IVR;
spin_lock_irqsave(&AES_Entry.page_lock, flags);
DBGPRINT(DBG_HIGH, "\nStart new scater, TX [front=%u rear=%u]; RX [front=%u rear=%u]\n",
AES_Entry.aes_tx_front_idx, AES_Entry.aes_tx_rear_idx,
AES_Entry.aes_rx_front_idx, AES_Entry.aes_rx_rear_idx);
aes_size_total = nbytes;
if (AES_Entry.aes_tx_front_idx > AES_Entry.aes_tx_rear_idx)
aes_free_desc = NUM_AES_TX_DESC - (AES_Entry.aes_tx_front_idx - AES_Entry.aes_tx_rear_idx);
else
aes_free_desc = AES_Entry.aes_tx_rear_idx - AES_Entry.aes_tx_front_idx;
/* TX descriptor */
while (1) {
if (i > aes_free_desc) {
spin_unlock_irqrestore(&AES_Entry.page_lock, flags);
return -EAGAIN;
}
aes_tx_scatter = (AES_Entry.aes_tx_rear_idx + i) % NUM_AES_TX_DESC;
txdesc = &AES_Entry.AES_tx_ring0[aes_tx_scatter];
if (sg_dma_len(next_src) == 0)
goto next_desc_tx;
aes_size_chunk = min(aes_size_total, sg_dma_len(next_src));
DBGPRINT(DBG_HIGH, "AES set TX Desc[%u] Src=%08X, len=%d, Key=%08X, klen=%d\n",
aes_tx_scatter, (u32)sg_virt(next_src), aes_size_chunk, (u32)ctx->key, ctx->keylen);
if ((mode & MCRYPTO_MODE_CBC) && (i == 1)) {
if (!ctx->iv)
memset((void*)txdesc->IV, 0xFF, sizeof(uint32_t)*4);
else
memcpy((void*)txdesc->IV, ctx->iv, sizeof(uint32_t)*4);
txdesc->txd_info4 = aes_txd_info4 | TX4_DMA_KIU;
} else {
txdesc->txd_info4 = aes_txd_info4;
}
if (i == 1) {
txdesc->SDP0 = (u32)dma_map_single(NULL, ctx->key, ctx->keylen, DMA_TO_DEVICE);
txdesc->txd_info2 = TX2_DMA_SDL0_SET(ctx->keylen);
} else {
txdesc->txd_info2 = 0;
}
txdesc->SDP1 = (u32)dma_map_single(NULL, sg_virt(next_src), aes_size_chunk, DMA_TO_DEVICE);
txdesc->txd_info2 |= TX2_DMA_SDL1_SET(aes_size_chunk);
i++;
aes_size_total -= aes_size_chunk;
next_desc_tx:
if (!aes_size_total || sg_is_last(next_src)) {
txdesc->txd_info2 |= TX2_DMA_LS1;
//......... part of the code omitted .........
Example 14: bcm2835_spi_transfer_prologue
/**
* bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
* @master: SPI master
* @tfr: SPI transfer
* @bs: BCM2835 SPI controller
* @cs: CS register
*
* A limitation in DMA mode is that the FIFO must be accessed in 4 byte chunks.
* Only the final write access is permitted to transmit less than 4 bytes, the
* SPI controller deduces its intended size from the DLEN register.
*
* If a TX or RX sglist contains multiple entries, one per page, and the first
* entry starts in the middle of a page, that first entry's length may not be
* a multiple of 4. Subsequent entries are fine because they span an entire
* page, hence do have a length that's a multiple of 4.
*
* This cannot happen with kmalloc'ed buffers (which is what most clients use)
* because they are contiguous in physical memory and therefore not split on
* page boundaries by spi_map_buf(). But it *can* happen with vmalloc'ed
* buffers.
*
* The DMA engine is incapable of combining sglist entries into a continuous
* stream of 4 byte chunks, it treats every entry separately: A TX entry is
* rounded up to a multiple of 4 bytes by transmitting surplus bytes, an RX
* entry is rounded up by throwing away received bytes.
*
* Overcome this limitation by transferring the first few bytes without DMA:
* E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
* write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
* The residue of 1 byte in the RX FIFO is picked up by DMA. Together with
* the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
*
* Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
* write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
* Caution, the additional 4 bytes spill over to the second TX sglist entry
* if the length of the first is *exactly* 1.
*
* At most 6 bytes are written and at most 3 bytes read. Do we know the
* transfer has this many bytes? Yes, see BCM2835_SPI_DMA_MIN_LENGTH.
*
* The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
* by the DMA engine. Toggling the DMA Enable flag in the CS register switches
* the width but also garbles the FIFO's contents. The prologue must therefore
* be transmitted in 32-bit width to ensure that the following DMA transfer can
* pick up the residue in the RX FIFO in ungarbled form.
*/
static void bcm2835_spi_transfer_prologue(struct spi_master *master,
struct spi_transfer *tfr,
struct bcm2835_spi *bs,
u32 cs)
{
int tx_remaining;
bs->tfr = tfr;
bs->tx_prologue = 0;
bs->rx_prologue = 0;
bs->tx_spillover = false;
if (!sg_is_last(&tfr->tx_sg.sgl[0]))
bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;
if (!sg_is_last(&tfr->rx_sg.sgl[0])) {
bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;
if (bs->rx_prologue > bs->tx_prologue) {
if (sg_is_last(&tfr->tx_sg.sgl[0])) {
bs->tx_prologue = bs->rx_prologue;
} else {
bs->tx_prologue += 4;
bs->tx_spillover =
!(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3);
}
}
}
/* rx_prologue > 0 implies tx_prologue > 0, so check only the latter */
if (!bs->tx_prologue)
return;
/* Write and read RX prologue. Adjust first entry in RX sglist. */
if (bs->rx_prologue) {
bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
| BCM2835_SPI_CS_DMAEN);
bcm2835_wr_fifo_count(bs, bs->rx_prologue);
bcm2835_wait_tx_fifo_empty(bs);
bcm2835_rd_fifo_count(bs, bs->rx_prologue);
bcm2835_spi_reset_hw(master);
dma_sync_single_for_device(master->dma_rx->device->dev,
sg_dma_address(&tfr->rx_sg.sgl[0]),
bs->rx_prologue, DMA_FROM_DEVICE);
sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
}
/*
* Write remaining TX prologue. Adjust first entry in TX sglist.
* Also adjust second entry if prologue spills over to it.
//......... part of the code omitted .........
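To make the arithmetic in the comment above concrete, here is a hedged standalone sketch (plain C with hypothetical lengths, not driver code; the real function additionally special-cases a single-entry TX sglist) of how the prologue sizes fall out of the & 3 masking for the lengths mentioned in the comment (a 23-byte first TX entry and a 42-byte first RX entry):
unsigned int tx_len = 23, rx_len = 42;
unsigned int tx_prologue = tx_len & 3;	/* 23 & 3 == 3: write 3 bytes by PIO */
unsigned int rx_prologue = rx_len & 3;	/* 42 & 3 == 2: read 2 bytes by PIO */
/* Had the RX prologue been larger (say 3 vs. a TX prologue of 1), the
 * driver widens the TX prologue by 4 bytes (1 + 4 = 5 written, 3 read),
 * and the extra 4 bytes may spill into the second TX sglist entry. */
if (rx_prologue > tx_prologue)
	tx_prologue += 4;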
Example 15: map_ablkcipher_request
int map_ablkcipher_request(struct device *dev, struct ablkcipher_request *req)
{
struct ablkcipher_req_ctx *areq_ctx = ablkcipher_request_ctx(req);
unsigned int iv_size = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
struct sg_data_array sg_data;
struct buff_mgr_handle *buff_mgr = crypto_drvdata->buff_mgr_handle;
int dummy = 0;
int rc = 0;
areq_ctx->sec_dir = 0;
areq_ctx->dma_buf_type = DX_DMA_BUF_DLLI;
mlli_params->curr_pool = NULL;
sg_data.num_of_sg = 0;
/* Map IV buffer */
if (likely(iv_size != 0) ) {
dump_byte_array("iv", (uint8_t *)req->info, iv_size);
areq_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)req->info,
iv_size, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev,
areq_ctx->gen_ctx.iv_dma_addr))) {
DX_LOG_ERR("Mapping iv %u B at va=0x%08lX "
"for DMA failed\n",iv_size,
(unsigned long)req->info);
return -ENOMEM;
}
DX_LOG_DEBUG("Mapped iv %u B at va=0x%08lX to dma=0x%08lX\n",
iv_size, (unsigned long)req->info,
(unsigned long)areq_ctx->gen_ctx.iv_dma_addr);
} else {
areq_ctx->gen_ctx.iv_dma_addr = 0;
}
/* Map the src sg */
if ( sg_is_last(req->src) &&
(sg_page(req->src) == NULL) &&
sg_dma_address(req->src)) {
/* The source is secure no mapping is needed */
areq_ctx->sec_dir = DX_SRC_DMA_IS_SECURE;
areq_ctx->in_nents = 1;
} else {
if ( unlikely( dx_map_sg( dev,req->src, req->nbytes,
DMA_BIDIRECTIONAL,
&areq_ctx->in_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES,
&dummy))){
rc = -ENOMEM;
goto fail_unmap_iv;
}
if ( areq_ctx->in_nents > 1 ) {
areq_ctx->dma_buf_type = DX_DMA_BUF_MLLI;
}
}
if ( unlikely(req->src == req->dst)) {
if ( areq_ctx->sec_dir == DX_SRC_DMA_IS_SECURE ) {
DX_LOG_ERR("Secure key inplace operation "
"is not supported \n");
/* both sides are secure */
rc = -ENOMEM;
goto fail_unmap_din;
}
/* Handle inplace operation */
if ( unlikely(areq_ctx->dma_buf_type == DX_DMA_BUF_MLLI) ) {
areq_ctx->out_nents = 0;
buffer_mgr_set_sg_entry(&sg_data,
areq_ctx->in_nents,
req->src,
req->nbytes,
true);
}
} else {
if ( sg_is_last(req->dst) &&
(sg_page(req->dst) == NULL) &&
sg_dma_address(req->dst)) {
if ( areq_ctx->sec_dir == DX_SRC_DMA_IS_SECURE ) {
DX_LOG_ERR("Secure key in both sides is"
"not supported \n");
/* both sides are secure */
rc = -ENOMEM;
goto fail_unmap_din;
}
/* The dest is secure no mapping is needed */
areq_ctx->sec_dir = DX_DST_DMA_IS_SECURE;
areq_ctx->out_nents = 1;
} else {
/* Map the dst sg */
if ( unlikely( dx_map_sg(dev,req->dst, req->nbytes,
DMA_BIDIRECTIONAL,
&areq_ctx->out_nents,
LLI_MAX_NUM_OF_DATA_ENTRIES,
&dummy))){
rc = -ENOMEM;
goto fail_unmap_din;
}
if ( areq_ctx->out_nents > 1 ) {
//......... part of the code omitted .........