crypto: sa2ul - Fix DMA mapping API usage
Make sure that we call dma_unmap_sg() on the correct scatterlist and with the correct sg_nents on completion. Use sg_table to manage the DMA mappings and, at the same time, add the needed dma_sync calls for the sg_table.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 4fc983266d
commit 00c9211f60
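In short: instead of mapping with dma_map_sg() and then trying to reconstruct the right scatterlist and nents at unmap time, each mapping now lives in a struct sg_table, which records both orig_nents (what was handed to the API) and nents (what the mapping produced). A minimal sketch of that lifecycle, assuming a device pointer and a caller-provided scatterlist (the helper names are hypothetical and error handling is trimmed; this is not the driver code itself):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helpers sketching the dma_map_sgtable() lifecycle. */
static int example_map(struct device *dev, struct sg_table *sgt,
                       struct scatterlist *sgl, int nents,
                       enum dma_data_direction dir)
{
        sgt->sgl = sgl;         /* wrap an existing scatterlist */
        sgt->orig_nents = nents;

        /* On success the mapped segment count is stored in sgt->nents. */
        return dma_map_sgtable(dev, sgt, dir, 0);
}

static void example_complete(struct device *dev, struct sg_table *sgt,
                             enum dma_data_direction dir)
{
        /* Hand the buffer back to the CPU before reading the results... */
        dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);

        /* ...then unmap exactly what was mapped, with no recomputation. */
        dma_unmap_sgtable(dev, sgt, dir, 0);
}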
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -142,34 +142,39 @@ struct sa_alg_tmpl {
 	bool registered;
 };
 
+/**
+ * struct sa_mapped_sg: scatterlist information for tx and rx
+ * @mapped: Set to true if the @sgt is mapped
+ * @dir: mapping direction used for @sgt
+ * @split_sg: Set if the sg is split and needs to be freed up
+ * @static_sg: Static scatterlist entry for overriding data
+ * @sgt: scatterlist table for DMA API use
+ */
+struct sa_mapped_sg {
+	bool mapped;
+	enum dma_data_direction dir;
+	struct scatterlist static_sg;
+	struct scatterlist *split_sg;
+	struct sg_table sgt;
+};
 /**
  * struct sa_rx_data: RX Packet miscellaneous data place holder
  * @req: crypto request data pointer
  * @ddev: pointer to the DMA device
  * @tx_in: dma_async_tx_descriptor pointer for rx channel
- * @split_src_sg: Set if the src sg is split and needs to be freed up
- * @split_dst_sg: Set if the dst sg is split and needs to be freed up
+ * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
  * @enc: Flag indicating either encryption or decryption
  * @enc_iv_size: Initialisation vector size
  * @iv_idx: Initialisation vector index
- * @rx_sg: Static scatterlist entry for overriding RX data
- * @tx_sg: Static scatterlist entry for overriding TX data
- * @src: Source data pointer
- * @dst: Destination data pointer
  */
 struct sa_rx_data {
 	void *req;
 	struct device *ddev;
 	struct dma_async_tx_descriptor *tx_in;
-	struct scatterlist *split_src_sg;
-	struct scatterlist *split_dst_sg;
+	struct sa_mapped_sg mapped_sg[2];
 	u8 enc;
 	u8 enc_iv_size;
 	u8 iv_idx;
-	struct scatterlist rx_sg;
-	struct scatterlist tx_sg;
-	struct scatterlist *src;
-	struct scatterlist *dst;
 };
 
 /**
@@ -976,23 +981,46 @@ static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	return sa_cipher_setkey(tfm, key, keylen, &ad);
 }
 
+static void sa_sync_from_device(struct sa_rx_data *rxd)
+{
+	struct sg_table *sgt;
+
+	if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
+		sgt = &rxd->mapped_sg[0].sgt;
+	else
+		sgt = &rxd->mapped_sg[1].sgt;
+
+	dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
+}
+
+static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
+		struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];
+
+		if (mapped_sg->mapped) {
+			dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
+					  mapped_sg->dir, 0);
+			kfree(mapped_sg->split_sg);
+		}
+	}
+
+	kfree(rxd);
+}
+
 static void sa_aes_dma_in_callback(void *data)
 {
 	struct sa_rx_data *rxd = (struct sa_rx_data *)data;
 	struct skcipher_request *req;
-	int sglen;
 	u32 *result;
 	__be32 *mdptr;
 	size_t ml, pl;
 	int i;
-	enum dma_data_direction dir_src;
-	bool diff_dst;
 
+	sa_sync_from_device(rxd);
 	req = container_of(rxd->req, struct skcipher_request, base);
-	sglen = sg_nents_for_len(req->src, req->cryptlen);
-
-	diff_dst = (req->src != req->dst) ? true : false;
-	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
 
 	if (req->iv) {
 		mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
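The two helpers added above centralize what every completion callback needs: dma_sync_sgtable_for_cpu() before the CPU reads the device's output, and one cleanup path that unmaps whatever was actually mapped and frees any split list. The resulting callback shape (an ordering sketch distilled from the callbacks in this patch, not a verbatim copy of any one of them):

static void example_dma_in_callback(void *data)
{
        struct sa_rx_data *rxd = data;

        sa_sync_from_device(rxd);       /* CPU may now read the DMA'd data */

        /* ... copy the IV / digest / auth tag out of the descriptor ... */

        sa_free_sa_rx_data(rxd);        /* dma_unmap_sgtable() + kfree() */

        /* ... and only then complete the crypto request. */
}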
@@ -1003,18 +1031,7 @@ static void sa_aes_dma_in_callback(void *data)
 			result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
 	}
 
-	dma_unmap_sg(rxd->ddev, req->src, sglen, dir_src);
-	kfree(rxd->split_src_sg);
-
-	if (diff_dst) {
-		sglen = sg_nents_for_len(req->dst, req->cryptlen);
-
-		dma_unmap_sg(rxd->ddev, req->dst, sglen,
-			     DMA_FROM_DEVICE);
-		kfree(rxd->split_dst_sg);
-	}
-
-	kfree(rxd);
+	sa_free_sa_rx_data(rxd);
 
 	skcipher_request_complete(req, 0);
 }
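This hunk removes the pattern the commit message calls out: the unmap count was recomputed at completion with sg_nents_for_len() and applied to req->src/req->dst, even when what had actually been mapped was a split list. The DMA-API rule is that dma_unmap_sg() must be given the same scatterlist and the same nents that were passed to dma_map_sg(), not the value dma_map_sg() returned and not a count recomputed later. A sketch of the distinction, using a hypothetical helper:

/*
 * Illustration only: dma_map_sg() may merge entries, so the mapped
 * segment count it returns can differ from the nents passed in.
 */
static int example_unmap_rule(struct device *dev, struct scatterlist *sgl,
                              enum dma_data_direction dir)
{
        int nents = sg_nents(sgl);
        int mapped = dma_map_sg(dev, sgl, nents, dir);

        if (!mapped)
                return -EIO;

        /* ... DMA runs; 'mapped' is what descriptor setup should use ... */

        /* Unmap with the original list and the original nents. */
        dma_unmap_sg(dev, sgl, nents, dir);
        return 0;
}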
@@ -1043,7 +1060,6 @@ static int sa_run(struct sa_req *req)
 	struct device *ddev;
 	struct dma_chan *dma_rx;
 	int sg_nents, src_nents, dst_nents;
-	int mapped_src_nents, mapped_dst_nents;
 	struct scatterlist *src, *dst;
 	size_t pl, ml, split_size;
 	struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
@@ -1052,6 +1068,7 @@ static int sa_run(struct sa_req *req)
 	u32 *mdptr;
 	bool diff_dst;
 	enum dma_data_direction dir_src;
+	struct sa_mapped_sg *mapped_sg;
 
 	gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 		GFP_KERNEL : GFP_ATOMIC;
@@ -1082,6 +1099,7 @@ static int sa_run(struct sa_req *req)
 		dma_rx = pdata->dma_rx1;
 
 	ddev = dma_rx->device->dev;
+	rxd->ddev = ddev;
 
 	memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
 
@@ -1109,56 +1127,88 @@ static int sa_run(struct sa_req *req)
 
 	split_size = req->size;
 
+	mapped_sg = &rxd->mapped_sg[0];
 	if (sg_nents == 1 && split_size <= req->src->length) {
-		src = &rxd->rx_sg;
+		src = &mapped_sg->static_sg;
+		src_nents = 1;
 		sg_init_table(src, 1);
 		sg_set_page(src, sg_page(req->src), split_size,
 			    req->src->offset);
-		src_nents = 1;
-		dma_map_sg(ddev, src, sg_nents, dir_src);
+
+		mapped_sg->sgt.sgl = src;
+		mapped_sg->sgt.orig_nents = src_nents;
+		ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
+		if (ret)
+			return ret;
+
+		mapped_sg->dir = dir_src;
+		mapped_sg->mapped = true;
 	} else {
-		mapped_src_nents = dma_map_sg(ddev, req->src, sg_nents,
-					      dir_src);
-		ret = sg_split(req->src, mapped_src_nents, 0, 1, &split_size,
-			       &src, &src_nents, gfp_flags);
+		mapped_sg->sgt.sgl = req->src;
+		mapped_sg->sgt.orig_nents = sg_nents;
+		ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
+		if (ret)
+			return ret;
+
+		mapped_sg->dir = dir_src;
+		mapped_sg->mapped = true;
+
+		ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
+			       &split_size, &src, &src_nents, gfp_flags);
 		if (ret) {
-			src_nents = sg_nents;
-			src = req->src;
+			src_nents = mapped_sg->sgt.nents;
+			src = mapped_sg->sgt.sgl;
 		} else {
-			rxd->split_src_sg = src;
+			mapped_sg->split_sg = src;
 		}
 	}
 
+	dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);
+
 	if (!diff_dst) {
 		dst_nents = src_nents;
 		dst = src;
 	} else {
 		dst_nents = sg_nents_for_len(req->dst, req->size);
+		mapped_sg = &rxd->mapped_sg[1];
 
 		if (dst_nents == 1 && split_size <= req->dst->length) {
-			dst = &rxd->tx_sg;
+			dst = &mapped_sg->static_sg;
+			dst_nents = 1;
 			sg_init_table(dst, 1);
 			sg_set_page(dst, sg_page(req->dst), split_size,
 				    req->dst->offset);
-			dst_nents = 1;
-			dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE);
+
+			mapped_sg->sgt.sgl = dst;
+			mapped_sg->sgt.orig_nents = dst_nents;
+			ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
+					      DMA_FROM_DEVICE, 0);
+			if (ret)
+				goto err_cleanup;
+
+			mapped_sg->dir = DMA_FROM_DEVICE;
+			mapped_sg->mapped = true;
 		} else {
-			mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents,
-						      DMA_FROM_DEVICE);
-			ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
-				       &split_size, &dst, &dst_nents,
-				       gfp_flags);
-			if (ret)
-				dst = req->dst;
-			else
-				rxd->split_dst_sg = dst;
+			mapped_sg->sgt.sgl = req->dst;
+			mapped_sg->sgt.orig_nents = dst_nents;
+			ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
+					      DMA_FROM_DEVICE, 0);
+			if (ret)
+				goto err_cleanup;
+
+			mapped_sg->dir = DMA_FROM_DEVICE;
+			mapped_sg->mapped = true;
+
+			ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
+				       0, 1, &split_size, &dst, &dst_nents,
+				       gfp_flags);
+			if (ret) {
+				dst_nents = mapped_sg->sgt.nents;
+				dst = mapped_sg->sgt.sgl;
+			} else {
+				mapped_sg->split_sg = dst;
+			}
 		}
 	}
 
-	if (unlikely(src_nents != sg_nents)) {
-		dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n");
-		ret = -EIO;
-		goto err_cleanup;
-	}
-
 	rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
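One subtlety in the hunk above: sg_split() now consumes the mapped segment count from the sg_table (mapped_sg->sgt.nents) rather than dma_map_sg()'s return value, and the list it allocates is remembered in @split_sg so that sa_free_sa_rx_data() can kfree() it. A sketch of that pattern (hypothetical helper; the fallback mirrors what sa_run() does on failure):

#include <linux/scatterlist.h>

static int example_split(struct sg_table *sgt, size_t split_size,
                         struct scatterlist **out, int *out_nents, gfp_t gfp)
{
        /* Carve the first split_size bytes out of the mapped table. */
        int ret = sg_split(sgt->sgl, sgt->nents, 0, 1, &split_size,
                           out, out_nents, gfp);

        if (ret) {
                /* Split failed: fall back to the full mapped list. */
                *out = sgt->sgl;
                *out_nents = sgt->nents;
        }

        /* On success, *out was allocated by sg_split() and must be freed. */
        return ret;
}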
@@ -1172,9 +1222,6 @@ static int sa_run(struct sa_req *req)
 
 	rxd->req = (void *)req->base;
 	rxd->enc = req->enc;
-	rxd->ddev = ddev;
-	rxd->src = src;
-	rxd->dst = dst;
 	rxd->iv_idx = req->ctx->iv_idx;
 	rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
 	rxd->tx_in->callback = req->callback;
@@ -1212,16 +1259,7 @@ static int sa_run(struct sa_req *req)
 	return -EINPROGRESS;
 
 err_cleanup:
-	dma_unmap_sg(ddev, req->src, sg_nents, DMA_TO_DEVICE);
-	kfree(rxd->split_src_sg);
-
-	if (req->src != req->dst) {
-		dst_nents = sg_nents_for_len(req->dst, req->size);
-		dma_unmap_sg(ddev, req->dst, dst_nents, DMA_FROM_DEVICE);
-		kfree(rxd->split_dst_sg);
-	}
-
-	kfree(rxd);
+	sa_free_sa_rx_data(rxd);
 
 	return ret;
 }
|
@ -1291,11 +1329,12 @@ static void sa_sha_dma_in_callback(void *data)
|
||||||
struct ahash_request *req;
|
struct ahash_request *req;
|
||||||
struct crypto_ahash *tfm;
|
struct crypto_ahash *tfm;
|
||||||
unsigned int authsize;
|
unsigned int authsize;
|
||||||
int i, sg_nents;
|
int i;
|
||||||
size_t ml, pl;
|
size_t ml, pl;
|
||||||
u32 *result;
|
u32 *result;
|
||||||
__be32 *mdptr;
|
__be32 *mdptr;
|
||||||
|
|
||||||
|
sa_sync_from_device(rxd);
|
||||||
req = container_of(rxd->req, struct ahash_request, base);
|
req = container_of(rxd->req, struct ahash_request, base);
|
||||||
tfm = crypto_ahash_reqtfm(req);
|
tfm = crypto_ahash_reqtfm(req);
|
||||||
authsize = crypto_ahash_digestsize(tfm);
|
authsize = crypto_ahash_digestsize(tfm);
|
||||||
|
@@ -1306,12 +1345,7 @@ static void sa_sha_dma_in_callback(void *data)
 	for (i = 0; i < (authsize / 4); i++)
 		result[i] = be32_to_cpu(mdptr[i + 4]);
 
-	sg_nents = sg_nents_for_len(req->src, req->nbytes);
-	dma_unmap_sg(rxd->ddev, req->src, sg_nents, DMA_FROM_DEVICE);
-
-	kfree(rxd->split_src_sg);
-
-	kfree(rxd);
+	sa_free_sa_rx_data(rxd);
 
 	ahash_request_complete(req, 0);
 }
@@ -1635,43 +1669,28 @@ static void sa_aead_dma_in_callback(void *data)
 	unsigned int authsize;
 	u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
 	size_t pl, ml;
-	int i, sglen;
+	int i;
 	int err = 0;
 	u16 auth_len;
 	u32 *mdptr;
-	bool diff_dst;
-	enum dma_data_direction dir_src;
 
+	sa_sync_from_device(rxd);
 	req = container_of(rxd->req, struct aead_request, base);
 	tfm = crypto_aead_reqtfm(req);
 	start = req->assoclen + req->cryptlen;
 	authsize = crypto_aead_authsize(tfm);
 
-	diff_dst = (req->src != req->dst) ? true : false;
-	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
-
 	mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
 	for (i = 0; i < (authsize / 4); i++)
 		mdptr[i + 4] = swab32(mdptr[i + 4]);
 
 	auth_len = req->assoclen + req->cryptlen;
-	if (!rxd->enc)
-		auth_len -= authsize;
-
-	sglen = sg_nents_for_len(rxd->src, auth_len);
-	dma_unmap_sg(rxd->ddev, rxd->src, sglen, dir_src);
-	kfree(rxd->split_src_sg);
-
-	if (diff_dst) {
-		sglen = sg_nents_for_len(rxd->dst, auth_len);
-		dma_unmap_sg(rxd->ddev, rxd->dst, sglen, DMA_FROM_DEVICE);
-		kfree(rxd->split_dst_sg);
-	}
 
 	if (rxd->enc) {
 		scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
 					 1);
 	} else {
+		auth_len -= authsize;
 		start -= authsize;
 		scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
 					 0);
@@ -1679,7 +1698,7 @@ static void sa_aead_dma_in_callback(void *data)
 		err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
 	}
 
-	kfree(rxd);
+	sa_free_sa_rx_data(rxd);
 
 	aead_request_complete(req, err);
 }