Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto fixes from Herbert Xu:
 "This fixes the following issues:

  API:
   - Fix kzalloc error path crash in ecryptfs added by skcipher
     conversion. Note the subject of the commit is screwed up and the
     correct subject is actually in the body.

  Drivers:
   - A number of fixes to the marvell cesa hashing code.
   - Remove bogus nested irqsave that clobbers the saved flags in ccp"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: marvell/cesa - forward devm_ioremap_resource() error code
  crypto: marvell/cesa - initialize hash states
  crypto: marvell/cesa - fix memory leak
  crypto: ccp - fix lock acquisition code
  eCryptfs: Use skcipher and shash
commit c130423620
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
@@ -53,7 +53,7 @@ static DEFINE_RWLOCK(ccp_unit_lock);
 static LIST_HEAD(ccp_units);
 
 /* Round-robin counter */
-static DEFINE_RWLOCK(ccp_rr_lock);
+static DEFINE_SPINLOCK(ccp_rr_lock);
 static struct ccp_device *ccp_rr;
 
 /* Ever-increasing value to produce unique unit numbers */
@@ -128,14 +128,14 @@ static struct ccp_device *ccp_get_device(void)
 	 */
 	read_lock_irqsave(&ccp_unit_lock, flags);
 	if (!list_empty(&ccp_units)) {
-		write_lock_irqsave(&ccp_rr_lock, flags);
+		spin_lock(&ccp_rr_lock);
 		dp = ccp_rr;
 		if (list_is_last(&ccp_rr->entry, &ccp_units))
 			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
 						  entry);
 		else
 			ccp_rr = list_next_entry(ccp_rr, entry);
-		write_unlock_irqrestore(&ccp_rr_lock, flags);
+		spin_unlock(&ccp_rr_lock);
 	}
 	read_unlock_irqrestore(&ccp_unit_lock, flags);
 
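Why the rwlock-to-spinlock change matters: the old code called write_lock_irqsave(&ccp_rr_lock, flags) while already inside read_lock_irqsave(&ccp_unit_lock, flags), reusing the same flags variable, so the inner save overwrote the interrupt state captured by the outer one and the final read_unlock_irqrestore() restored the wrong state. Since the round-robin lock is only ever taken with interrupts already disabled, a plain spin_lock()/spin_unlock() suffices. Below is a minimal userspace model of the clobbering; the lock bodies are reduced to just the save/restore of a fake interrupt flag, and the names are illustrative, not kernel API:

#include <stdio.h>

static unsigned long irq_state = 1;	/* 1 = interrupts enabled */

/* Model of a lock_irqsave(): remember the current state, then disable. */
static void lock_irqsave(unsigned long *flags)
{
	*flags = irq_state;
	irq_state = 0;
}

/* Model of an unlock_irqrestore(): put back whatever was saved. */
static void unlock_irqrestore(const unsigned long *flags)
{
	irq_state = *flags;
}

int main(void)
{
	unsigned long flags;

	lock_irqsave(&flags);		/* outer lock: saves "enabled" */
	lock_irqsave(&flags);		/* BUG: inner save clobbers it with "disabled" */
	unlock_irqrestore(&flags);
	unlock_irqrestore(&flags);	/* restores "disabled", not "enabled" */

	printf("interrupts %s\n", irq_state ? "enabled" : "disabled");
	return 0;
}

Compiled and run, this prints "interrupts disabled": the enabled state saved by the outer lock has been lost, which is exactly the bug the hunk above removes.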
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
@@ -420,7 +420,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
 	cesa->regs = devm_ioremap_resource(dev, res);
 	if (IS_ERR(cesa->regs))
-		return -ENOMEM;
+		return PTR_ERR(cesa->regs);
 
 	ret = mv_cesa_dev_dma_init(cesa);
 	if (ret)
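This change restores the standard error-pointer idiom: devm_ioremap_resource() encodes the real reason for failure in the returned pointer, and returning a hard-coded -ENOMEM throws that information away. A self-contained sketch of the idiom, with the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers re-implemented in plain C and a hypothetical fake_ioremap() standing in for the devres call:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Hypothetical stand-in for devm_ioremap_resource(): the region is busy. */
static void *fake_ioremap(void)
{
	return ERR_PTR(-EBUSY);
}

static long probe(void)
{
	void *regs = fake_ioremap();

	if (IS_ERR(regs))
		return PTR_ERR(regs);	/* forward -EBUSY instead of masking it as -ENOMEM */

	return 0;
}

int main(void)
{
	printf("probe() = %ld\n", probe());	/* prints -16, i.e. -EBUSY */
	return 0;
}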
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
@@ -588,6 +588,7 @@ struct mv_cesa_ahash_dma_req {
 	struct mv_cesa_tdma_req base;
 	u8 *padding;
 	dma_addr_t padding_dma;
+	u8 *cache;
 	dma_addr_t cache_dma;
 };
 
@@ -609,7 +610,7 @@ struct mv_cesa_ahash_req {
 		struct mv_cesa_ahash_std_req std;
 	} req;
 	struct mv_cesa_op_ctx op_tmpl;
-	u8 *cache;
+	u8 cache[CESA_MAX_HASH_BLOCK_SIZE];
 	unsigned int cache_ptr;
 	u64 len;
 	int src_nents;
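The two header hunks split the cache into per-mode storage: struct mv_cesa_ahash_req now embeds a fixed CESA_MAX_HASH_BLOCK_SIZE byte array, which lives and dies with the request context and therefore cannot leak, while the DMA path keeps a separate pool-allocated cache/cache_dma pair in struct mv_cesa_ahash_dma_req. A toy illustration of the embedded-buffer shape; the size and names are stand-ins, not the driver's real definitions:

#include <stdio.h>
#include <string.h>

#define MAX_HASH_BLOCK_SIZE 64	/* stand-in for CESA_MAX_HASH_BLOCK_SIZE */

/* Shape of the fixed request context: the cache is part of the struct,
 * so it needs no allocation and is released together with the request. */
struct ahash_req_ctx {
	unsigned char cache[MAX_HASH_BLOCK_SIZE];
	unsigned int cache_ptr;
};

int main(void)
{
	struct ahash_req_ctx ctx = { .cache_ptr = 0 };
	const char *partial = "partial block";

	/* Buffer a sub-block-sized chunk with no kmalloc/kfree pair to leak. */
	memcpy(ctx.cache, partial, strlen(partial));
	ctx.cache_ptr = (unsigned int)strlen(partial);

	printf("cached %u bytes\n", ctx.cache_ptr);
	return 0;
}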
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
@@ -45,69 +45,25 @@ mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
 	return mv_cesa_req_dma_iter_next_op(&iter->base);
 }
 
-static inline int mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_req *creq,
-						gfp_t flags)
+static inline int
+mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
 {
-	struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma;
-
-	creq->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
-				     &dreq->cache_dma);
-	if (!creq->cache)
+	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
+				    &req->cache_dma);
+	if (!req->cache)
 		return -ENOMEM;
 
 	return 0;
 }
 
-static inline int mv_cesa_ahash_std_alloc_cache(struct mv_cesa_ahash_req *creq,
-						gfp_t flags)
+static inline void
+mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
 {
-	creq->cache = kzalloc(CESA_MAX_HASH_BLOCK_SIZE, flags);
-	if (!creq->cache)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static int mv_cesa_ahash_alloc_cache(struct ahash_request *req)
-{
-	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		      GFP_KERNEL : GFP_ATOMIC;
-	int ret;
-
-	if (creq->cache)
-		return 0;
-
-	if (creq->req.base.type == CESA_DMA_REQ)
-		ret = mv_cesa_ahash_dma_alloc_cache(creq, flags);
-	else
-		ret = mv_cesa_ahash_std_alloc_cache(creq, flags);
-
-	return ret;
-}
-
-static inline void mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_req *creq)
-{
-	dma_pool_free(cesa_dev->dma->cache_pool, creq->cache,
-		      creq->req.dma.cache_dma);
-}
-
-static inline void mv_cesa_ahash_std_free_cache(struct mv_cesa_ahash_req *creq)
-{
-	kfree(creq->cache);
-}
-
-static void mv_cesa_ahash_free_cache(struct mv_cesa_ahash_req *creq)
-{
-	if (!creq->cache)
+	if (!req->cache)
 		return;
 
-	if (creq->req.base.type == CESA_DMA_REQ)
-		mv_cesa_ahash_dma_free_cache(creq);
-	else
-		mv_cesa_ahash_std_free_cache(creq);
-
-	creq->cache = NULL;
+	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
+		      req->cache_dma);
 }
 
 static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
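With the std-mode allocation gone, hash.c keeps a single alloc/free pair and the ownership is unambiguous: the DMA request allocates its bounce cache on demand, and the DMA cleanup path always releases it. A minimal model of that ownership rule, with malloc()/free() standing in for dma_pool_alloc()/dma_pool_free() and illustrative names throughout:

#include <stdio.h>
#include <stdlib.h>

/* The DMA request owns its bounce cache: allocated only on demand,
 * always released in the cleanup path. */
struct dma_req {
	unsigned char *cache;	/* NULL until the request needs it */
};

static int dma_req_alloc_cache(struct dma_req *req, size_t size)
{
	req->cache = malloc(size);
	return req->cache ? 0 : -1;
}

static void dma_req_free_cache(struct dma_req *req)
{
	if (!req->cache)
		return;		/* cleanup stays safe when nothing was allocated */
	free(req->cache);
	req->cache = NULL;
}

int main(void)
{
	struct dma_req req = { 0 };

	dma_req_free_cache(&req);	/* no-op: alloc never happened */
	if (dma_req_alloc_cache(&req, 64) == 0)
		dma_req_free_cache(&req);	/* one free per alloc, no leak */

	puts("cache released");
	return 0;
}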
@@ -146,6 +102,7 @@ static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
 
 	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
+	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
 	mv_cesa_dma_cleanup(&creq->req.dma.base);
 }
 
@@ -161,8 +118,6 @@ static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
 {
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
 
-	mv_cesa_ahash_free_cache(creq);
-
 	if (creq->req.base.type == CESA_DMA_REQ)
 		mv_cesa_ahash_dma_last_cleanup(req);
 }
@@ -445,14 +400,6 @@ static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
 static int mv_cesa_ahash_cache_req(struct ahash_request *req, bool *cached)
 {
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
-	int ret;
-
-	if (((creq->cache_ptr + req->nbytes) & CESA_HASH_BLOCK_SIZE_MSK) &&
-	    !creq->last_req) {
-		ret = mv_cesa_ahash_alloc_cache(req);
-		if (ret)
-			return ret;
-	}
 
 	if (creq->cache_ptr + req->nbytes < 64 && !creq->last_req) {
 		*cached = true;
@@ -505,10 +452,17 @@ mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
 			    gfp_t flags)
 {
 	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
+	int ret;
 
 	if (!creq->cache_ptr)
 		return 0;
 
+	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
+	if (ret)
+		return ret;
+
+	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);
+
 	return mv_cesa_dma_add_data_transfer(chain,
 					     CESA_SA_DATA_SRAM_OFFSET,
 					     ahashdreq->cache_dma,
@@ -848,10 +802,6 @@ static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
 	if (!cache_ptr)
 		return 0;
 
-	ret = mv_cesa_ahash_alloc_cache(req);
-	if (ret)
-		return ret;
-
 	memcpy(creq->cache, cache, cache_ptr);
 	creq->cache_ptr = cache_ptr;
 
@@ -860,9 +810,14 @@ static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
 
 static int mv_cesa_md5_init(struct ahash_request *req)
 {
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
 	struct mv_cesa_op_ctx tmpl = { };
 
 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);
+	creq->state[0] = MD5_H0;
+	creq->state[1] = MD5_H1;
+	creq->state[2] = MD5_H2;
+	creq->state[3] = MD5_H3;
 
 	mv_cesa_ahash_init(req, &tmpl, true);
 
@@ -923,9 +878,15 @@ struct ahash_alg mv_md5_alg = {
 
 static int mv_cesa_sha1_init(struct ahash_request *req)
 {
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
 	struct mv_cesa_op_ctx tmpl = { };
 
 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);
+	creq->state[0] = SHA1_H0;
+	creq->state[1] = SHA1_H1;
+	creq->state[2] = SHA1_H2;
+	creq->state[3] = SHA1_H3;
+	creq->state[4] = SHA1_H4;
 
 	mv_cesa_ahash_init(req, &tmpl, false);
 
@@ -986,9 +947,18 @@ struct ahash_alg mv_sha1_alg = {
 
 static int mv_cesa_sha256_init(struct ahash_request *req)
 {
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
 	struct mv_cesa_op_ctx tmpl = { };
 
 	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);
+	creq->state[0] = SHA256_H0;
+	creq->state[1] = SHA256_H1;
+	creq->state[2] = SHA256_H2;
+	creq->state[3] = SHA256_H3;
+	creq->state[4] = SHA256_H4;
+	creq->state[5] = SHA256_H5;
+	creq->state[6] = SHA256_H6;
+	creq->state[7] = SHA256_H7;
 
 	mv_cesa_ahash_init(req, &tmpl, false);
 
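These three init hunks seed creq->state[] with the algorithms' standard initial values, so a state exported before any hardware round is already valid. The words the kernel's MD5_H*, SHA1_H* and SHA256_H* macros expand to are the public constants from RFC 1321 (MD5) and FIPS 180-4 (SHA-1, SHA-256); a small standalone listing for reference:

#include <stdint.h>
#include <stdio.h>

/* Standard initial hash values per RFC 1321 and FIPS 180-4. */
static const uint32_t md5_iv[4] = {
	0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476,
};

static const uint32_t sha1_iv[5] = {
	0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0,
};

static const uint32_t sha256_iv[8] = {
	0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
	0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
};

int main(void)
{
	/* A freshly initialized request starts from exactly these words. */
	printf("MD5     H0 = 0x%08x\n", md5_iv[0]);
	printf("SHA-1   H4 = 0x%08x\n", sha1_iv[4]);
	printf("SHA-256 H7 = 0x%08x\n", sha256_iv[7]);
	return 0;
}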
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
@@ -635,8 +635,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
 	if (!s) {
 		printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
 		       "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
-		rc = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 	(*packet_size) = 0;
 	rc = ecryptfs_find_auth_tok_for_sig(
@@ -922,8 +921,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
 	if (!s) {
 		printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
 		       "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
-		rc = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 	if (max_packet_size < ECRYPTFS_TAG_70_MIN_METADATA_SIZE) {
 		printk(KERN_WARNING "%s: max_packet_size is [%zd]; it must be "
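These two hunks are the "kzalloc error path crash" called out in the merge message: after the skcipher conversion, the goto out unwinding runs cleanup code that touches fields of s, which oopses when the very first allocation failed and s is still NULL. Returning -ENOMEM directly is safe because nothing has been allocated at that point. A minimal sketch of the early-return shape, with illustrative names (pkt_state and its field are hypothetical stand-ins, not the eCryptfs structures):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt_state {
	void *block_aligned_filename;	/* resource the unwind path releases */
};

static int write_packet(void)
{
	struct pkt_state *s = calloc(1, sizeof(*s));
	int rc = 0;

	if (!s)
		return -ENOMEM;	/* nothing allocated yet, nothing to unwind */

	/* ... real work would go here and jump to the unwind on failure ... */

	/* unwind: only reached with a valid s */
	free(s->block_aligned_filename);	/* safe: s is non-NULL here */
	free(s);
	return rc;
}

int main(void)
{
	printf("write_packet() = %d\n", write_packet());
	return 0;
}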