commit 035f901eac

Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Merge the crypto tree to pull in fixes for the next merge window.
@@ -141,7 +141,7 @@ int public_key_verify_signature(const struct public_key *pkey,
	 * signature and returns that to us.
	 */
	ret = crypto_akcipher_verify(req);
-	if (ret == -EINPROGRESS) {
+	if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
		wait_for_completion(&compl.completion);
		ret = compl.err;
	}
@@ -1768,9 +1768,8 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
		break;
	case -EINPROGRESS:
	case -EBUSY:
-		ret = wait_for_completion_interruptible(
-			&drbg->ctr_completion);
-		if (!ret && !drbg->ctr_async_err) {
+		wait_for_completion(&drbg->ctr_completion);
+		if (!drbg->ctr_async_err) {
			reinit_completion(&drbg->ctr_completion);
			break;
		}
@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,

	err = crypto_skcipher_encrypt(&data->req);
	if (err == -EINPROGRESS || err == -EBUSY) {
-		err = wait_for_completion_interruptible(
-			&data->result.completion);
-		if (!err)
-			err = data->result.err;
+		wait_for_completion(&data->result.completion);
+		err = data->result.err;
	}

	if (err)
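The three hunks above are the same fix applied to three callers: a request handed to an async crypto engine can come back -EBUSY (queued on the backlog, when CRYPTO_TFM_REQ_MAY_BACKLOG is set) as well as -EINPROGRESS (queued normally), and both mean "wait for the completion callback", not "fail". The wait also becomes non-interruptible, since returning early on a signal would let the caller tear down buffers the engine still owns. Below is a minimal sketch of the convention, assuming kernel context; the struct and function names are illustrative, modeled on the open-coded callers in this merge rather than on an exported API.

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/errno.h>

/* Illustrative completion wrapper, as open-coded by the callers above. */
struct sync_wait {
	struct completion completion;
	int err;
};

/* Async callback: record the final status and wake the submitter.
 * A second -EINPROGRESS notification only means a backlogged request
 * has started executing, so keep waiting in that case.
 */
static void sync_wait_done(struct crypto_async_request *req, int err)
{
	struct sync_wait *wait = req->data;

	if (err == -EINPROGRESS)
		return;

	wait->err = err;
	complete(&wait->completion);
}

/* Turn an async submission result into a synchronous one. */
static int sync_wait_req(int ret, struct sync_wait *wait)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* Not interruptible: the request is in flight and its
		 * buffers must stay valid until the callback fires.
		 */
		wait_for_completion(&wait->completion);
		ret = wait->err;
	}
	return ret;
}

A caller would register sync_wait_done as the request callback with &wait as the callback data, then filter the submission result through sync_wait_req().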
@@ -1187,8 +1187,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
@@ -1475,8 +1475,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ?
-		       GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
@@ -1681,8 +1680,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ?
-		       GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
@@ -555,8 +555,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
@@ -808,8 +808,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ?
-		       GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct ablkcipher_edesc *edesc;
@@ -953,8 +952,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *qidev = ctx->qidev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ?
-		       GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
	struct ablkcipher_edesc *edesc;
@@ -719,8 +719,8 @@ static int ahash_update_ctx(struct ahash_request *req)
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
@@ -849,8 +849,8 @@ static int ahash_final_ctx(struct ahash_request *req)
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index;
@@ -926,8 +926,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_src_index;
@@ -1013,8 +1013,8 @@ static int ahash_digest(struct ahash_request *req)
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
@@ -1093,8 +1093,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int buflen = *current_buflen(state);
	u32 *desc;
@@ -1154,8 +1154,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = current_buf(state);
	int *buflen = current_buflen(state);
	u8 *next_buf = alt_buf(state);
@@ -1280,8 +1280,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	int buflen = *current_buflen(state);
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
@@ -1370,8 +1370,8 @@ static int ahash_update_first(struct ahash_request *req)
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	u8 *next_buf = alt_buf(state);
	int *next_buflen = alt_buflen(state);
	int to_hash;
@@ -173,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct rsa_edesc *edesc;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
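All of the caam driver hunks above correct the same allocation-flag test: CRYPTO_TFM_REQ_MAY_BACKLOG is a queueing hint and says nothing about whether the submitter may sleep, so it must not upgrade GFP_ATOMIC to GFP_KERNEL; only CRYPTO_TFM_REQ_MAY_SLEEP does. A minimal sketch of the selection the drivers now open-code (the helper name is illustrative, not a kernel API):

#include <linux/crypto.h>
#include <linux/gfp.h>

/* Illustrative helper for the ternary repeated across the caam hunks:
 * a submitter that may sleep gets GFP_KERNEL; everyone else (softirq
 * or other atomic context) gets GFP_ATOMIC. MAY_BACKLOG plays no part.
 */
static inline gfp_t caam_req_gfp(const struct crypto_async_request *base)
{
	return (base->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
	       GFP_KERNEL : GFP_ATOMIC;
}

Requests submitted from atomic context leave MAY_SLEEP clear and therefore get GFP_ATOMIC descriptor allocations, whether or not they asked to be backlogged.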
@@ -68,6 +68,7 @@
 static inline u32 rxe_crc32(struct rxe_dev *rxe,
			    u32 crc, void *next, size_t len)
 {
+	u32 retval;
	int err;

	SHASH_DESC_ON_STACK(shash, rxe->tfm);
@@ -81,7 +82,9 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
		return crc32_le(crc, next, len);
	}

-	return *(u32 *)shash_desc_ctx(shash);
+	retval = *(u32 *)shash_desc_ctx(shash);
+	barrier_data(shash_desc_ctx(shash));
+	return retval;
 }

 int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
@@ -38,6 +38,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length)
 {
	SHASH_DESC_ON_STACK(shash, tfm);
	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 retval;
	int err;

	shash->tfm = tfm;
@@ -47,5 +48,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length)
	err = crypto_shash_update(shash, address, length);
	BUG_ON(err);

-	return *ctx;
+	retval = *ctx;
+	barrier_data(ctx);
+	return retval;
 }
@@ -1078,6 +1078,7 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
 {
	SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 retval;
	int err;

	shash->tfm = sbi->s_chksum_driver;
@@ -1087,7 +1088,9 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
	err = crypto_shash_update(shash, address, length);
	BUG_ON(err);

-	return *ctx;
+	retval = *ctx;
+	barrier_data(ctx);
+	return retval;
 }

 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
@@ -43,7 +43,7 @@ static struct crypto_shash *tfm;
 u32 crc32c(u32 crc, const void *address, unsigned int length)
 {
	SHASH_DESC_ON_STACK(shash, tfm);
-	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
	int err;

	shash->tfm = tfm;
@@ -53,7 +53,9 @@ u32 crc32c(u32 crc, const void *address, unsigned int length)
	err = crypto_shash_update(shash, address, length);
	BUG_ON(err);

-	return *ctx;
+	ret = *ctx;
+	barrier_data(ctx);
+	return ret;
 }

 EXPORT_SYMBOL(crc32c);
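The last four hunks share one workaround: each function returns a value read out of a SHASH_DESC_ON_STACK() buffer, and a bare return *ctx; lets an aggressive compiler treat the stack frame as dead around the load (the fix pulled in here reportedly works around a gcc bug on sparc that referenced a deallocated stack frame). Copying into a local and issuing barrier_data() on the buffer forces the descriptor to stay live until the value is out. A hedged sketch of the pattern, assuming an already-allocated crc32c shash tfm; the function name is illustrative, and on kernels of this vintage a shash->flags = 0; line would also be needed:

#include <crypto/hash.h>
#include <linux/bug.h>
#include <linux/compiler.h>

/* Sketch of the safe read-out pattern for an on-stack shash descriptor.
 * 'tfm' is assumed to be a crc32c crypto_shash allocated elsewhere.
 */
static u32 crc32c_readout(struct crypto_shash *tfm, u32 crc,
			  const void *address, unsigned int length)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	u32 *ctx = (u32 *)shash_desc_ctx(shash);
	u32 retval;
	int err;

	shash->tfm = tfm;
	*ctx = crc;		/* seed the running CRC in the desc context */

	err = crypto_shash_update(shash, address, length);
	BUG_ON(err);

	retval = *ctx;
	barrier_data(ctx);	/* keep the stack buffer live until read */
	return retval;
}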